/-
Copyright (c) 2019 Neil Strickland. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Neil Strickland
! This file was ported from Lean 3 source module data.pnat.xgcd
! leanprover-community/mathlib commit 6afc9b06856ad973f6a2619e3e8a0a8d537a58f2
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Tactic.Ring
import Mathlib.Data.PNat.Prime
/-!
# Euclidean algorithm for ℕ
This file sets up a version of the Euclidean algorithm that only works with natural numbers.
Given `0 < a, b`, it computes the unique `(w, x, y, z, d)` such that the following identities hold:
* `a = (w + x) d`
* `b = (y + z) d`
* `w * z = x * y + 1`
`d` is then the gcd of `a` and `b`, and `a' := a / d = w + x` and `b' := b / d = y + z` are coprime.
This story is closely related to the structure of SL₂(ℕ) (as a free monoid on two generators) and
the theory of continued fractions.
## Main declarations
* `XgcdType`: Helper type used to define the gcd. Encapsulates `(wp, x, y, zp, ap, bp)`, where
  `wp`, `zp`, `ap`, `bp` are the variables that change through the algorithm.
* `IsSpecial`: States `wp + zp + wp * zp = x * y`, equivalently `w * z = x * y + 1`
* `IsReduced`: States `ap = bp`
## Notes
See `Nat.xgcd` for a very similar algorithm allowing values in `ℤ`.
-/
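/- As a worked example (illustrative, not part of the original source): for `a = 12`,
`b = 8` the unique solution is `(w, x, y, z, d) = (2, 1, 1, 1, 4)`, since
`12 = (2 + 1) * 4`, `8 = (1 + 1) * 4`, and `2 * 1 = 1 * 1 + 1`; here `d = 4 = gcd 12 8`. -/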
open Nat
namespace PNat
/-- A term of `XgcdType` is a system of six naturals. They should
be thought of as representing the matrix
[[w, x], [y, z]] = [[wp + 1, x], [y, zp + 1]]
together with the vector [a, b] = [ap + 1, bp + 1].
-/
structure XgcdType where
/-- `wp` is a variable which changes through the algorithm. -/
wp : ℕ
/-- `x` satisfies `a / d = w + x` at the final step. -/
x : ℕ
/-- `y` satisfies `b / d = z + y` at the final step. -/
y : ℕ
/-- `zp` is a variable which changes through the algorithm. -/
zp : ℕ
/-- `ap` is a variable which changes through the algorithm. -/
ap : ℕ
/-- `bp` is a variable which changes through the algorithm. -/
bp : ℕ
deriving Inhabited
#align pnat.xgcd_type PNat.XgcdType
namespace XgcdType
variable (u : XgcdType)
instance : SizeOf XgcdType :=
⟨fun u => u.bp⟩
/-- The `Repr` instance converts terms to strings in a way that
reflects the matrix/vector interpretation as above. -/
instance : Repr XgcdType where
reprPrec
| g, _ => s!"[[[ {repr (g.wp + 1)}, {(repr g.x)} ], [" ++
s!"{repr g.y}, {repr (g.zp + 1)}]], [" ++
s!"{repr (g.ap + 1)}, {repr (g.bp + 1)}]]"
/-- Another `mk` using ℕ and ℕ+ -/
def mk' (w : ℕ+) (x : ℕ) (y : ℕ) (z : ℕ+) (a : ℕ+) (b : ℕ+) : XgcdType :=
mk w.val.pred x y z.val.pred a.val.pred b.val.pred
#align pnat.xgcd_type.mk' PNat.XgcdType.mk'
/-- `w = wp + 1` -/
def w : ℕ+ :=
succPNat u.wp
#align pnat.xgcd_type.w PNat.XgcdType.w
/-- `z = zp + 1` -/
def z : ℕ+ :=
succPNat u.zp
#align pnat.xgcd_type.z PNat.XgcdType.z
/-- `a = ap + 1` -/
def a : ℕ+ :=
succPNat u.ap
#align pnat.xgcd_type.a PNat.XgcdType.a
/-- `b = bp + 1` -/
def b : ℕ+ :=
succPNat u.bp
#align pnat.xgcd_type.b PNat.XgcdType.b
/-- `r = a % b`: remainder -/
def r : ℕ :=
(u.ap + 1) % (u.bp + 1)
#align pnat.xgcd_type.r PNat.XgcdType.r
/-- `q = a / b`: quotient -/
def q : ℕ :=
(u.ap + 1) / (u.bp + 1)
#align pnat.xgcd_type.q PNat.XgcdType.q
/-- `qp = q - 1` -/
def qp : ℕ :=
u.q - 1
#align pnat.xgcd_type.qp PNat.XgcdType.qp
/-- The map `v` gives the product of the matrix
[[w, x], [y, z]] = [[wp + 1, x], [y, zp + 1]]
and the vector [a, b] = [ap + 1, bp + 1]. The map
`vp` gives [sp, tp] such that v = [sp + 1, tp + 1].
-/
def vp : ℕ × ℕ :=
⟨u.wp + u.x + u.ap + u.wp * u.ap + u.x * u.bp, u.y + u.zp + u.bp + u.y * u.ap + u.zp * u.bp⟩
#align pnat.xgcd_type.vp PNat.XgcdType.vp
/-- `v = [sp + 1, tp + 1]`; see `vp`. -/
def v : ℕ × ℕ :=
⟨u.w * u.a + u.x * u.b, u.y * u.a + u.z * u.b⟩
#align pnat.xgcd_type.v PNat.XgcdType.v
/-- `succ₂ [t.1, t.2] = [t.1.succ, t.2.succ]` -/
def succ₂ (t : ℕ × ℕ) : ℕ × ℕ :=
⟨t.1.succ, t.2.succ⟩
#align pnat.xgcd_type.succ₂ PNat.XgcdType.succ₂
theorem v_eq_succ_vp : u.v = succ₂ u.vp := by
ext <;> dsimp [v, vp, w, z, a, b, succ₂] <;> (repeat' rw [Nat.succ_eq_add_one]; ring_nf)
#align pnat.xgcd_type.v_eq_succ_vp PNat.XgcdType.v_eq_succ_vp
/-- `IsSpecial` holds if the matrix has determinant one. -/
def IsSpecial : Prop :=
u.wp + u.zp + u.wp * u.zp = u.x * u.y
#align pnat.xgcd_type.is_special PNat.XgcdType.IsSpecial
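/- Note (illustrative): since `w = wp + 1` and `z = zp + 1`, the equation
`wp + zp + wp * zp = x * y` is equivalent to `w * z = x * y + 1`, i.e. the matrix
`[[w, x], [y, z]]` has determinant one; this equivalence is `isSpecial_iff` below. -/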
/-- `IsSpecial'` is an alternative formulation of `IsSpecial`. -/
def IsSpecial' : Prop :=
u.w * u.z = succPNat (u.x * u.y)
#align pnat.xgcd_type.is_special' PNat.XgcdType.IsSpecial'
theorem isSpecial_iff : u.IsSpecial ↔ u.IsSpecial' := by
dsimp [IsSpecial, IsSpecial']
let ⟨wp, x, y, zp, ap, bp⟩ := u
constructor <;> intro h <;> simp [w, z, succPNat] at * <;>
simp only [← coe_inj, mul_coe, mk_coe] at *
. simp_all [← h, Nat.mul, Nat.succ_eq_add_one]; ring
. simp [Nat.succ_eq_add_one, Nat.mul_add, Nat.add_mul, ← Nat.add_assoc] at h; rw [← h]; ring
-- Porting note: Old code has been removed as it was much longer.
#align pnat.xgcd_type.is_special_iff PNat.XgcdType.isSpecial_iff
/-- `IsReduced` holds if the two entries in the vector are the
same. The reduction algorithm will produce a system with this
property, whose product vector is the same as for the original
system. -/
def IsReduced : Prop :=
u.ap = u.bp
#align pnat.xgcd_type.is_reduced PNat.XgcdType.IsReduced
/-- `IsReduced'` is an alternative formulation of `IsReduced`. -/
def IsReduced' : Prop :=
u.a = u.b
#align pnat.xgcd_type.is_reduced' PNat.XgcdType.IsReduced'
theorem isReduced_iff : u.IsReduced ↔ u.IsReduced' :=
succPNat_inj.symm
#align pnat.xgcd_type.is_reduced_iff PNat.XgcdType.isReduced_iff
/-- `flip` flips the placement of variables during the algorithm. -/
def flip : XgcdType where
wp := u.zp
x := u.y
y := u.x
zp := u.wp
ap := u.bp
bp := u.ap
#align pnat.xgcd_type.flip PNat.XgcdType.flip
@[simp]
theorem flip_w : (flip u).w = u.z :=
rfl
#align pnat.xgcd_type.flip_w PNat.XgcdType.flip_w
@[simp]
theorem flip_x : (flip u).x = u.y :=
rfl
#align pnat.xgcd_type.flip_x PNat.XgcdType.flip_x
@[simp]
theorem flip_y : (flip u).y = u.x :=
rfl
#align pnat.xgcd_type.flip_y PNat.XgcdType.flip_y
@[simp]
theorem flip_z : (flip u).z = u.w :=
rfl
#align pnat.xgcd_type.flip_z PNat.XgcdType.flip_z
@[simp]
theorem flip_a : (flip u).a = u.b :=
rfl
#align pnat.xgcd_type.flip_a PNat.XgcdType.flip_a
@[simp]
theorem flip_b : (flip u).b = u.a :=
rfl
#align pnat.xgcd_type.flip_b PNat.XgcdType.flip_b
theorem flip_isReduced : (flip u).IsReduced ↔ u.IsReduced := by
dsimp [IsReduced, flip]
constructor <;> intro h <;> exact h.symm
#align pnat.xgcd_type.flip_is_reduced PNat.XgcdType.flip_isReduced
theorem flip_isSpecial : (flip u).IsSpecial ↔ u.IsSpecial := by
dsimp [IsSpecial, flip]
rw [mul_comm u.x, mul_comm u.zp, add_comm u.zp]
#align pnat.xgcd_type.flip_is_special PNat.XgcdType.flip_isSpecial
theorem flip_v : (flip u).v = u.v.swap := by
dsimp [v]
ext
· simp only
ring
· simp only
ring
#align pnat.xgcd_type.flip_v PNat.XgcdType.flip_v
/-- Properties of division with remainder for a / b. -/
theorem rq_eq : u.r + (u.bp + 1) * u.q = u.ap + 1 :=
Nat.mod_add_div (u.ap + 1) (u.bp + 1)
#align pnat.xgcd_type.rq_eq PNat.XgcdType.rq_eq
theorem qp_eq (hr : u.r = 0) : u.q = u.qp + 1 := by
by_cases hq : u.q = 0
· let h := u.rq_eq
rw [hr, hq, mul_zero, add_zero] at h
cases h
· exact (Nat.succ_pred_eq_of_pos (Nat.pos_of_ne_zero hq)).symm
#align pnat.xgcd_type.qp_eq PNat.XgcdType.qp_eq
/-- The following function provides the starting point for
our algorithm. We will apply an iterative reduction process
to it, which will produce a system satisfying `IsReduced`.
The gcd can be read off from this final system.
-/
def start (a b : ℕ+) : XgcdType :=
⟨0, 0, 0, 0, a - 1, b - 1⟩
#align pnat.xgcd_type.start PNat.XgcdType.start
theorem start_isSpecial (a b : ℕ+) : (start a b).IsSpecial := by
dsimp [start, IsSpecial]
#align pnat.xgcd_type.start_is_special PNat.XgcdType.start_isSpecial
theorem start_v (a b : ℕ+) : (start a b).v = ⟨a, b⟩ := by
dsimp [start, v, XgcdType.a, XgcdType.b, w, z]
have : succ 0 = 1 := rfl
rw [this, one_mul, one_mul, zero_mul, zero_mul, zero_add, add_zero]
rw [← Nat.pred_eq_sub_one, ← Nat.pred_eq_sub_one]
rw [Nat.succ_pred_eq_of_pos a.pos, Nat.succ_pred_eq_of_pos b.pos]
#align pnat.xgcd_type.start_v PNat.XgcdType.start_v
/-- `finish` happens when the reducing process ends. -/
def finish : XgcdType :=
XgcdType.mk u.wp ((u.wp + 1) * u.qp + u.x) u.y (u.y * u.qp + u.zp) u.bp u.bp
#align pnat.xgcd_type.finish PNat.XgcdType.finish
theorem finish_isReduced : u.finish.IsReduced := by
dsimp [IsReduced]
rfl
#align pnat.xgcd_type.finish_is_reduced PNat.XgcdType.finish_isReduced
theorem finish_isSpecial (hs : u.IsSpecial) : u.finish.IsSpecial := by
dsimp [IsSpecial, finish] at hs ⊢
rw [add_mul _ _ u.y, add_comm _ (u.x * u.y), ← hs]
ring
#align pnat.xgcd_type.finish_is_special PNat.XgcdType.finish_isSpecial
theorem finish_v (hr : u.r = 0) : u.finish.v = u.v := by
let ha : u.r + u.b * u.q = u.a := u.rq_eq
rw [hr, zero_add] at ha
ext
· change (u.wp + 1) * u.b + ((u.wp + 1) * u.qp + u.x) * u.b = u.w * u.a + u.x * u.b
have : u.wp + 1 = u.w := rfl
rw [this, ← ha, u.qp_eq hr]
ring_nf
· change u.y * u.b + (u.y * u.qp + u.z) * u.b = u.y * u.a + u.z * u.b
rw [← ha, u.qp_eq hr]
ring
#align pnat.xgcd_type.finish_v PNat.XgcdType.finish_v
/-- This is the main reduction step, which is used when `u.r ≠ 0`, or
equivalently when `b` does not divide `a`. -/
def step : XgcdType :=
XgcdType.mk (u.y * u.q + u.zp) u.y ((u.wp + 1) * u.q + u.x) u.wp u.bp (u.r - 1)
#align pnat.xgcd_type.step PNat.XgcdType.step
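/- Informally (illustrative): `step` performs one Euclidean division step `a = q * b + r`,
replacing the vector `(a, b)` by `(b, r)` and updating the matrix so that the product
vector is merely swapped; see `step_v` below. -/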
/-- We will apply the above step recursively. The following result
is used to ensure that the process terminates. -/
theorem step_wf (hr : u.r ≠ 0) : SizeOf.sizeOf u.step < SizeOf.sizeOf u := by
change u.r - 1 < u.bp
have h₀ : u.r - 1 + 1 = u.r := Nat.succ_pred_eq_of_pos (Nat.pos_of_ne_zero hr)
have h₁ : u.r < u.bp + 1 := Nat.mod_lt (u.ap + 1) u.bp.succ_pos
rw [← h₀] at h₁
exact lt_of_succ_lt_succ h₁
#align pnat.xgcd_type.step_wf PNat.XgcdType.step_wf
theorem step_isSpecial (hs : u.IsSpecial) : u.step.IsSpecial := by
dsimp [IsSpecial, step] at hs ⊢
rw [mul_add, mul_comm u.y u.x, ← hs]
ring
#align pnat.xgcd_type.step_is_special PNat.XgcdType.step_isSpecial
/-- The reduction step does not change the product vector. -/
theorem step_v (hr : u.r ≠ 0) : u.step.v = u.v.swap := by
let ha : u.r + u.b * u.q = u.a := u.rq_eq
let hr : u.r - 1 + 1 = u.r := (add_comm _ 1).trans (add_tsub_cancel_of_le (Nat.pos_of_ne_zero hr))
ext
· change ((u.y * u.q + u.z) * u.b + u.y * (u.r - 1 + 1) : ℕ) = u.y * u.a + u.z * u.b
rw [← ha, hr]
ring
· change ((u.w * u.q + u.x) * u.b + u.w * (u.r - 1 + 1) : ℕ) = u.w * u.a + u.x * u.b
rw [← ha, hr]
ring
#align pnat.xgcd_type.step_v PNat.XgcdType.step_v
-- Porting note: removed 'have' and added decreasing_by to avoid lint errors
/-- We can now define the full reduction function, which applies
`step` as long as possible, and then applies `finish`. Termination
is established by `decreasing_by`, using `step_wf`: the `sizeOf`
measure (the second vector entry `bp`) strictly decreases with each
`step`. The same fact needs to be introduced in all the inductive
proofs of properties given below. -/
def reduce (u : XgcdType) : XgcdType :=
dite (u.r = 0) (fun _ => u.finish) fun _h =>
flip (reduce u.step)
decreasing_by apply u.step_wf _h
#align pnat.xgcd_type.reduce PNat.XgcdType.reduce
theorem reduce_a {u : XgcdType} (h : u.r = 0) : u.reduce = u.finish := by
rw [reduce]
exact if_pos h
#align pnat.xgcd_type.reduce_a PNat.XgcdType.reduce_a
theorem reduce_b {u : XgcdType} (h : u.r ≠ 0) : u.reduce = u.step.reduce.flip := by
rw [reduce]
exact if_neg h
#align pnat.xgcd_type.reduce_b PNat.XgcdType.reduce_b
theorem reduce_isReduced : ∀ u : XgcdType, u.reduce.IsReduced
| u =>
dite (u.r = 0)
(fun h => by
rw [reduce_a h]
exact u.finish_isReduced)
fun h => by
have : SizeOf.sizeOf u.step < SizeOf.sizeOf u := u.step_wf h
rw [reduce_b h, flip_isReduced]
apply reduce_isReduced
#align pnat.xgcd_type.reduce_reduced PNat.XgcdType.reduce_isReduced
theorem reduce_isReduced' (u : XgcdType) : u.reduce.IsReduced' :=
(isReduced_iff _).mp u.reduce_isReduced
#align pnat.xgcd_type.reduce_reduced' PNat.XgcdType.reduce_isReduced'
theorem reduce_isSpecial : ∀ u : XgcdType, u.IsSpecial → u.reduce.IsSpecial
| u =>
dite (u.r = 0)
(fun h hs => by
rw [reduce_a h]
exact u.finish_isSpecial hs)
fun h hs => by
have : SizeOf.sizeOf u.step < SizeOf.sizeOf u := u.step_wf h
rw [reduce_b h]
exact (flip_isSpecial _).mpr (reduce_isSpecial _ (u.step_isSpecial hs))
#align pnat.xgcd_type.reduce_special PNat.XgcdType.reduce_isSpecial
theorem reduce_isSpecial' (u : XgcdType) (hs : u.IsSpecial) : u.reduce.IsSpecial' :=
(isSpecial_iff _).mp (u.reduce_isSpecial hs)
#align pnat.xgcd_type.reduce_special' PNat.XgcdType.reduce_isSpecial'
theorem reduce_v : ∀ u : XgcdType, u.reduce.v = u.v
| u =>
dite (u.r = 0) (fun h => by rw [reduce_a h, finish_v u h]) fun h =>
by
have : SizeOf.sizeOf u.step < SizeOf.sizeOf u := u.step_wf h
rw [reduce_b h, flip_v, reduce_v (step u), step_v u h, Prod.swap_swap]
#align pnat.xgcd_type.reduce_v PNat.XgcdType.reduce_v
end XgcdType
section gcd
variable (a b : ℕ+)
/-- Extended Euclidean algorithm -/
def xgcd : XgcdType :=
(XgcdType.start a b).reduce
#align pnat.xgcd PNat.xgcd
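/- Illustrative trace (relying only on the definitions above): from `start 12 8`, one
`step` (with `q = 1`, `r = 4`) followed by `finish` and a final `flip` yields the system
rendered by `Repr` as `[[[ 2, 1 ], [1, 1]], [4, 4]]`; hence `gcdD 12 8 = 4`. -/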
/-- `gcdD a b = gcd a b` -/
def gcdD : ℕ+ :=
(xgcd a b).a
#align pnat.gcd_d PNat.gcdD
/-- Final value of `w` -/
def gcdW : ℕ+ :=
(xgcd a b).w
#align pnat.gcd_w PNat.gcdW
/-- Final value of `x` -/
def gcdX : ℕ :=
(xgcd a b).x
#align pnat.gcd_x PNat.gcdX
/-- Final value of `y` -/
def gcdY : ℕ :=
(xgcd a b).y
#align pnat.gcd_y PNat.gcdY
/-- Final value of `z` -/
def gcdZ : ℕ+ :=
(xgcd a b).z
#align pnat.gcd_z PNat.gcdZ
/-- Final value of `a / d` -/
def gcdA' : ℕ+ :=
succPNat ((xgcd a b).wp + (xgcd a b).x)
#align pnat.gcd_a' PNat.gcdA'
/-- Final value of `b / d` -/
def gcdB' : ℕ+ :=
succPNat ((xgcd a b).y + (xgcd a b).zp)
#align pnat.gcd_b' PNat.gcdB'
theorem gcdA'_coe : (gcdA' a b : ℕ) = gcdW a b + gcdX a b :=
by
dsimp [gcdA', gcdX, gcdW, XgcdType.w]
rw [Nat.succ_eq_add_one, Nat.succ_eq_add_one, add_right_comm]
#align pnat.gcd_a'_coe PNat.gcdA'_coe
theorem gcdB'_coe : (gcdB' a b : ℕ) = gcdY a b + gcdZ a b := by
dsimp [gcdB', gcdY, gcdZ, XgcdType.z]
rw [Nat.succ_eq_add_one, Nat.succ_eq_add_one, add_assoc]
#align pnat.gcd_b'_coe PNat.gcdB'_coe
theorem gcd_props :
let d := gcdD a b
let w := gcdW a b
let x := gcdX a b
let y := gcdY a b
let z := gcdZ a b
let a' := gcdA' a b
let b' := gcdB' a b
w * z = succPNat (x * y) ∧
a = a' * d ∧
b = b' * d ∧
z * a' = succPNat (x * b') ∧
w * b' = succPNat (y * a') ∧ (z * a : ℕ) = x * b + d ∧ (w * b : ℕ) = y * a + d := by
intros d w x y z a' b'
let u := XgcdType.start a b
let ur := u.reduce
have _ : d = ur.a := rfl
have hb : d = ur.b := u.reduce_isReduced'
have ha' : (a' : ℕ) = w + x := gcdA'_coe a b
have hb' : (b' : ℕ) = y + z := gcdB'_coe a b
have hdet : w * z = succPNat (x * y) := u.reduce_isSpecial' rfl
constructor
exact hdet
have hdet' : (w * z : ℕ) = x * y + 1 := by rw [← mul_coe, hdet, succPNat_coe]
have _ : u.v = ⟨a, b⟩ := XgcdType.start_v a b
let hv : Prod.mk (w * d + x * ur.b : ℕ) (y * d + z * ur.b : ℕ) = ⟨a, b⟩ :=
u.reduce_v.trans (XgcdType.start_v a b)
rw [← hb, ← add_mul, ← add_mul, ← ha', ← hb'] at hv
have ha'' : (a : ℕ) = a' * d := (congr_arg Prod.fst hv).symm
have hb'' : (b : ℕ) = b' * d := (congr_arg Prod.snd hv).symm
constructor
exact eq ha''
constructor
exact eq hb''
have hza' : (z * a' : ℕ) = x * b' + 1 := by
rw [ha', hb', mul_add, mul_add, mul_comm (z : ℕ), hdet']
ring
have hwb' : (w * b' : ℕ) = y * a' + 1 := by
rw [ha', hb', mul_add, mul_add, hdet']
ring
constructor
· apply eq
rw [succPNat_coe, Nat.succ_eq_add_one, mul_coe, hza']
constructor
· apply eq
rw [succPNat_coe, Nat.succ_eq_add_one, mul_coe, hwb']
rw [ha'', hb'']
repeat' rw [← @mul_assoc]
rw [hza', hwb']
constructor <;> ring
#align pnat.gcd_props PNat.gcd_props
theorem gcd_eq : gcdD a b = gcd a b :=
by
rcases gcd_props a b with ⟨_, h₁, h₂, _, _, h₅, _⟩
apply dvd_antisymm
· apply dvd_gcd
exact Dvd.intro (gcdA' a b) (h₁.trans (mul_comm _ _)).symm
exact Dvd.intro (gcdB' a b) (h₂.trans (mul_comm _ _)).symm
· have h₇ : (gcd a b : ℕ) ∣ gcdZ a b * a := (Nat.gcd_dvd_left a b).trans (dvd_mul_left _ _)
have h₈ : (gcd a b : ℕ) ∣ gcdX a b * b := (Nat.gcd_dvd_right a b).trans (dvd_mul_left _ _)
rw [h₅] at h₇
rw [dvd_iff]
exact (Nat.dvd_add_iff_right h₈).mpr h₇
#align pnat.gcd_eq PNat.gcd_eq
theorem gcd_det_eq : gcdW a b * gcdZ a b = succPNat (gcdX a b * gcdY a b) :=
(gcd_props a b).1
#align pnat.gcd_det_eq PNat.gcd_det_eq
theorem gcd_b_eq : b = gcdB' a b * gcd a b :=
gcd_eq a b ▸ (gcd_props a b).2.2.1
#align pnat.gcd_b_eq PNat.gcd_b_eq
theorem gcd_rel_left' : gcdZ a b * gcdA' a b = succPNat (gcdX a b * gcdB' a b) :=
(gcd_props a b).2.2.2.1
#align pnat.gcd_rel_left' PNat.gcd_rel_left'
theorem gcd_rel_right' : gcdW a b * gcdB' a b = succPNat (gcdY a b * gcdA' a b) :=
(gcd_props a b).2.2.2.2.1
#align pnat.gcd_rel_right' PNat.gcd_rel_right'
theorem gcd_rel_left : (gcdZ a b * a : ℕ) = gcdX a b * b + gcd a b :=
gcd_eq a b ▸ (gcd_props a b).2.2.2.2.2.1
#align pnat.gcd_rel_left PNat.gcd_rel_left
theorem gcd_rel_right : (gcdW a b * b : ℕ) = gcdY a b * a + gcd a b :=
gcd_eq a b ▸ (gcd_props a b).2.2.2.2.2.2
#align pnat.gcd_rel_right PNat.gcd_rel_right
end gcd
end PNat
{-# OPTIONS --universe-polymorphism #-}
module Categories.Monoidal.IntConstruction where
open import Level
open import Data.Fin
open import Data.Product
open import Categories.Category
open import Categories.Product
open import Categories.Monoidal
open import Categories.Functor hiding (id; _∘_; identityʳ; assoc)
open import Categories.Monoidal.Braided
open import Categories.Monoidal.Helpers
open import Categories.Monoidal.Braided.Helpers
open import Categories.Monoidal.Symmetric
open import Categories.NaturalIsomorphism
open import Categories.NaturalTransformation hiding (id)
open import Categories.Monoidal.Traced
------------------------------------------------------------------------------
record Polarized {o o' : Level} (A : Set o) (B : Set o') : Set (o ⊔ o') where
constructor ±
field
pos : A
neg : B
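-- Illustrative summary of the construction below: an object is a polarized
-- pair (± A+ A-); a morphism (± A+ A-) ⇒ (± B+ B-) is a C-morphism
-- A+ ⊗ B- ⇒ B+ ⊗ A-, and composition traces out the middle object B-.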
IntC : ∀ {o ℓ e}
{C : Category o ℓ e} {M : Monoidal C} {B : Braided M} {S : Symmetric B} →
(T : Traced S) → Category o ℓ e
IntC {o} {ℓ} {e} {C} {M} {B} {S} T = record {
Obj = Polarized C.Obj C.Obj
; _⇒_ = λ { (± A+ A-) (± B+ B-) → C [ F.⊗ₒ (A+ , B-) , F.⊗ₒ (B+ , A-) ]}
; _≡_ = C._≡_
; _∘_ = λ { {(± A+ A-)} {(± B+ B-)} {(± C+ C-)} g f →
T.trace { B- } {F.⊗ₒ (A+ , C-)} {F.⊗ₒ (C+ , A-)}
(ηassoc⇐ (ternary C C+ A- B-) C.∘
F.⊗ₘ (C.id , ηbraid (binary C B- A-)) C.∘
ηassoc⇒ (ternary C C+ B- A-) C.∘
F.⊗ₘ (g , C.id) C.∘
ηassoc⇐ (ternary C B+ C- A-) C.∘
F.⊗ₘ (C.id , ηbraid (binary C A- C-)) C.∘
ηassoc⇒ (ternary C B+ A- C-) C.∘
F.⊗ₘ (f , C.id) C.∘
ηassoc⇐ (ternary C A+ B- C-) C.∘
F.⊗ₘ (C.id , ηbraid (binary C C- B-)) C.∘
ηassoc⇒ (ternary C A+ C- B-))}
; id = F.⊗ₘ (C.id , C.id)
; assoc = λ { {(± A+ A-)} {(± B+ B-)} {(± C+ C-)} {(± D+ D-)} {f} {g} {h} →
{!!} }
; identityˡ = λ { {(± A+ A-)} {(± B+ B-)} {f} →
(begin
{!!}
↓⟨ {!!} ⟩
f ∎) }
; identityʳ = λ { {(± A+ A-)} {(± B+ B-)} {f} →
{!!} }
; equiv = C.equiv
; ∘-resp-≡ = λ { {(± A+ A-)} {(± B+ B-)} {(± C+ C-)} {f} {h} {g} {i} f≡h g≡i →
{!!} }
}
where
module C = Category C
open C.HomReasoning
module M = Monoidal M renaming (id to 𝟙)
module F = Functor M.⊗ renaming (F₀ to ⊗ₒ; F₁ to ⊗ₘ)
module B = Braided B
module S = Symmetric S
module T = Traced T
module NIassoc = NaturalIsomorphism M.assoc
open NaturalTransformation NIassoc.F⇒G renaming (η to ηassoc⇒)
open NaturalTransformation NIassoc.F⇐G renaming (η to ηassoc⇐)
module NIbraid = NaturalIsomorphism B.braid
open NaturalTransformation NIbraid.F⇒G renaming (η to ηbraid)
IntConstruction : ∀ {o ℓ e}
{C : Category o ℓ e} {M : Monoidal C} {B : Braided M} {S : Symmetric B} →
(T : Traced S) →
Σ[ IntC ∈ Category o ℓ e ]
Σ[ MIntC ∈ Monoidal IntC ]
Σ[ BIntC ∈ Braided MIntC ]
Σ[ SIntC ∈ Symmetric BIntC ]
Traced SIntC
IntConstruction = {!!}
Finally we found a safe & secure tool that allows us to surf the net from all over the world.
As QA Manager/Lead Tester for an organization that currently performs financial transactions in over 60 countries worldwide, GeoSurf has been an invaluable tool, allowing me to perform verifications of transaction flows and localized UI components that I would otherwise be unable to check in person. This reduces test cycle times immensely and in a lot of circumstances allows me to do testing that I simply would not be able to do otherwise.
With GeoSurf, we can build truly global applications and experience our games as users from every corner of the world.
We at iMesh are constantly using GeoSurf in the Marketing, Development, and of course, the QA departments. Since we started using GeoSurf, it is now easier than ever to research and qualify new markets and verticals that we haven’t yet explored. GeoSurf saves us LOTS of valuable time!
GeoSurf is by far one of the most powerful tools Secco Squared uses on a daily basis. The intelligence we gather from GeoSurf allows Secco Squared to stay ahead of the curve.
GeoSurf is a flexible and powerful tool that has proven invaluable to our international ad operations. It’s customizable, easy to use and furthermore, they’re always innovating with their products – they listen to their customers.
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
(*
* This file contains theorems for dealing with a "simply" lifted
* heap, where each byte of memory can be accessed as one (and only)
* type.
*
* This is a simpler model of Tuch's "lift_t" model, where nested
* struct fields cannot be directly accessed as pointers.
*)
theory TypHeapSimple
imports
"../../lib/TypHeapLib"
begin
(*
* Each address in the heap can contain one of three things:
*
* - A type tag, which indicates that this address is the first
* byte of an object;
*
* - A footprint, which indicates that this address is a latter byte
* of an object;
*
* - Nothing, which indicates that this address does not fall inside
* an object.
*)
datatype heap_typ_contents =
HeapType typ_uinfo
| HeapFootprint
| HeapEmpty
(*
* Given a Tuch-style heap representation (where each memory location
* contains a set of different types, representing nested field types),
* calculate a single top-level type of the heap.
*
* We just want to commit to a single type for this heap location,
* and nothing more.
*)
definition
heap_type_tag :: "heap_typ_desc \<Rightarrow> word32 \<Rightarrow> heap_typ_contents"
where
"heap_type_tag d a \<equiv>
(if fst (d a) = False \<or> (\<forall>x. (snd (d a)) x = None) \<or> (\<forall>x. (snd (d a)) x \<noteq> None) then
HeapEmpty
else
case (snd (d a)) (GREATEST x. snd (d a) x \<noteq> None) of
Some (_, False) \<Rightarrow> HeapFootprint
| Some (x, True) \<Rightarrow> HeapType x
| None \<Rightarrow> HeapEmpty)"
(*
* Determine if the heap has a valid footprint for the given type at
* the given address.
*
* A valid footprint means that the user has committed that the given
* memory location will only be used for the given type.
*
* A "simple" footprint differs from the Tuch-style because we only
* commit to a single type, and have no support for accessing nested
* structures.
*)
definition
valid_simple_footprint :: "heap_typ_desc \<Rightarrow> word32 \<Rightarrow> typ_uinfo \<Rightarrow> bool"
where
"valid_simple_footprint d x t \<equiv>
heap_type_tag d x = HeapType t \<and>
(\<forall>y. y \<in> {x + 1..+ (size_td t)- Suc 0} \<longrightarrow> heap_type_tag d y = HeapFootprint)"
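(*
 * For instance (illustrative): for a 4-byte object of type "t" at address "a",
 * a valid simple footprint requires "heap_type_tag d a = HeapType t" and
 * "heap_type_tag d y = HeapFootprint" for y in {a + 1, a + 2, a + 3}.
 *)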
lemma valid_simple_footprintI:
"\<lbrakk> heap_type_tag d x = HeapType t; \<And>y. y \<in> {x + 1..+(size_td t) - Suc 0} \<Longrightarrow> heap_type_tag d y = HeapFootprint \<rbrakk>
\<Longrightarrow> valid_simple_footprint d x t"
by (clarsimp simp: valid_simple_footprint_def)
lemma valid_simple_footprintD:
"valid_simple_footprint d x t \<Longrightarrow> heap_type_tag d x = HeapType t"
by (simp add: valid_simple_footprint_def)
lemma valid_simple_footprintD2:
"\<lbrakk> valid_simple_footprint d x t; y \<in> {x + 1..+(size_td t) - Suc 0} \<rbrakk> \<Longrightarrow> heap_type_tag d y = HeapFootprint"
by (simp add: valid_simple_footprint_def)
lemma typ_slices_not_empty:
"typ_slices (x::('a::{mem_type} itself)) \<noteq> []"
apply (clarsimp simp: typ_slices_def)
done
lemma last_typ_slice_t:
"(last (typ_slice_t t 0)) = (t, True)"
apply (case_tac t)
apply clarsimp
done
lemma if_eqI:
"\<lbrakk> a \<Longrightarrow> x = z; \<not> a \<Longrightarrow> y = z \<rbrakk> \<Longrightarrow> (if a then x else y) = z"
by simp
lemma heap_type_tag_ptr_retyp:
"snd (s (ptr_val t)) = empty \<Longrightarrow>
heap_type_tag (ptr_retyp (t :: 'a::mem_type ptr) s) (ptr_val t) = HeapType (typ_uinfo_t TYPE('a))"
apply (unfold ptr_retyp_def heap_type_tag_def)
apply (subst htd_update_list_index, fastforce, fastforce)+
apply (rule if_eqI)
apply clarsimp
apply (erule disjE)
apply (erule_tac x=0 in allE)
apply clarsimp
apply (erule_tac x="length (typ_slice_t (typ_uinfo_t TYPE('a)) 0)" in allE)
apply (clarsimp simp: list_map_eq)
apply (clarsimp simp: list_map_eq last_conv_nth [simplified, symmetric] last_typ_slice_t
split: option.splits if_split_asm prod.splits)
done
lemma not_snd_last_typ_slice_t:
"k \<noteq> 0 \<Longrightarrow> \<not> snd (last (typ_slice_t z k))"
by (case_tac z, clarsimp)
lemma heap_type_tag_ptr_retyp_rest:
"\<lbrakk> snd (s (ptr_val t + k)) = empty; 0 < k; unat k < size_td (typ_uinfo_t TYPE('a)) \<rbrakk> \<Longrightarrow>
heap_type_tag (ptr_retyp (t :: 'a::mem_type ptr) s) (ptr_val t + k) = HeapFootprint"
apply (unfold ptr_retyp_def heap_type_tag_def)
apply (subst htd_update_list_index, simp, clarsimp,
metis intvlI size_of_def word_unat.Rep_inverse)+
apply (rule if_eqI)
apply clarsimp
apply (erule disjE)
apply (erule_tac x=0 in allE)
apply (clarsimp simp: typ_slices_index size_of_def)
apply (erule_tac x="length (typ_slice_t (typ_uinfo_t TYPE('a)) (unat k))" in allE)
apply (clarsimp simp: typ_slices_index size_of_def list_map_eq)
apply (clarsimp simp: list_map_eq last_conv_nth [simplified, symmetric] size_of_def
split: option.splits if_split_asm prod.splits bool.splits)
apply (metis surj_pair)
apply (subst (asm) (2) surjective_pairing)
apply (subst (asm) not_snd_last_typ_slice_t)
apply clarsimp
apply unat_arith
apply simp
done
lemma typ_slices_addr_card [simp]:
"length (typ_slices (x::('a::{mem_type} itself))) < addr_card"
apply (clarsimp simp: typ_slices_def)
done
lemma htd_update_list_same':
"\<lbrakk>0 < unat k; unat k \<le> addr_card - length v\<rbrakk> \<Longrightarrow> htd_update_list (p + k) v h p = h p"
apply (insert htd_update_list_same [where v=v and p=p and h=h and k="unat k"])
apply clarsimp
done
lemma unat_less_impl_less:
"unat a < unat b \<Longrightarrow> a < b"
by unat_arith
lemma valid_simple_footprint_ptr_retyp:
"\<lbrakk> \<forall>k < size_td (typ_uinfo_t TYPE('a)). snd (s (ptr_val t + of_nat k)) = Map.empty;
1 \<le> size_td (typ_uinfo_t TYPE('a));
size_td (typ_uinfo_t TYPE('a)) < addr_card \<rbrakk>
\<Longrightarrow> valid_simple_footprint (ptr_retyp (t :: 'a::mem_type ptr) s) (ptr_val t) (typ_uinfo_t TYPE('a))"
apply (clarsimp simp: valid_simple_footprint_def)
apply (rule conjI)
apply (subst heap_type_tag_ptr_retyp)
apply (erule allE [where x="0"])
apply clarsimp
apply clarsimp
apply (clarsimp simp: intvl_def)
apply (erule_tac x="k + 1" in allE)
apply (erule impE)
apply (metis One_nat_def less_diff_conv)
apply (subst add.assoc, subst heap_type_tag_ptr_retyp_rest)
apply clarsimp
apply (case_tac "1 + of_nat k = (0 :: word32)")
apply (metis add.left_neutral intvlI intvl_Suc_nmem size_of_def)
apply unat_arith
apply clarsimp
apply (metis lt_size_of_unat_simps size_of_def Suc_eq_plus1 One_nat_def less_diff_conv of_nat_Suc)
apply simp
done
(* Determine if the given pointer is valid in the given heap. *)
definition
heap_ptr_valid :: "heap_typ_desc \<Rightarrow> 'a::c_type ptr \<Rightarrow> bool"
where
"heap_ptr_valid d p \<equiv>
valid_simple_footprint d (ptr_val (p::'a ptr)) (typ_uinfo_t TYPE('a))
\<and> c_guard p"
(*
* Lift a heap from raw bytes and a heap description into
* higher-level objects.
*
* This differs from Tuch's "lift_t" because we only support
* simple lifting; that is, each byte in the heap may only
* be accessed as a single type. Accessing struct fields by
* their pointers is not supported.
*)
definition
simple_lift :: "heap_raw_state \<Rightarrow> ('a::c_type) ptr \<Rightarrow> 'a option"
where
"simple_lift s p = (
if (heap_ptr_valid (hrs_htd s) p) then
(Some (h_val (hrs_mem s) p))
else
None)"
lemma simple_lift_heap_ptr_valid:
"simple_lift s p = Some x \<Longrightarrow> heap_ptr_valid (hrs_htd s) p"
apply (clarsimp simp: simple_lift_def split: if_split_asm)
done
lemma simple_lift_c_guard:
"simple_lift s p = Some x \<Longrightarrow> c_guard p"
apply (clarsimp simp: simple_lift_def heap_ptr_valid_def split: if_split_asm)
done
(* Two valid footprints will either overlap completely or not at all. *)
lemma valid_simple_footprint_neq:
assumes valid_p: "valid_simple_footprint d p s"
and valid_q: "valid_simple_footprint d q t"
and neq: "p \<noteq> q"
shows "p \<notin> {q..+ (size_td t)}"
proof -
have heap_type_p: "heap_type_tag d p = HeapType s"
apply (metis valid_p valid_simple_footprint_def)
done
have heap_type_q: "heap_type_tag d q = HeapType t"
apply (metis valid_q valid_simple_footprint_def)
done
have heap_type_q_footprint:
"\<And>x. x \<in> {(q + 1)..+(size_td t - Suc 0)} \<Longrightarrow> heap_type_tag d x = HeapFootprint"
apply (insert valid_q)
apply (simp add: valid_simple_footprint_def)
done
show ?thesis
using heap_type_q_footprint heap_type_p neq
intvl_neq_start heap_type_q
by (metis heap_typ_contents.simps(2))
qed
(* Two valid footprints with different types will never overlap. *)
lemma valid_simple_footprint_type_neq:
"\<lbrakk> valid_simple_footprint d p s;
valid_simple_footprint d q t;
s \<noteq> t \<rbrakk> \<Longrightarrow>
p \<notin> {q..+ (size_td t)}"
apply (subgoal_tac "p \<noteq> q")
apply (rule valid_simple_footprint_neq, simp_all)[1]
apply (clarsimp simp: valid_simple_footprint_def)
done
lemma valid_simple_footprint_neq_disjoint:
"\<lbrakk> valid_simple_footprint d p s; valid_simple_footprint d q t; p \<noteq> q \<rbrakk> \<Longrightarrow>
{p..+(size_td s)} \<inter> {q..+ (size_td t)} = {}"
apply (rule ccontr)
apply (fastforce simp: valid_simple_footprint_neq dest!: intvl_inter)
done
lemma valid_simple_footprint_type_neq_disjoint:
"\<lbrakk> valid_simple_footprint d p s;
valid_simple_footprint d q t;
s \<noteq> t \<rbrakk> \<Longrightarrow>
{p..+(size_td s)} \<inter> {q..+ (size_td t)} = {}"
apply (subgoal_tac "p \<noteq> q")
apply (rule valid_simple_footprint_neq_disjoint, simp_all)[1]
apply (clarsimp simp: valid_simple_footprint_def)
done
lemma heap_ptr_valid_neq_disjoint:
"\<lbrakk> heap_ptr_valid d (p::'a::c_type ptr);
heap_ptr_valid d (q::'b::c_type ptr);
ptr_val p \<noteq> ptr_val q \<rbrakk> \<Longrightarrow>
{ptr_val p..+size_of TYPE('a)} \<inter>
{ptr_val q..+size_of TYPE('b)} = {}"
apply (clarsimp simp only: size_of_tag [symmetric])
apply (rule valid_simple_footprint_neq_disjoint [where d="d"])
apply (clarsimp simp: heap_ptr_valid_def)
apply (clarsimp simp: heap_ptr_valid_def)
apply simp
done
lemma heap_ptr_valid_type_neq_disjoint:
"\<lbrakk> heap_ptr_valid d (p::'a::c_type ptr);
heap_ptr_valid d (q::'b::c_type ptr);
typ_uinfo_t TYPE('a) \<noteq> typ_uinfo_t TYPE('b) \<rbrakk> \<Longrightarrow>
{ptr_val p..+size_of TYPE('a)} \<inter>
{ptr_val q..+size_of TYPE('b)} = {}"
apply (subgoal_tac "ptr_val p \<noteq> ptr_val q")
apply (rule heap_ptr_valid_neq_disjoint, auto)[1]
apply (clarsimp simp: heap_ptr_valid_def valid_simple_footprint_def)
done
(* If we update one pointer in the heap, other valid pointers will be unaffected. *)
lemma heap_ptr_valid_heap_update_other:
assumes val_p: "heap_ptr_valid d (p::'a::mem_type ptr)"
and val_q: "heap_ptr_valid d (q::'b::c_type ptr)"
and neq: "ptr_val p \<noteq> ptr_val q"
shows "h_val (heap_update p v h) q = h_val h q"
apply (clarsimp simp: h_val_def heap_update_def)
apply (subst heap_list_update_disjoint_same)
apply simp
apply (rule heap_ptr_valid_neq_disjoint [OF val_p val_q neq])
apply simp
done
(* If we update one type in the heap, other types will be unaffected. *)
lemma heap_ptr_valid_heap_update_other_typ:
assumes val_p: "heap_ptr_valid d (p::'a::mem_type ptr)"
and val_q: "heap_ptr_valid d (q::'b::c_type ptr)"
and neq: "typ_uinfo_t TYPE('a) \<noteq> typ_uinfo_t TYPE('b)"
shows "h_val (heap_update p v h) q = h_val h q"
apply (clarsimp simp: h_val_def heap_update_def)
apply (subst heap_list_update_disjoint_same)
apply simp
apply (rule heap_ptr_valid_type_neq_disjoint [OF val_p val_q neq])
apply simp
done
(* Updating the raw heap is equivalent to updating the lifted heap. *)
lemma simple_lift_heap_update:
"\<lbrakk> heap_ptr_valid (hrs_htd h) p \<rbrakk> \<Longrightarrow>
simple_lift (hrs_mem_update (heap_update p v) h)
= (simple_lift h)(p := Some (v::'a::mem_type))"
apply (rule ext)
apply (clarsimp simp: simple_lift_def hrs_mem_update h_val_heap_update)
apply (fastforce simp: heap_ptr_valid_heap_update_other ptr_val_inj)
done
(* Updating the raw heap of one type doesn't affect the lifted heap of other types. *)
lemma simple_lift_heap_update_other:
"\<lbrakk> heap_ptr_valid (hrs_htd d) (p::'b::mem_type ptr);
typ_uinfo_t TYPE('a) \<noteq> typ_uinfo_t TYPE('b) \<rbrakk> \<Longrightarrow>
simple_lift (hrs_mem_update (heap_update p v) d) = ((simple_lift d)::'a::c_type typ_heap)"
apply (rule ext)+
apply (clarsimp simp: simple_lift_def h_val_heap_update hrs_mem_update)
apply (auto intro: heap_ptr_valid_heap_update_other_typ)
done
lemma h_val_simple_lift:
"simple_lift h p = Some v \<Longrightarrow> h_val (hrs_mem h) p = v"
apply (clarsimp simp: simple_lift_def split: if_split_asm)
done
lemma h_val_field_simple_lift:
"\<lbrakk> simple_lift h (pa :: 'a ptr) = Some (v::'a::mem_type);
\<exists>t. field_ti TYPE('a) f = Some t;
export_uinfo (the (field_ti TYPE('a) f)) = export_uinfo (typ_info_t TYPE('b :: mem_type)) \<rbrakk> \<Longrightarrow>
h_val (hrs_mem h) (Ptr &(pa\<rightarrow>f) :: 'b :: mem_type ptr) = from_bytes (access_ti\<^sub>0 (the (field_ti TYPE('a) f)) v)"
apply (clarsimp simp: simple_lift_def split: if_split_asm)
apply (clarsimp simp: h_val_field_from_bytes)
done
lemma simple_lift_heap_update':
"simple_lift h p = Some v' \<Longrightarrow>
simple_lift (hrs_mem_update (heap_update (p::('a::{mem_type}) ptr) v) h)
= (simple_lift h)(p := Some v)"
apply (rule simple_lift_heap_update)
apply (erule simple_lift_heap_ptr_valid)
done
lemma simple_lift_hrs_mem_update_None [simp]:
"(simple_lift (hrs_mem_update a hp) x = None) = (simple_lift hp x = None)"
apply (clarsimp simp: simple_lift_def)
done
lemma simple_lift_data_eq:
"\<lbrakk> h_val (hrs_mem h) p = h_val (hrs_mem h') p';
heap_ptr_valid (hrs_htd h) p = heap_ptr_valid (hrs_htd h') p' \<rbrakk> \<Longrightarrow>
simple_lift h p = simple_lift h' p'"
apply (clarsimp simp: simple_lift_def)
done
lemma h_val_heap_update_disjoint:
"\<lbrakk> {ptr_val p ..+ size_of TYPE('a::c_type)}
\<inter> {ptr_val q ..+ size_of TYPE('b::mem_type)} = {} \<rbrakk> \<Longrightarrow>
h_val (heap_update (q :: 'b ptr) r h) (p :: 'a ptr) = h_val h p"
apply (clarsimp simp: h_val_def)
apply (clarsimp simp: heap_update_def)
apply (subst heap_list_update_disjoint_same)
apply clarsimp
apply blast
apply clarsimp
done
lemma update_ti_t_valid_size:
"size_of TYPE('b) = size_td t \<Longrightarrow>
update_ti_t t (to_bytes_p (val::'b::mem_type)) obj = update_ti t (to_bytes_p val) obj"
apply (clarsimp simp: update_ti_t_def to_bytes_p_def)
done
lemma h_val_field_from_bytes':
"\<lbrakk> field_ti TYPE('a::{mem_type}) f = Some t;
export_uinfo t = export_uinfo (typ_info_t TYPE('b::{mem_type})) \<rbrakk> \<Longrightarrow>
h_val h (Ptr &(pa\<rightarrow>f) :: 'b ptr) = from_bytes (access_ti\<^sub>0 t (h_val h pa))"
apply (insert h_val_field_from_bytes [where f=f and pa=pa and t=t and h="(h,x)" and 'a='a and 'b='b])
apply (clarsimp simp: hrs_mem_def)
done
lemma simple_lift_super_field_update_lookup:
fixes dummy :: "'b :: mem_type"
assumes "field_lookup (typ_info_t TYPE('b::mem_type)) f 0 = Some (s,n)"
and "typ_uinfo_t TYPE('a) = export_uinfo s"
and "simple_lift h p = Some v'"
shows "(super_field_update_t (Ptr (&(p\<rightarrow>f))) (v::'a::mem_type) ((simple_lift h)::'b ptr \<Rightarrow> 'b option)) =
((simple_lift h)(p \<mapsto> field_update (field_desc s) (to_bytes_p v) v'))"
proof -
from assms have [simp]: "unat (of_nat n :: 32 word) = n"
apply (subst unat_of_nat)
apply (subst mod_less)
apply (drule td_set_field_lookupD)+
apply (drule td_set_offset_size)+
apply (subst len_of_addr_card)
apply (subst (asm) size_of_def [symmetric, where t="TYPE('b)"])+
apply (subgoal_tac "size_of TYPE('b) < addr_card")
apply arith
apply simp
apply simp
done
from assms show ?thesis
apply (clarsimp simp: super_field_update_t_def)
apply (rule ext)
apply (clarsimp simp: field_lvalue_def split: option.splits)
apply (safe, simp_all)
apply (frule_tac v=v and v'=v' in update_field_update)
apply (clarsimp simp: field_of_t_def field_of_def typ_uinfo_t_def)
apply (frule_tac m=0 in field_names_SomeD2)
apply simp
apply clarsimp
apply (simp add: field_typ_def field_typ_untyped_def)
apply (frule field_lookup_export_uinfo_Some)
apply (frule_tac s=k in field_lookup_export_uinfo_Some)
apply simp
apply (drule (1) field_lookup_inject)
apply (subst typ_uinfo_t_def [symmetric, where t="TYPE('b)"])
apply simp
apply simp
apply (drule field_of_t_mem)+
apply (case_tac h)
apply (clarsimp simp: simple_lift_def split: if_split_asm)
apply (drule (1) heap_ptr_valid_neq_disjoint)
apply simp
apply fast
apply (clarsimp simp: field_of_t_def field_of_def)
apply (subst (asm) td_set_field_lookup)
apply simp
apply simp
apply (frule field_lookup_export_uinfo_Some)
apply (simp add: typ_uinfo_t_def)
apply (clarsimp simp: field_of_t_def field_of_def)
apply (subst (asm) td_set_field_lookup)
apply simp
apply simp
apply (frule field_lookup_export_uinfo_Some)
apply (simp add: typ_uinfo_t_def)
done
qed
lemma field_offset_addr_card:
"\<exists>x. field_lookup (typ_info_t TYPE('a::mem_type)) f 0 = Some x \<Longrightarrow> field_offset TYPE('a) f < addr_card"
apply (clarsimp simp: field_offset_def field_offset_untyped_def typ_uinfo_t_def)
apply (subst field_lookup_export_uinfo_Some)
apply assumption
apply (frule td_set_field_lookupD)
apply (drule td_set_offset_size)
apply (insert max_size [where ?'a="'a"])
apply (clarsimp simp: size_of_def)
done
lemma unat_of_nat_field_offset:
"\<exists>x. field_lookup (typ_info_t TYPE('a::mem_type)) f 0 = Some x \<Longrightarrow>
unat (of_nat (field_offset TYPE('a) f) :: word32 ) = field_offset TYPE('a) f"
apply (subst word_unat.Abs_inverse)
apply (clarsimp simp: unats_def)
apply (insert field_offset_addr_card [where f=f and ?'a="'a"])[1]
apply (fastforce simp: addr_card)
apply simp
done
lemma field_of_t_field_lookup:
assumes a: "field_lookup (typ_info_t TYPE('a::mem_type)) f 0 = Some (s, n)"
assumes b: "export_uinfo s = typ_uinfo_t TYPE('b::mem_type)"
assumes n: "n = field_offset TYPE('a) f"
shows "field_of_t (Ptr &(ptr\<rightarrow>f) :: ('b ptr)) (ptr :: 'a ptr)"
apply (clarsimp simp del: field_lookup_offset_eq
simp: field_of_t_def field_of_def)
apply (subst td_set_field_lookup)
apply (rule wf_desc_typ_tag)
apply (rule exI [where x=f])
using a[simplified n] b
apply (clarsimp simp: typ_uinfo_t_def)
apply (subst field_lookup_export_uinfo_Some)
apply assumption
apply (clarsimp simp del: field_lookup_offset_eq
simp: field_lvalue_def unat_of_nat_field_offset)
done
lemma simple_lift_field_update':
fixes val :: "'b :: mem_type" and ptr :: "'a :: mem_type ptr"
assumes fl: "field_lookup (typ_info_t TYPE('a)) f 0 =
Some (adjust_ti (typ_info_t TYPE('b)) xf xfu, n)"
and xf_xfu: "fg_cons xf xfu"
and cl: "simple_lift hp ptr = Some z"
shows "(simple_lift (hrs_mem_update (heap_update (Ptr &(ptr\<rightarrow>f)) val) hp)) =
simple_lift hp(ptr \<mapsto> xfu val z)"
(is "?LHS = ?RHS")
proof (rule ext)
fix p
have eui: "typ_uinfo_t TYPE('b) =
export_uinfo (adjust_ti (typ_info_t TYPE('b)) xf xfu)"
using xf_xfu
apply (subst export_tag_adjust_ti2 [OF _ wf_lf wf_desc])
apply (simp add: fg_cons_def )
apply (rule meta_eq_to_obj_eq [OF typ_uinfo_t_def])
done
have n_is_field_offset: "n = field_offset TYPE('a) f"
apply (insert field_lookup_offset_eq [OF fl])
apply (clarsimp)
done
have equal_case: "?LHS ptr = ?RHS ptr"
apply (insert cl)
apply (clarsimp simp: simple_lift_def split: if_split_asm)
apply (clarsimp simp: hrs_mem_update)
apply (subst h_val_super_update_bs)
apply (rule field_of_t_field_lookup [OF fl])
apply (clarsimp simp: eui)
apply (clarsimp simp: n_is_field_offset)
apply clarsimp
apply (unfold from_bytes_def)
apply (subst fi_fu_consistentD [where f=f and s="adjust_ti (typ_info_t TYPE('b)) xf xfu"])
apply (clarsimp simp: fl)
apply (clarsimp simp: n_is_field_offset field_lvalue_def)
apply (metis unat_of_nat_field_offset fl)
apply clarsimp
apply (clarsimp simp: size_of_def)
apply (clarsimp simp: size_of_def)
apply clarsimp
apply (subst update_ti_s_from_bytes)
apply clarsimp
apply (subst update_ti_adjust_ti)
apply (rule xf_xfu)
apply (subst update_ti_s_from_bytes)
apply clarsimp
apply clarsimp
apply (clarsimp simp: h_val_def)
done
show "?LHS p = ?RHS p"
apply (case_tac "p = ptr")
apply (erule ssubst)
apply (rule equal_case)
apply (insert cl)
apply (clarsimp simp: simple_lift_def hrs_mem_update split: if_split_asm)
apply (rule h_val_heap_update_disjoint)
apply (insert field_tag_sub [OF fl, where p=ptr])
apply (clarsimp simp: size_of_def)
apply (clarsimp simp: heap_ptr_valid_def)
apply (frule (1) valid_simple_footprint_neq_disjoint, fastforce)
apply clarsimp
apply blast
done
qed
lemma simple_lift_field_update:
fixes val :: "'b :: mem_type" and ptr :: "'a :: mem_type ptr"
assumes fl: "field_ti TYPE('a) f =
Some (adjust_ti (typ_info_t TYPE('b)) xf (xfu o (\<lambda>x _. x)))"
and xf_xfu: "fg_cons xf (xfu o (\<lambda>x _. x))"
and cl: "simple_lift hp ptr = Some z"
shows "(simple_lift (hrs_mem_update (heap_update (Ptr &(ptr\<rightarrow>f)) val) hp)) =
simple_lift hp(ptr \<mapsto> xfu (\<lambda>_. val) z)"
(is "?LHS = ?RHS")
apply (insert fl [unfolded field_ti_def])
apply (clarsimp split: option.splits)
apply (subst simple_lift_field_update' [where xf=xf and xfu="xfu o (\<lambda>x _. x)" and z=z])
apply (clarsimp simp: o_def split: option.splits)
apply (rule refl)
apply (rule xf_xfu)
apply (rule cl)
apply clarsimp
done
lemma simple_heap_diff_types_impl_diff_ptrs:
"\<lbrakk> heap_ptr_valid h (p::('a::c_type) ptr);
heap_ptr_valid h (q::('b::c_type) ptr);
typ_uinfo_t TYPE('a) \<noteq> typ_uinfo_t TYPE('b) \<rbrakk> \<Longrightarrow>
ptr_val p \<noteq> ptr_val q"
apply (clarsimp simp: heap_ptr_valid_def)
apply (clarsimp simp: valid_simple_footprint_def)
done
lemma h_val_update_regions_disjoint:
"\<lbrakk> { ptr_val p ..+ size_of TYPE('a) } \<inter> { ptr_val x ..+ size_of TYPE('b)} = {} \<rbrakk> \<Longrightarrow>
h_val (heap_update p (v::('a::mem_type)) h) x = h_val h (x::('b::c_type) ptr)"
apply (clarsimp simp: heap_update_def)
apply (clarsimp simp: h_val_def)
apply (subst heap_list_update_disjoint_same)
apply clarsimp
apply clarsimp
done
lemma simple_lift_field_update_t:
fixes val :: "'b :: mem_type" and ptr :: "'a :: mem_type ptr"
assumes fl: "field_ti TYPE('a) f = Some t"
and diff: "typ_uinfo_t TYPE('a) \<noteq> typ_uinfo_t TYPE('c :: c_type)"
and eu: "export_uinfo t = export_uinfo (typ_info_t TYPE('b))"
and cl: "simple_lift hp ptr = Some z"
shows "((simple_lift (hrs_mem_update (heap_update (Ptr &(ptr\<rightarrow>f)) val) hp)) :: 'c ptr \<Rightarrow> 'c option) =
simple_lift hp"
apply (rule ext)
apply (case_tac "simple_lift hp x")
apply clarsimp
apply (case_tac "ptr_val x = ptr_val ptr")
apply clarsimp
apply (clarsimp simp: simple_lift_def hrs_mem_update split: if_split_asm)
apply (cut_tac simple_lift_heap_ptr_valid [OF cl])
apply (drule (1) simple_heap_diff_types_impl_diff_ptrs [OF _ _ diff])
apply simp
apply (clarsimp simp: simple_lift_def hrs_mem_update split: if_split_asm)
apply (rule field_ti_field_lookupE [OF fl])
apply (frule_tac p=ptr in field_tag_sub)
apply (clarsimp simp: h_val_def heap_update_def)
apply (subst heap_list_update_disjoint_same)
apply clarsimp
apply (cut_tac simple_lift_heap_ptr_valid [OF cl])
apply (drule (2) heap_ptr_valid_neq_disjoint)
apply (clarsimp simp: export_size_of [unfolded typ_uinfo_t_def, OF eu])
apply blast
apply simp
done
lemma simple_lift_heap_update_other':
"\<lbrakk> simple_lift h (p::'b::mem_type ptr) = Some v';
typ_uinfo_t TYPE('a) \<noteq> typ_uinfo_t TYPE('b) \<rbrakk> \<Longrightarrow>
simple_lift (hrs_mem_update (heap_update p v) h) = ((simple_lift h)::'a::c_type typ_heap)"
apply (rule simple_lift_heap_update_other)
apply (erule simple_lift_heap_ptr_valid)
apply simp
done
(* If you update bytes inside an object of one type, it won't affect
* heaps of other types. *)
lemma simple_lift_heap_update_bytes_in_other:
"\<lbrakk> simple_lift h (p::'b::mem_type ptr) = Some v';
typ_uinfo_t TYPE('b) \<noteq> typ_uinfo_t TYPE('c);
{ ptr_val q ..+ size_of TYPE('a)} \<subseteq> {ptr_val p ..+ size_of TYPE('b) } \<rbrakk> \<Longrightarrow>
simple_lift (hrs_mem_update (heap_update (q::'a::mem_type ptr) v) h) = ((simple_lift h)::'c::mem_type typ_heap)"
apply (rule ext)
apply (clarsimp simp: simple_lift_def split: if_split_asm)
apply (drule (1) heap_ptr_valid_type_neq_disjoint, simp)
apply (clarsimp simp: hrs_mem_update)
apply (rule h_val_heap_update_disjoint)
apply blast
done
lemma typ_name_neq:
"typ_name (export_uinfo (typ_info_t TYPE('a::c_type)))
\<noteq> typ_name (export_uinfo (typ_info_t TYPE('b::c_type)))
\<Longrightarrow> typ_uinfo_t TYPE('a) \<noteq> typ_uinfo_t TYPE('b)"
apply (metis typ_uinfo_t_def)
done
lemma of_nat_mod_div_decomp:
"of_nat k
= of_nat (k div size_of TYPE('b)) * of_nat (size_of TYPE('b::mem_type)) +
of_nat (k mod size_of TYPE('b))"
by (metis mod_div_decomp of_nat_add of_nat_mult)
lemma c_guard_array_c_guard:
"\<lbrakk> \<And>x. x < CARD('a) \<Longrightarrow> c_guard (ptr_coerce p +\<^sub>p int x :: 'b ptr) \<rbrakk> \<Longrightarrow> c_guard ( p :: ('b :: mem_type, 'a :: finite) array ptr)"
apply atomize
apply (clarsimp simp: c_guard_def)
apply (rule conjI)
apply (drule_tac x=0 in spec)
apply (clarsimp simp: ptr_aligned_def align_of_def align_td_array)
apply (simp add: c_null_guard_def)
apply (clarsimp simp: intvl_def)
apply (drule_tac x="k div size_of TYPE('b)" in spec)
apply (erule impE)
apply (metis (full_types) less_nat_zero_code mult_is_0 neq0_conv td_gal_lt)
apply clarsimp
apply (drule_tac x="k mod size_of TYPE('b)" in spec)
apply (clarsimp simp: CTypesDefs.ptr_add_def)
apply (subst (asm) add.assoc)
apply (subst (asm) of_nat_mod_div_decomp [symmetric])
apply clarsimp
done
lemma heap_list_update_list':
"\<lbrakk> n + x \<le> length v; length v < addr_card; q = (p + of_nat x) \<rbrakk> \<Longrightarrow>
heap_list (heap_update_list p v h) n q = take n (drop x v)"
by (metis heap_list_update_list)
lemma outside_intvl_range:
"p \<notin> {a ..+ b} \<Longrightarrow> p < a \<or> p \<ge> a + of_nat b"
apply (clarsimp simp: intvl_def not_le not_less)
apply (drule_tac x="unat (p-a)" in spec)
apply clarsimp
apply (metis add_diff_cancel2 le_less_linear le_unat_uoi
mpl_lem not_add_less2 unat_mono word_less_minus_mono_left)
done
lemma first_in_intvl:
"b \<noteq> 0 \<Longrightarrow> a \<in> {a ..+ b}"
by (force simp: intvl_def)
lemma zero_not_in_intvl_no_overflow:
"0 \<notin> {a :: 'a::len word ..+ b} \<Longrightarrow> unat a + b \<le> 2 ^ len_of TYPE('a)"
apply (rule ccontr)
apply (simp add: intvl_def not_le)
apply (drule_tac x="2 ^ len_of TYPE('a) - unat a" in spec)
apply (clarsimp simp: not_less)
apply (erule disjE)
apply (metis (erased, hide_lams) diff_add_inverse less_imp_add_positive of_nat_2p of_nat_add
unat_lt2p word_neq_0_conv word_unat.Rep_inverse)
apply (metis le_add_diff_inverse le_antisym le_diff_conv le_refl
less_imp_le_nat add.commute not_add_less1 unat_lt2p)
done
lemma intvl_split:
"\<lbrakk> n \<ge> a \<rbrakk> \<Longrightarrow> { p :: ('a :: len) word ..+ n } = { p ..+ a } \<union> { p + of_nat a ..+ (n - a)}"
apply (rule set_eqI, rule iffI)
apply (clarsimp simp: intvl_def not_less)
apply (rule_tac x=k in exI)
apply clarsimp
apply (rule classical)
apply (drule_tac x="k - a" in spec)
apply (clarsimp simp: not_less)
apply (metis diff_less_mono not_less)
apply (clarsimp simp: intvl_def not_less)
apply (rule_tac x="unat (x - p)" in exI)
apply clarsimp
apply (erule disjE)
apply clarsimp
apply (metis le_unat_uoi less_or_eq_imp_le not_less order_trans)
apply clarsimp
apply (metis le_def le_eq_less_or_eq le_unat_uoi less_diff_conv
add.commute of_nat_add)
done
lemma heap_ptr_valid_range_not_NULL:
"heap_ptr_valid htd (p :: ('a :: c_type) ptr)
\<Longrightarrow> 0 \<notin> {ptr_val p ..+ size_of TYPE('a)}"
apply (clarsimp simp: heap_ptr_valid_def)
apply (metis c_guard_def c_null_guard_def)
done
lemma heap_ptr_valid_last_byte_no_overflow:
"heap_ptr_valid htd (p :: ('a :: c_type) ptr)
\<Longrightarrow> unat (ptr_val p) + size_of TYPE('a) \<le> 2 ^ len_of TYPE(32)"
by (metis c_guard_def c_null_guard_def heap_ptr_valid_def
zero_not_in_intvl_no_overflow)
lemma heap_ptr_valid_intersect_array:
"\<lbrakk> \<forall>j < n. heap_ptr_valid htd (p +\<^sub>p int j);
heap_ptr_valid htd (q :: ('a :: c_type) ptr) \<rbrakk>
\<Longrightarrow> (\<exists>m < n. q = (p +\<^sub>p int m))
\<or> ({ptr_val p ..+ size_of TYPE ('a) * n} \<inter> {ptr_val q ..+ size_of TYPE ('a :: c_type)} = {})"
apply (induct n)
apply clarsimp
apply atomize
apply simp
apply (case_tac "n = 0")
apply clarsimp
apply (metis heap_ptr_valid_neq_disjoint ptr_val_inj)
apply (erule disjE)
apply (metis less_Suc_eq)
apply (case_tac "q = p +\<^sub>p int n")
apply force
apply (frule_tac x=n in spec)
apply (erule impE, simp)
apply (drule (1) heap_ptr_valid_neq_disjoint)
apply simp
apply (simp add: CTypesDefs.ptr_add_def)
apply (rule disjI2)
apply (cut_tac a=" of_nat n * of_nat (size_of TYPE('a))"
and p="ptr_val p" and n="n * size_of TYPE('a) + size_of TYPE('a)" in intvl_split)
apply clarsimp
apply (clarsimp simp: field_simps Int_Un_distrib2)
apply (metis IntI emptyE intvl_empty intvl_inter intvl_self neq0_conv)
done
(* Simplification rules for dealing with "simple_lift". *)
lemmas simple_lift_simps =
typ_name_neq
simple_lift_c_guard
h_val_simple_lift
simple_lift_heap_update'
simple_lift_heap_update_other'
c_guard_field
h_val_field_simple_lift
simple_lift_field_update
simple_lift_field_update_t
c_guard_array_field
nat_to_bin_string_simps
(* Old name for the above simpset. *)
lemmas typ_simple_heap_simps = simple_lift_simps
end
Require Import String.
Require Import StringGraph.
Require Import ListUtil.
Require Import Bool.Bool.
Require Import ZArith.
Require Import Coq.Lists.List.
Import ListNotations.
Section Collapse.
Definition Double (S : list StringGraphEdge) : list StringGraphEdge :=
S ++ S.
Fixpoint FindInEdgesFrom (L : list StringGraphEdge) (A : String) {struct L} : list StringGraphEdge :=
match L with
| [] => []
| e :: R =>
if str_eq_dec (To e) A then
e :: FindInEdgesFrom R A
else
FindInEdgesFrom R A
end.
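(* FindInEdgesFrom collects the edges of L whose target (To) is A, i.e. the
   in-edges of A, preserving their order in L. *)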
(* H : connected components of the same level as A *)
Fixpoint CollapseNodeAtLevel' (H : list (list String)) (S L : list StringGraphEdge) (A : String) {struct L} : list StringGraphEdge :=
match L with
| [] => S
| e :: R =>
  if str_eq_dec (To e) A then (* L is sorted so no more edges will be removed *)
    S
  else
    match FindInEdgesFrom L A with
    | [] => CollapseNodeAtLevel' H S R A
    | [u] => S (* ... single in-edge case elided in the original; placeholder leaves S unchanged *)
    | u :: U =>
      let eCollapsed := pair (pref (suff (From e))) (To e) in
      let uCollapsed := pair (From u) (suff (pref (To u))) in
      CollapseNodeAtLevel' H (eCollapsed :: uCollapsed :: (RemoveList StringGraphEdge edge_eq_dec S [e; u])) R A
    end
end.
Fixpoint CollapseLevel' (S B : list StringGraphEdge) (l : nat) {struct S} : list StringGraphEdge :=
match S with
| [] => []
| e :: R => e :: CollapseLevel' R B l (* ... body elided in the original; this placeholder leaves the level unchanged *)
(*if (length ())*)
end.
Definition CollapseLevel (S : list StringGraphEdge) (l : nat) : list StringGraphEdge :=
CollapseLevel' S [] l.
Fixpoint RemoveLowerPairsFor' (S B : list StringGraphEdge) (V : String) {struct S} : list StringGraphEdge :=
match S with
| [] => []
| e :: R =>
if Contains StringGraphEdge edge_eq_dec B e then (* B: edges marked for removal *)
RemoveLowerPairsFor' R (RemoveFirst StringGraphEdge edge_eq_dec B e) V
else
let eInv := pair (To e) (From e) in
let eInvBlacklisted := Count StringGraphEdge edge_eq_dec B eInv in
let eInvRemaining := Count StringGraphEdge edge_eq_dec R eInv in
let eRemaining := Count StringGraphEdge edge_eq_dec R e in
if str_eq_dec (From e) [] then
if (eInvBlacklisted + 2) <=? eInvRemaining then
RemoveLowerPairsFor' R (eInv :: B) V
else
if (0 <? eRemaining) && ((1 + eInvBlacklisted) <=? eInvRemaining) then
RemoveLowerPairsFor' R (eInv :: B) V
else
e :: RemoveLowerPairsFor' R B V
else
if str_eq_dec (To e) [] then
if (eInvBlacklisted + 2) <=? eInvRemaining then
RemoveLowerPairsFor' R (eInv :: B) V
else
if (0 <? eRemaining) && ((1 + eInvBlacklisted) <=? eInvRemaining) then
RemoveLowerPairsFor' R (eInv :: B) V
else
e :: RemoveLowerPairsFor' R B V
else
e :: RemoveLowerPairsFor' R B V
end.
Definition RemoveLowerPairsFor (S : list StringGraphEdge) (V : String) : list StringGraphEdge :=
RemoveLowerPairsFor' S [] V.
Fixpoint RemoveLowerPairs' (S : list StringGraphEdge) (V : list String) {struct V} : list StringGraphEdge :=
match V with
| [] => S
| v :: R => RemoveLowerPairs' (RemoveLowerPairsFor S v) R
end.
Definition RemoveLowerPairs (S : list StringGraphEdge) :=
RemoveLowerPairs' S (FilterVertexLevel S 1).
Fixpoint Collapse' (S : list StringGraphEdge) (l : nat) {struct l} : list StringGraphEdge :=
match l with
| 0 | 1 => RemoveLowerPairs S
| Datatypes.S k => Collapse' (CollapseLevel S l) k (* constructor qualified: the parameter S shadows nat's S *)
end.
Definition Collapse (S : list StringGraphEdge) : list StringGraphEdge :=
Collapse' S (GraphHeight S).
Definition DoubleAndCollapse (S : list String) : String :=
GenerateSuperstring (Collapse (Double (FindTrivialSolution S (ConstructFullGraph S)))).
End Collapse.
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the GNU General Public License version 2. Note that NO WARRANTY is provided.
* See "LICENSE_GPLv2.txt" for details.
*
* @TAG(NICTA_GPL)
*)
theory Noninterference_Refinement
imports "Noninterference" "ADT_IF_Refine_C" "Noninterference_Base_Refinement"
begin
(* FIXME: fp is currently ignored by ADT_C_if *)
consts fp :: bool
context begin interpretation Arch . (*FIXME: arch_split*)
lemma internal_R_ADT_A_if:
"internal_R (ADT_A_if uop) R = R"
apply (rule ext, rule ext)
apply (simp add: internal_R_def ADT_A_if_def)
done
lemma LI_trans:
"\<lbrakk>LI A H R (Ia \<times> Ih); LI H C S (Ih \<times> Ic); H \<Turnstile> Ih\<rbrakk>
\<Longrightarrow> LI A C (R O (S \<inter> {(h, c). h \<in> Ih})) (Ia \<times> Ic)"
apply (clarsimp simp: LI_def)
apply safe
apply (clarsimp simp: Image_def)
apply (erule_tac x=s in allE)+
apply (drule(1) set_mp)
apply clarsimp
apply (drule(1) set_mp)
apply (clarsimp simp: invariant_holds_def)
apply blast
apply (clarsimp simp: rel_semi_def)
apply (erule_tac x=j in allE)+
apply (drule_tac x="(ya, z)" in set_mp)
apply blast
apply (clarsimp simp: invariant_holds_def)
apply blast
apply (erule_tac x=x in allE)
apply (erule_tac x=y in allE)+
apply (erule_tac x=z in allE)
apply simp
done
end
context kernel_m begin
definition big_step_ADT_C_if where
"big_step_ADT_C_if utf \<equiv> big_step_adt (ADT_C_if fp utf) (internal_R (ADT_C_if fp utf) big_step_R) big_step_evmap"
(*Note: Might be able to generalise big_step_adt_refines for fw_sim*)
lemma big_step_ADT_C_if_big_step_ADT_A_if_refines:
"uop_nonempty utf \<Longrightarrow> refines (big_step_ADT_C_if utf) (big_step_ADT_A_if utf) "
apply (simp add: big_step_ADT_A_if_def big_step_ADT_C_if_def)
apply (rule big_step_adt_refines[where A="ADT_A_if utf", simplified internal_R_ADT_A_if])
apply (rule LI_trans)
apply (erule global_automata_refine.fw_sim_abs_conc[OF haskell_to_abs])
apply (erule global_automata_refine.fw_sim_abs_conc[OF c_to_haskell])
apply (rule global_automaton_invs.ADT_invs[OF haskell_invs])
apply (rule global_automaton_invs.ADT_invs[OF abstract_invs])
apply simp
done
end
context begin interpretation Arch . (*FIXME: arch_split*)
lemma LI_sub_big_steps':
"\<lbrakk>(s',as) \<in> sub_big_steps C (internal_R C R) s;
LI A C S (Ia \<times> Ic); A [> Ia; C [> Ic;
(t, s) \<in> S; s \<in> Ic; t \<in> Ia\<rbrakk>
\<Longrightarrow> \<exists>t'. (t',as) \<in> sub_big_steps A (internal_R A R) t \<and> (t', s') \<in> S \<and> t' \<in> Ia"
apply (induct rule: sub_big_steps.induct)
apply(clarsimp simp: LI_def)
apply (rule_tac x=t in exI)
apply clarsimp
apply (rule sub_big_steps.nil, simp_all)[1]
apply (force simp: internal_R_def)
apply (clarsimp simp: LI_def)
apply (erule_tac x=e in allE)
apply (clarsimp simp: rel_semi_def)
apply (drule_tac x="(t', ta)" in set_mp)
apply (rule_tac b=s' in relcompI)
apply simp
apply (rule sub_big_steps_I_holds)
apply assumption+
apply clarsimp
apply (rule_tac x=y in exI)
apply clarsimp
apply (subst conj_commute)
apply (rule context_conjI)
apply (erule inv_holdsE)
apply assumption+
apply (rule sub_big_steps.step[OF refl])
apply assumption+
apply (subgoal_tac "z \<in> Ic")
prefer 2
apply (rule_tac I=Ic in inv_holdsE)
apply assumption+
apply (erule sub_big_steps_I_holds)
apply assumption+
apply (force simp: internal_R_def)
done
lemma LI_rel_terminate:
assumes ex_abs: "\<And>s'. s' \<in> Ic \<Longrightarrow> (\<exists>s. s \<in> Ia \<and> (s, s') \<in> S)"
assumes rel_correct: "\<And>s s' s0''. \<lbrakk>(internal_R C R)\<^sup>+\<^sup>+ s0'' s'; s0''\<in>Init C s0; (s, s') \<in> S\<rbrakk> \<Longrightarrow> \<exists>s0'\<in>Init A s0. (internal_R A R)\<^sup>+\<^sup>+ s0' s"
assumes init_rel_correct: "\<And>s0''. s0'' \<in> Init C s0 \<Longrightarrow> \<exists>s0' \<in> Init A s0. (s0', s0'') \<in> S"
assumes Ia_inv: "A [> Ia"
assumes s0_Ia: "Init A s0 \<subseteq> Ia"
assumes Ic_inv: "C [> Ic"
assumes s0_Ic: "Init C s0 \<subseteq> Ic"
assumes li: "LI A C S (Ia \<times> Ic)"
shows "\<lbrakk>rel_terminate A s0 (internal_R A R) Ia (internal_R A measuref)\<rbrakk>
\<Longrightarrow> rel_terminate C s0 (internal_R C R) Ic (internal_R C measuref)"
apply (simp add: rel_terminate_def)
apply (clarsimp simp: rtranclp_def2)
apply (erule disjE)
apply (cut_tac s'=s in ex_abs, assumption)
apply clarsimp
apply (cut_tac s=sa and s'=s in rel_correct, assumption+)
apply (erule_tac x="sa" in allE)
apply simp
apply (erule impE)
apply blast
apply (erule_tac x=as in allE)
apply (frule(3) LI_sub_big_steps'[OF _ li Ia_inv Ic_inv])
apply clarsimp
apply (erule_tac x=t' in allE)
apply simp
using li
apply (clarsimp simp: LI_def)
apply (erule_tac x=a in allE)
apply (clarsimp simp: rel_semi_def)
apply (frule(1) sub_big_steps_I_holds[OF Ic_inv])
apply (drule_tac x="(t', s'')" in set_mp)
apply blast
apply clarsimp
apply (erule_tac x=y in allE)
apply (erule impE)
apply blast
apply (simp add: internal_R_def)
apply (frule_tac x=sa in spec, drule_tac x=s in spec)
apply (frule_tac x=y in spec, drule_tac x=z in spec)
apply (drule_tac x=x in spec, drule_tac x=s' in spec)
apply simp
using Ia_inv Ic_inv
apply (clarsimp simp: invariant_holds_def inv_holds_def)
apply (erule_tac x=a in allE)+
apply (drule_tac x=y in set_mp, blast)
apply (drule_tac x=z in set_mp, blast)
apply simp
apply clarsimp
apply (cut_tac s0''=s0' in init_rel_correct, assumption+)
apply clarsimp
apply (erule_tac x="s0'a" in allE)
apply (frule set_mp[OF s0_Ia])
apply (erule impE)
apply blast
apply (erule_tac x=as in allE)
apply (frule(3) LI_sub_big_steps'[OF _ li Ia_inv Ic_inv])
apply clarsimp
apply (erule_tac x=t' in allE)
apply simp
using li
apply (clarsimp simp: LI_def)
apply (erule_tac x=a in allE)+
apply (clarsimp simp: rel_semi_def)
apply (frule(1) sub_big_steps_I_holds[OF Ic_inv])
apply (drule_tac x="(t', s'')" in set_mp)
apply blast
apply clarsimp
apply (erule_tac x=y in allE)
apply (erule impE)
apply blast
apply (simp add: internal_R_def)
apply (frule_tac x=s0'a in spec, drule_tac x=s0' in spec)
apply (frule_tac x=y in spec, drule_tac x=z in spec)
apply (drule_tac x=x in spec, drule_tac x=s' in spec)
apply simp
using Ia_inv Ic_inv
apply (clarsimp simp: invariant_holds_def inv_holds_def)
apply (erule_tac x=a in allE)+
apply (drule_tac x=y in set_mp, blast)
apply (drule_tac x=z in set_mp, blast)
apply simp
done
end
locale valid_initial_state_C = valid_initial_state + kernel_m +
assumes ADT_C_if_serial:
"\<forall>s' a. (\<exists>hs. (hs, s') \<in> lift_fst_rel (lift_snd_rel rf_sr) \<and> hs \<in> full_invs_if')
\<longrightarrow> (\<exists>t. (s', t) \<in> data_type.Step (ADT_C_if fp utf) a)"
lemma internal_R_tranclp:
"(internal_R A R)\<^sup>+\<^sup>+ s s' \<Longrightarrow> R\<^sup>+\<^sup>+ (Fin A s) (Fin A s')"
apply (induct rule: tranclp.induct)
apply (simp add: internal_R_def)
apply (simp add: internal_R_def)
done
lemma inv_holds_transport:
"\<lbrakk> A [> Ia; C [> Ic; LI A C R (Ia \<times> Ic) \<rbrakk> \<Longrightarrow> C [> {s'. \<exists>s. (s,s') \<in> R \<and> s \<in> Ia \<and> s' \<in> Ic}"
apply (clarsimp simp: LI_def inv_holds_def)
apply (erule_tac x=j in allE)+
apply (clarsimp simp: rel_semi_def)
apply (subgoal_tac "(s,x) \<in> Step A j O R")
prefer 2
apply blast
apply blast
done
lemma inv_holds_T: "A [> UNIV"
by (simp add: inv_holds_def)
context valid_initial_state_C begin
lemma LI_abs_to_c:
"LI (ADT_A_if utf) (ADT_C_if fp utf)
(((lift_fst_rel (lift_snd_rel state_relation)))
O ((lift_fst_rel (lift_snd_rel rf_sr)) \<inter> {(h, c). h \<in> full_invs_if'}))
(full_invs_if \<times> UNIV)"
apply (rule LI_trans)
apply (rule global_automata_refine.fw_sim_abs_conc[OF haskell_to_abs])
apply (rule uop_nonempty)
apply (rule global_automata_refine.fw_sim_abs_conc[OF c_to_haskell])
apply (rule uop_nonempty)
apply (rule global_automaton_invs.ADT_invs[OF haskell_invs])
done
lemma ADT_C_if_Init_Fin_serial:
"Init_Fin_serial (ADT_C_if fp utf) s {s'. \<exists>hs. (hs, s') \<in> lift_fst_rel (lift_snd_rel rf_sr) \<and> hs \<in> full_invs_if'}"
apply (unfold_locales)
apply (subgoal_tac "ADT_C_if fp utf \<Turnstile> P" for P)
prefer 2
apply (rule fw_inv_transport)
apply (rule global_automaton_invs.ADT_invs)
apply (rule haskell_invs)
apply (rule invariant_T)
apply (rule global_automata_refine.fw_sim_abs_conc)
apply (rule c_to_haskell)
apply (rule uop_nonempty)
apply simp
apply (rule ADT_C_if_serial[rule_format])
apply simp
apply (clarsimp simp: ADT_C_if_def lift_fst_rel_def lift_snd_rel_def)
apply blast
apply (clarsimp simp: lift_fst_rel_def lift_snd_rel_def ADT_C_if_def)
apply (rule_tac x=bb in exI)
apply (clarsimp simp: full_invs_if'_def)
apply (case_tac "sys_mode_of s", simp_all)
done
lemma ADT_C_if_Init_Fin_serial_weak:
"Init_Fin_serial_weak (ADT_C_if fp utf) s {s'.
\<exists>hs. (hs, s') \<in> lift_fst_rel (lift_snd_rel rf_sr) \<and> hs \<in> full_invs_if'}"
apply (rule Init_Fin_serial.serial_to_weak)
apply (rule ADT_C_if_Init_Fin_serial)
done
lemma Fin_ADT_C_if:
"Fin (ADT_C_if fp utf) ((uc, s), m) = ((uc, cstate_to_A s), m)"
by (simp add: ADT_C_if_def)
lemma Fin_Init_s0_ADT_C_if:
"s0' \<in> Init (ADT_C_if fp utf) s0 \<Longrightarrow> Fin (ADT_C_if fp utf) s0' = s0"
by (clarsimp simp: ADT_C_if_def s0_def)
lemma big_step_R_tranclp_abs':
"\<lbrakk>(s, s')
\<in> lift_fst_rel (lift_snd_rel state_relation) O
lift_fst_rel (lift_snd_rel rf_sr);
big_step_R\<^sup>+\<^sup>+ s0 s''\<rbrakk> \<Longrightarrow> s'' = (Fin (ADT_C_if fp utf) s')
\<longrightarrow> big_step_R\<^sup>+\<^sup>+ s0 s"
apply (erule tranclp_induct)
apply (clarsimp simp: Fin_ADT_C_if lift_fst_rel_def)
apply (rule tranclp.r_into_trancl)
apply (simp add: big_step_R_def)
apply (clarsimp simp: Fin_ADT_C_if lift_fst_rel_def)
apply (rule tranclp.trancl_into_trancl)
apply assumption
apply (simp add: big_step_R_def)
done
lemmas big_step_R_tranclp_abs = big_step_R_tranclp_abs'[rule_format]
lemma ADT_C_if_inv_holds_transport:
"ADT_C_if fp utf [>
{s'.
\<exists>hs. (hs, s') \<in> lift_fst_rel (lift_snd_rel rf_sr) \<and>
hs \<in> full_invs_if' \<and>
(\<exists>as. (as, hs) \<in> lift_fst_rel (lift_snd_rel state_relation) \<and>
invs_if as)}"
apply (subst arg_cong[where f="\<lambda>S. ADT_C_if fp utf [> S"])
prefer 2
apply (rule_tac A="ADT_A_if utf" in inv_holds_transport)
prefer 3
apply (rule weaken_LI)
apply (rule LI_abs_to_c)
prefer 2
apply (rule invs_if_inv_holds_ADT_A_if)
prefer 2
apply (rule inv_holds_T)
apply (clarsimp simp: invs_if_full_invs_if)
apply force
done
lemma ADT_C_if_Init_transport:
"Init (ADT_C_if fp utf) s0
\<subseteq> {s'.
\<exists>hs. (hs, s') \<in> lift_fst_rel (lift_snd_rel rf_sr) \<and>
hs \<in> full_invs_if' \<and>
(\<exists>as. (as, hs) \<in> lift_fst_rel (lift_snd_rel state_relation) \<and>
invs_if as)}"
apply clarsimp
apply (frule set_mp[OF global_automata_refine.init_refinement[OF c_to_haskell[OF uop_nonempty]]])
apply (clarsimp simp: Image_def lift_fst_rel_def lift_snd_rel_def)
apply (frule set_mp[OF global_automata_refine.init_refinement[OF haskell_to_abs[OF uop_nonempty]]])
apply (clarsimp simp: Image_def lift_fst_rel_def lift_snd_rel_def)
apply (rule_tac x=bb in exI)
apply simp
apply (rule conjI)
apply (force simp: ADT_H_if_def)
apply (rule_tac x=ba in exI)
apply (clarsimp simp: ADT_A_if_def)
done
lemma ADT_C_if_big_step_R_terminate:
"rel_terminate (ADT_C_if fp utf) s0
(internal_R (ADT_C_if fp utf) big_step_R)
{s'. \<exists>hs. (hs, s') \<in> lift_fst_rel (lift_snd_rel rf_sr) \<and>
hs \<in> full_invs_if' \<and> (\<exists>as. (as, hs) \<in>
lift_fst_rel (lift_snd_rel state_relation) \<and> invs_if as)}
(\<lambda>s s'. internal_R (ADT_C_if fp utf) measuref_if s s')"
apply (rule_tac S="lift_fst_rel (lift_snd_rel state_relation) O
(lift_fst_rel (lift_snd_rel rf_sr) \<inter> {(h, c). h \<in> full_invs_if'})"
and Ia="Collect invs_if" and A="ADT_A_if utf" in LI_rel_terminate)
apply blast
prefer 8
apply (simp add: internal_R_ADT_A_if)
apply (rule ADT_A_if_big_step_R_terminate)
apply (simp add: internal_R_ADT_A_if, simp add: ADT_A_if_def)
apply (rule_tac x="s0" in bexI)
apply (drule internal_R_tranclp)
apply (simp add: Fin_Init_s0_ADT_C_if)
apply clarsimp
apply (rule big_step_R_tranclp_abs)
apply force
apply assumption
apply simp
apply (clarsimp simp: invs_if_full_invs_if extras_s0)
apply (drule set_mp[OF global_automata_refine.init_refinement[OF c_to_haskell[OF uop_nonempty]]])
apply (clarsimp simp: Image_def lift_fst_rel_def lift_snd_rel_def)
apply (frule set_mp[OF global_automata_refine.init_refinement[OF haskell_to_abs[OF uop_nonempty]]])
apply (clarsimp simp: Image_def lift_fst_rel_def lift_snd_rel_def)
apply (rule_tac x="((aa, bc), bd)" in bexI)
apply (rule_tac b="((aa, bb), bd)" in relcompI)
apply simp
apply (force simp: ADT_H_if_def)
apply simp
apply (rule invs_if_inv_holds_ADT_A_if)
apply (simp add: ADT_A_if_def invs_if_full_invs_if extras_s0)
apply (rule ADT_C_if_inv_holds_transport)
apply (rule ADT_C_if_Init_transport)
apply (rule weaken_LI[OF LI_abs_to_c])
apply (clarsimp simp: invs_if_full_invs_if)
done
lemma big_step_ADT_C_if_enabled_system:
"enabled_system (big_step_ADT_C_if utf) s0"
apply (simp add: big_step_ADT_C_if_def)
apply (rule_tac measuref="internal_R (ADT_C_if fp utf) measuref_if" in big_step_adt_enabled_system)
apply simp
apply (force simp: big_step_R_def internal_R_def)
apply (rule Init_Fin_serial_weak_strengthen)
apply (rule ADT_C_if_Init_Fin_serial_weak)
apply (rule ADT_C_if_inv_holds_transport)
apply force
apply (rule ADT_C_if_Init_transport)
apply (rule ADT_C_if_big_step_R_terminate)
done
end
sublocale valid_initial_state_C \<subseteq>
abstract_to_C: noninterference_refinement
"big_step_ADT_A_if utf" (* the ADT that we prove infoflow for *)
s0 (* initial state *)
"\<lambda>e s. part s" (* dom function *)
"uwr" (* uwr *)
"policyFlows (pasPolicy initial_aag)" (* policy *)
"undefined" (* out -- unused *)
PSched (* scheduler partition name *)
"big_step_ADT_C_if utf"
apply(unfold_locales)
apply(insert big_step_ADT_C_if_enabled_system)[1]
apply(fastforce simp: enabled_system_def)
apply(rule big_step_ADT_C_if_big_step_ADT_A_if_refines)
apply (rule uop_nonempty)
done
context valid_initial_state_C begin
lemma xnonleakage_C:
"abstract_to_C.conc.xNonleakage_gen"
apply(rule abstract_to_C.xNonleakage_gen_refinement_closed)
apply(rule xnonleakage)
done
end
end
|
subroutine dbesig (x1, alpha, kode, n, y, nz,w,ierr)
c Author Serge Steer, Copyright INRIA, 2005
c extends dbesi for the case where alpha is negative
double precision x1,alpha,y(n),w(n)
integer kode,n,nz,ierr
c
double precision a,pi,inf,x,a1
integer ier1,ier2
double precision dlamch
data pi /3.14159265358979324D0/
inf=dlamch('o')*2.0d0
x=x1
ier2=0
if (x.ne.x.or.alpha.ne.alpha) then
c . NaN case
call dset(n,inf-inf,y,1)
ierr=4
elseif (alpha .ge. 0.0d0) then
call dbesi(abs(x),alpha,kode,n,y,nz,ierr)
if (ierr.eq.2) call dset(n,inf,y,1)
if(x.lt.0.0d0) then
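c     .  I_v(-x) = (-1)**v * I_v(x) for integer order v: negate
c     .  every other entry, starting at the first odd order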
i0=mod(int(abs(alpha))+1,2)
call dscal((n-i0+1)/2,-1.0d0,y(1+i0),2)
endif
else if (alpha .eq. dint(alpha)) then
c . alpha <0 and integer,
c . transform to positive value of alpha
if(alpha-1+n.ge.0) then
c . 0 is between alpha and alpha+n
a1=0.0d0
nn=min(n,int(-alpha))
else
a1=-(alpha-1+n)
nn=n
endif
call dbesi(abs(x),a1,kode,n,w,nz,ierr)
if (ierr.eq.2) then
call dset(n,inf,y,1)
else
if(n.gt.nn) then
c . 0 is between alpha and alpha+n
call dcopy(n-nn,w,1,y(nn+1),1)
call dcopy(nn,w(2),-1,y,1)
else
c . alpha and alpha+n are negative
call dcopy(nn,w,-1,y,1)
endif
endif
if(x.lt.0.0d0) then
i0=mod(int(abs(alpha))+1,2)
call dscal((n-i0+1)/2,-1.0d0,y(1+i0),2)
endif
else if (x .eq. 0.0d0) then
c . alpha <0 and x==0
if(alpha-1.0d0+n.ge.0.0d0) then
c . 0 is between alpha and alpha+n
nn=int(-alpha)+1
else
nn=n
endif
ierr=2
call dset(nn,-inf,y,1)
if (n.gt.nn) call dset(n-nn,0.0d0,y(nn+1),1)
else
c     .  here alpha is negative and non-integer; x should be positive
C     .  (with x negative the result would be complex). CHECKED
c . transform to positive value of alpha
if(alpha-1.0d0+n.ge.0.0d0) then
c . 0 is between alpha and alpha+n
nn=int(-alpha)+1
else
nn=n
endif
c     .  compute for the negative values of alpha+k: transform the problem to
c     .  orders a1:a1+(nn-1) with a1 positive, so that a1+k = abs(alpha+(nn-1)-k)
a1=-(alpha-1.0d0+nn)
call dbesi(x,a1,kode,nn,w,nz1,ierr)
call dbesk(x,a1,1,nn,y,nz2,ier)
ierr=max(ierr,ier)
nz=max(nz1,nz2)
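c     .  combine I and K via the reflection formula (DLMF 10.27.2):
c     .    I_{-v}(x) = I_v(x) + (2/pi)*sin(v*pi)*K_v(x)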
if (ierr.eq.0) then
a=(2.0d0/pi)*dsin(a1*pi)
if (kode.eq.2) a=a*dexp(-x)
c . change sign to take into account that sin((a1+k)*pi)
C . changes sign with k
if (nn.ge.2) call dscal(nn/2,-1.0d0,y(2),2)
call daxpy(nn,a,y,1,w,1)
elseif (ierr.eq.2) then
call dset(nn,inf,w,1)
elseif (ierr.eq.4) then
call dset(nn,inf-inf,w,1)
endif
c . store the result in the correct order
call dcopy(nn,w,-1,y,1)
c     .  compute for the positive values of alpha+k, if any (note that x>0)
if (n.gt.nn) then
call dbesi(x,1.0d0-a1,kode,n-nn,y(nn+1),nz,ier)
if (ier.eq.2) call dset(n-nn,inf,y(nn+1),1)
ierr=max(ierr,ier)
endif
endif
end
subroutine dbesiv (x,nx,alpha,na, kode,y,w,ierr)
c Author Serge Steer, Copyright INRIA, 2005
c compute besseli function for x and alpha given by vectors
c     w : working array of size 2*na (used only if nz>0 and alpha
C     contains negative values)
double precision x(nx),alpha(na),y(*),w(*)
integer kode,nx,na,ier
double precision e,dlamch,w1,eps
eps=dlamch('p')
ierr=0
if (na.lt.0) then
c . element wise case x and alpha are supposed to have the same size
do i=1,nx
call dbesig (x(i), alpha(i),kode,1,y(i), nz, w1,ier)
ierr=max(ierr,ier)
enddo
elseif (na.eq.1) then
c     .  scalar alpha: apply the single order alpha(1) to every element of x
do i=1,nx
call dbesig (x(i), alpha(1),kode,1,y(i), nz, w1,ier)
ierr=max(ierr,ier)
enddo
else
c . compute besseli(x(i),y(j)), i=1,nx,j=1,na
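c     .  group alpha into maximal runs of consecutive orders
c     .  (alpha(j) = alpha(j-1)+1), so each run needs only one dbesig call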
j0=1
05 n=0
10 n=n+1
j=j0+n
if (j.le.na.and.abs((1+alpha(j-1))-alpha(j)).le.eps) then
goto 10
endif
do i=1,nx
call dbesig(x(i),alpha(j0),kode,n, w, nz, w(na+1),ier)
ierr=max(ierr,ier)
call dcopy(n,w,1,y(i+(j0-1)*nx),nx)
enddo
j0=j
if (j0.le.na) goto 05
endif
end
|
source("calculate/distance.r")
source("calculate/norms.r")
# Renderable data.frame of nearest neighbours
neighbours_table <- function(word, distance_type, count, radius) {
  if (radius <= 0) { radius <- Inf }
  mx <- distance_matrix(vector_for_word(word), matrix_for_words(all_words), distance_type)
  argsort <- order(mx)
  nearest_idxs <- argsort[2:(count + 1)] # Skip the guaranteed nearest neighbour: the word itself
  nearest_words <- all_words[nearest_idxs]
  nearest_distances <- mx[nearest_idxs]
  if (!is.infinite(radius)) {
    nearest_distances <- nearest_distances[nearest_distances <= radius]
    # seq_len() avoids the 1:0 trap when no neighbour lies within the radius
    nearest_words <- nearest_words[seq_len(length(nearest_distances))]
  }
  table <- data.frame(order = seq_along(nearest_words),
                      Concept = nearest_words,
                      distance = nearest_distances)
  names(table) <- c("Order", "Concept", distance_col_name(distance_type))
  return(table)
}
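# Example usage (hypothetical word and metric name; assumes all_words and the
# word-vector data loaded by distance.r/norms.r are in scope):
#   neighbours_table("apple", "cosine", count = 10, radius = 0)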
|
Formal statement is: lemma isCont_Lb_Ub: fixes f :: "real \<Rightarrow> real" assumes "a \<le> b" "\<forall>x. a \<le> x \<and> x \<le> b \<longrightarrow> isCont f x" shows "\<exists>L M. (\<forall>x. a \<le> x \<and> x \<le> b \<longrightarrow> L \<le> f x \<and> f x \<le> M) \<and> (\<forall>y. L \<le> y \<and> y \<le> M \<longrightarrow> (\<exists>x. a \<le> x \<and> x \<le> b \<and> (f x = y)))" Informal statement is: If $f$ is continuous on the closed interval $[a,b]$, then there exist $L$ and $M$ such that $L \le f(x) \le M$ for all $x \in [a,b]$, and $f$ attains every value in $[L,M]$; that is, the image of $[a,b]$ under $f$ is exactly $[L,M]$.
|
import cv2
import numpy as np
import time
color_map = {
"Animal" : (64, 128, 64 ),
"Archway" : (192, 0, 128 ),
"Bicyclist" : (0, 128, 192 ),
"Bridge" : (0, 128, 64 ),
"Building" : (128, 0, 0 ),
"Car" : (64, 0, 128 ),
"CartLuggagePram" : (64, 0, 192 ),
"Child" : (192, 128, 64 ),
"Column_Pole" : (192, 192, 128),
"Fence" : (64, 64, 128 ),
"LaneMkgsDriv" : (128, 0, 192 ),
"LaneMkgsNonDriv" : (192, 0, 64 ),
"Misc_Text" : (128, 128, 64 ),
"MotorcycleScooter" : (192, 0, 192 ),
"OtherMoving" : (128, 64, 64 ),
"ParkingBlock" : (64, 192, 128 ),
"Pedestrian" : (64, 64, 0 ),
"Road" : (128, 64, 128 ),
"RoadShoulder" : (128, 128, 192),
"Sidewalk" : (0, 0, 192 ),
"SignSymbol" : (192, 128, 128),
"Sky" : (128, 128, 128),
"SUVPickupTruck" : (64, 128, 192 ),
"TrafficCone" : (0, 0, 64 ),
"TrafficLight" : (0, 64, 64 ),
"Train" : (192, 64, 128 ),
"Tree" : (128, 128, 0 ),
"Truck_Bus" : (192, 128, 192),
"Tunnel" : (64, 0, 64 ),
"VegetationMisc" : (192, 192, 0 ),
"Void" : (0, 0, 0 ),
"Wall" : (64, 192, 0 ),
}
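# Each pixel of the label image stores a class index; recolour it by mapping
# index i to the i-th entry of color_map (relies on insertion-ordered dicts,
# i.e. Python 3.7+).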
img = cv2.imread('result_label.png', cv2.IMREAD_GRAYSCALE)
result = np.zeros((img.shape[0], img.shape[1], 3))
start = time.time()
for i, v in enumerate(color_map.values()):
result[img == i] = v
if np.any(img==i):
print(i, list(color_map.items())[i])
print(time.time()-start)
result = cv2.cvtColor(np.array(result, dtype=np.uint8), cv2.COLOR_RGB2BGR)
cv2.imwrite("result_color.png", result)
|
# Fishbone-Moncrief Initial Data
## Author: Zach Etienne
### Formatting improvements courtesy Brandon Clark
[comment]: <> (Abstract: TODO)
[comment]: <> (Notebook Status and Validation Notes: TODO)
### NRPy+ Source Code for this module: [FishboneMoncriefID/FishboneMoncriefID.py](../edit/FishboneMoncriefID/FishboneMoncriefID.py)
## Introduction:
The goal of this module is to construct Fishbone-Moncrief initial data for GRMHD simulations in a format suitable for the Einstein Toolkit (ETK). We will be using the equations as derived in [the original paper](http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1976ApJ...207..962F&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf), which will hereafter be called "***the FM paper***". Since we want to use this with the ETK, our final result will be in Cartesian coordinates. The natural coordinate system for these data is spherical, however, so we will use [reference_metric.py](../edit/reference_metric.py) ([**Tutorial**](Tutorial-Reference_Metric.ipynb)) to help with the coordinate transformation.
This notebook documents the equations in the NRPy+ module [FishboneMoncrief.py](../edit/FishboneMoncriefID/FishboneMoncriefID.py). Then, we will build an Einstein Toolkit [thorn](Tutorial-ETK_thorn-FishboneMoncriefID.ipynb) to set this initial data.
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows:
1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules
1. [Step 2](#fishbonemoncrief): Implementing Fishbone-Moncrief initial data within NRPy+
1. [Step 2.a](#registergridfunctions): Register within NRPy+ needed gridfunctions and initial parameters
1. [Step 2.b](#l_of_r): Specific angular momentum $l(r)$
1. [Step 2.c](#enthalpy): Specific enthalpy $h$
1. [Step 2.d](#pressure_density): Pressure and density, from the specific enthalpy
1. [Step 2.e](#covariant_velocity): Nonzero covariant velocity components $u_\mu$
1. [Step 2.f](#inverse_bl_metric): Inverse metric $g^{\mu\nu}$ for the black hole in Boyer-Lindquist coordinates
1. [Step 2.g](#xform_to_ks): Transform components of four-velocity $u^\mu$ to Kerr-Schild
1. [Step 2.h](#ks_metric): Define Kerr-Schild metric $g_{\mu\nu}$ and extrinsic curvature $K_{ij}$
1. [Step 2.i](#magnetic_field): Seed poloidal magnetic field $B^i$
1. [Step 2.j](#adm_metric): Set the ADM quantities $\alpha$, $\beta^i$, and $\gamma_{ij}$ from the spacetime metric $g_{\mu\nu}$
1. [Step 2.k](#magnetic_field_comoving_frame): Set the magnetic field components in the comoving frame $b^\mu$, and $b^2$, which is twice the magnetic pressure
1. [Step 2.l](#lorentz_fac_valencia): Lorentz factor $\Gamma = \alpha u^0$ and Valencia 3-velocity $v^i_{(n)}$
1. [Step 3](#output_to_c): Output SymPy expressions to C code, using NRPy+
1. [Step 4](#code_validation): Code Validation against `FishboneMoncriefID.FishboneMoncriefID` NRPy+ module
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Initialize core Python/NRPy+ modules \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
We begin by importing the packages and NRPy+ modules that we will need. We will also set some of the most commonly used parameters.
```python
# Step 1a: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
import loop
import reference_metric as rfm
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
#Set the spatial dimension parameter to 3.
par.set_parval_from_str("grid::DIM", 3)
DIM = par.parval_from_str("grid::DIM")
thismodule = "FishboneMoncriefID"
```
<a id='fishbonemoncrief'></a>
# Step 2: The Fishbone-Moncrief Initial Data Prescription \[Back to [top](#toc)\]
$$\label{fishbonemoncrief}$$
With NRPy's most important functions now available to us, we can start to set up the rest of the tools we will need to build the initial data.
<a id='registergridfunctions'></a>
## Step 2.a: Register within NRPy+ needed gridfunctions and initial parameters \[Back to [top](#toc)\]
$$\label{registergridfunctions}$$
We will now register the gridfunctions we expect to use. Critically, we register the physical metric and extrinsic curvature tensors.
```python
gPhys4UU = ixp.register_gridfunctions_for_single_rank2("AUX","gPhys4UU", "sym01", DIM=4)
KDD = ixp.register_gridfunctions_for_single_rank2("EVOL","KDD", "sym01")
# Variables needed for initial data given in spherical basis
r, th, ph = gri.register_gridfunctions("AUX",["r","th","ph"])
r_in,r_at_max_density,a,M = par.Cparameters("REAL",thismodule,
["r_in","r_at_max_density", "a","M"],
[ 6.0, 12.0, 0.9375,1.0])
kappa,gamma = par.Cparameters("REAL",thismodule,["kappa","gamma"], [1.0e-3, 4.0/3.0])
LorentzFactor = gri.register_gridfunctions("AUX","LorentzFactor")
```
<a id='l_of_r'></a>
## Step 2.b: Specific angular momentum $l(r)$ \[Back to [top](#toc)\]
$$\label{l_of_r}$$
Now, we can begin actually building the ID equations. We will start with the value of the angular momentum $l$ at the position $r \equiv$`r_at_max_density` where the density is at a maximum, as in equation 3.8 of the FM paper:
\begin{align}
l(r) &= \pm \left( \frac{M}{r^3} \right) ^{1/2}
\left[ \frac{r^4+r^2a^2-2Mra^2 \mp a(Mr)^{1/2}(r^2-a^2)}
{r^2 -3Mr \pm 2a(Mr)^{1/2}} \right].
\end{align}
```python
def calculate_l_at_r(r):
l = sp.sqrt(M/r**3) * (r**4 + r**2*a**2 - 2*M*r*a**2 - a*sp.sqrt(M*r)*(r**2-a**2))
l /= r**2 - 3*M*r + 2*a*sp.sqrt(M*r)
return l
# First compute angular momentum at r_at_max_density, TAKING POSITIVE ROOT. This way disk is co-rotating with black hole
# Eq 3.8:
l = calculate_l_at_r(r_at_max_density)
```
<a id='enthalpy'></a>
## Step 2.c: Specific enthalpy $h$ \[Back to [top](#toc)\]
$$\label{enthalpy}$$
Next, we will follow equation 3.6 of the FM paper to compute the enthalpy $h$ by first finding its logarithm $\ln h$. Fortunately, we can make this process quite a bit simpler by first identifying the common subexpressions. Let
\begin{align}
\Delta &= r^2 - 2Mr + a^2 \\
\Sigma &= r^2 + a^2 \cos^2 (\theta) \\
A &= (r^2+a^2)^2 - \Delta a^2 \sin^2(\theta);
\end{align}
furthermore, let
\begin{align}
\text{tmp3} &= \sqrt{1 + \frac{4 l^2 \Sigma^2 \Delta}{A^2 \sin^2 (\theta)}}. \\
\end{align}
(These shorthands build the radially-dependent part of the log of the enthalpy, `ln_h`, computed in the code cell below.)
So,
$$
{\rm ln\_h} = \frac{1}{2} \log \left( \frac{1+\text{tmp3}}{\Sigma \Delta/A} \right) - \frac{1}{2} \text{tmp3} - \frac{2aMrl}{A}
$$
```python
# Eq 3.6:
# First define the shorthands Delta, Sigma, and A of Eq 3.5
Delta = r**2 - 2*M*r + a**2
Sigma = r**2 + a**2*sp.cos(th)**2
A = (r**2 + a**2)**2 - Delta*a**2*sp.sin(th)**2
# Next compute the radially-dependent part of log(enthalpy), ln_h
tmp3 = sp.sqrt(1 + 4*l**2*Sigma**2*Delta/(A*sp.sin(th))**2)
# Term 1 of Eq 3.6
ln_h = sp.Rational(1,2)*sp.log( ( 1 + tmp3) / (Sigma*Delta/A))
# Term 2 of Eq 3.6
ln_h -= sp.Rational(1,2)*tmp3
# Term 3 of Eq 3.6
ln_h -= 2*a*M*r*l/A
```
Additionally, let
\begin{align}
\Delta_{\rm in} &= r_{\rm in}^2 - 2Mr_{\rm in} + a^2 \\
\Sigma_{\rm in} &= r_{\rm in}^2 + a^2 \cos^2 (\pi/2) \\
A_{\rm in} &= (r_{\rm in}^2+a^2)^2 - \Delta_{\rm in} a^2 \sin^2(\pi/2)
\end{align}
and
\begin{align}
\text{tmp3in} &= \sqrt{1 + \frac{4 l^2 \Sigma_{\rm in}^2 \Delta_{\rm in}}{A_{\rm in}^2 \sin^2 (\pi/2)}}, \\
\end{align}
corresponding to the negated, radially-independent part of log(enthalpy), i.e. the integration constant:
\begin{align}
{\rm mln\_h\_in} = -\frac{1}{2} \log \left( \frac{1+\text{tmp3in}}{\Sigma_{\rm in} \Delta_{\rm in}/A_{\rm in}} \right) + \frac{1}{2} \text{tmp3in} + \frac{2aMr_{\rm in}l}{A_{\rm in}}. \\
\end{align}
(Note that there is a typo in the expression for these terms as given in Eq 3.6 of the FM paper, so we opt to just evaluate the negative of the first three terms at $r=$ `r_in` and $\theta = \pi/2$ (the integration constant), as described in the text below Eq. 3.6.)
So, then, we exponentiate:
\begin{align}
\text{hm1} \equiv h-1 &= e^{{\rm ln\_h}+{\rm mln\_h\_in}}-1. \\
\end{align}
```python
# Next compute the radially-INdependent part of log(enthalpy), mln_h_in
# Note that there is some typo in the expression for these terms given in Eq 3.6, so we opt to just evaluate
# negative of the first three terms at r=r_in and th=pi/2 (the integration constant), as described in
# the text below Eq. 3.6, basically just copying the above lines of code.
# Delin = Delta_in ; Sigin = Sigma_in ; Ain = A_in .
Delin = r_in**2 - 2*M*r_in + a**2
Sigin = r_in**2 + a**2*sp.cos(sp.pi/2)**2
Ain = (r_in**2 + a**2)**2 - Delin*a**2*sp.sin(sp.pi/2)**2
tmp3in = sp.sqrt(1 + 4*l**2*Sigin**2*Delin/(Ain*sp.sin(sp.pi/2))**2)
# Term 4 of Eq 3.6
mln_h_in = -sp.Rational(1,2)*sp.log( ( 1 + tmp3in) / (Sigin*Delin/Ain))
# Term 5 of Eq 3.6
mln_h_in += sp.Rational(1,2)*tmp3in
# Term 6 of Eq 3.6
mln_h_in += 2*a*M*r_in*l/Ain
hm1 = sp.exp(ln_h + mln_h_in) - 1
```
<a id='pressure_density'></a>
## Step 2.d: Pressure and density, from the specific enthalpy \[Back to [top](#toc)\]
$$\label{pressure_density}$$
Python 3.4 + SymPy 1.0.0 has a serious problem taking the power here; it hangs forever, so instead we use the identity $x^{1/y} = \exp\left(\frac{1}{y} \log x\right)$. (The direct power expression works with Python 2.7 + SymPy 0.7.4.1.) Thus, our expression for the density becomes:
\begin{align}
\rho_0 &= \left( \frac{(h-1)(\gamma-1)}{\kappa \gamma} \right)^{1/(\gamma-1)} \\
&= \exp \left[ {\frac{1}{\gamma-1} \log \left( \frac{(h-1)(\gamma-1)}{\kappa \gamma}\right)} \right]
\end{align}
Additionally, the pressure is given by the polytropic relation $P_0 = \kappa \rho_0^\gamma$.
```python
rho_initial,Pressure_initial = gri.register_gridfunctions("AUX",["rho_initial","Pressure_initial"])
# Python 3.4 + sympy 1.0.0 has a serious problem taking the power here, hangs forever.
# so instead we use the identity x^{1/y} = exp( [1/y] * log(x) )
# Original expression (works with Python 2.7 + sympy 0.7.4.1):
# rho_initial = ( hm1*(gamma-1)/(kappa*gamma) )**(1/(gamma - 1))
# New expression (workaround):
rho_initial = sp.exp( (1/(gamma-1)) * sp.log( hm1*(gamma-1)/(kappa*gamma) ))
Pressure_initial = kappa * rho_initial**gamma
```
<a id='covariant_velocity'></a>
## Step 2.e: Nonzero covariant velocity components $u_\mu$ \[Back to [top](#toc)\]
$$\label{covariant_velocity}$$
We now want to compute eq 3.3; we will start by finding $e^{-2 \chi}$ in Boyer-Lindquist (BL) coordinates. By eq 2.16, $\chi = \psi - \nu$, so, by eqs. 3.5,
\begin{align}
e^{2 \nu} &= \frac{\Sigma \Delta}{A} \\
e^{2 \psi} &= \frac{A \sin^2 \theta}{\Sigma} \\
e^{-2 \chi} &= e^{2 \nu} / e^{2 \psi} = e^{2(\nu - \psi)}.
\end{align}
Next, we will calculate the 4-velocity $u_i$ of the fluid disk in BL coordinates. We start with eqs. 3.3 and 2.13, finding
\begin{align}
u_{(r)} = u_{(\theta)} &= 0 \\
u_{(\phi)} &= \sqrt{-1+ \frac{1}{2}\sqrt{1 + 4l^2e^{-2 \chi}}} \\
u_{(t)} &= - \sqrt{1 + u_{(\phi)}^2}.
\end{align}
Given that $\omega = 2aMr/A$, we then find that, in BL coordinates,
\begin{align}
u_r = u_{\theta} &= 0 \\
u_{\phi} &= u_{(\phi)} \sqrt{e^{2 \psi}} \\
u_t &= u_{(t)} \sqrt{e^{2 \nu}} - \omega u_{\phi},
\end{align}
using eq. 2.13 to get the last relation.
```python
# Eq 3.3: First compute exp(-2 chi), assuming Boyer-Lindquist coordinates
# Eq 2.16: chi = psi - nu, so
# Eq 3.5 -> exp(-2 chi) = exp(-2 (psi - nu)) = exp(2 nu)/exp(2 psi)
exp2nu = Sigma*Delta / A
exp2psi = A*sp.sin(th)**2 / Sigma
expm2chi = exp2nu / exp2psi
# Eq 3.3: Next compute u_(phi).
u_pphip = sp.sqrt((-1 + sp.sqrt(1 + 4*l**2*expm2chi))/2)
# Eq 2.13: Compute u_(t)
u_ptp = -sp.sqrt(1 + u_pphip**2)
# Next compute spatial components of 4-velocity in Boyer-Lindquist coordinates:
uBL4D = ixp.zerorank1(DIM=4) # Components 1 and 2: u_r = u_theta = 0
# Eq 2.12 (typo): u_(phi) = e^(-psi) u_phi -> u_phi = e^(psi) u_(phi)
uBL4D[3] = sp.sqrt(exp2psi)*u_pphip
# Assumes Boyer-Lindquist coordinates:
omega = 2*a*M*r/A
# Eq 2.13: u_(t) = 1/sqrt(exp2nu) * ( u_t + omega*u_phi )
# --> u_t = u_(t) * sqrt(exp2nu) - omega*u_phi
# --> u_t = u_ptp * sqrt(exp2nu) - omega*uBL4D[3]
uBL4D[0] = u_ptp*sp.sqrt(exp2nu) - omega*uBL4D[3]
```
<a id='inverse_bl_metric'></a>
## Step 2.f: Inverse metric $g^{\mu\nu}$ for the black hole in Boyer-Lindquist coordinates \[Back to [top](#toc)\]
$$\label{inverse_bl_metric}$$
Next, we will use eq. 2.1 to find the inverse physical (as opposed to conformal) metric in BL coordinates, using the shorthands defined in eq. 3.5:
\begin{align}
g_{tt} &= - \frac{\Sigma \Delta}{A} + \omega^2 \sin^2 \theta \frac{A}{\Sigma} \\
g_{t \phi} = g_{\phi t} &= - \omega \sin^2 \theta \frac{A}{\Sigma} \\
g_{\phi \phi} &= \sin^2 \theta \frac{A}{\Sigma},
\end{align}
which can be inverted to show that
\begin{align}
g^{tt} &= - \frac{A}{\Delta \Sigma} \\
g^{t \phi} = g^{\phi t} &= \frac{2aMr}{\Delta \Sigma} \\
g^{\phi \phi} &= - \frac{4a^2M^2r^2}{\Delta A \Sigma} + \frac{\Sigma^2}{A \Sigma \sin^2 \theta}.
\end{align}
With this, we will now be able to raise the index on the BL $u_i$: $u^i = g^{ij} u_j$
```python
# Eq. 3.5:
# w = 2*a*M*r/A;
# Eqs. 3.5 & 2.1:
# gtt = -Sig*Del/A + w^2*Sin[th]^2*A/Sig;
# gtp = w*Sin[th]^2*A/Sig;
# gpp = Sin[th]^2*A/Sig;
# FullSimplify[Inverse[{{gtt,gtp},{gtp,gpp}}]]
gPhys4BLUU = ixp.zerorank2(DIM=4)
gPhys4BLUU[0][0] = -A/(Delta*Sigma)
# DO NOT NEED TO SET gPhys4BLUU[1][1] or gPhys4BLUU[2][2]!
gPhys4BLUU[0][3] = gPhys4BLUU[3][0] = -2*a*M*r/(Delta*Sigma)
gPhys4BLUU[3][3] = -4*a**2*M**2*r**2/(Delta*A*Sigma) + Sigma**2/(A*Sigma*sp.sin(th)**2)
uBL4U = ixp.zerorank1(DIM=4)
for i in range(4):
for j in range(4):
uBL4U[i] += gPhys4BLUU[i][j]*uBL4D[j]
```
<a id='xform_to_ks'></a>
## Step 2.g: Transform components of four-velocity $u^\mu$ to Kerr-Schild \[Back to [top](#toc)\]
$$\label{xform_to_ks}$$
Now, we will transform the 4-velocity from the Boyer-Lindquist to the Kerr-Schild basis. This algorithm is adapted from [HARM](https://github.com/atchekho/harmpi/blob/master/init.c). This defines the tensor `transformBLtoKS`, whose diagonal elements are $1$ and whose non-zero off-diagonal elements are
\begin{align}
\text{transformBLtoKS}_{tr} &= \frac{2r}{r^2-2r+a^2} \\
\text{transformBLtoKS}_{\phi r} &= \frac{a}{r^2-2r+a^2} \\
\end{align}
```python
# https://github.com/atchekho/harmpi/blob/master/init.c
# Next transform Boyer-Lindquist velocity to Kerr-Schild basis:
transformBLtoKS = ixp.zerorank2(DIM=4)
for i in range(4):
transformBLtoKS[i][i] = 1
transformBLtoKS[0][1] = 2*r/(r**2 - 2*r + a*a)
transformBLtoKS[3][1] = a/(r**2 - 2*r + a*a)
#uBL4U = ixp.declarerank1("UBL4U",DIM=4)
# After the xform below, print(uKS4U) outputs:
# [UBL4U0 + 2*UBL4U1*r/(a**2 + r**2 - 2*r), UBL4U1, UBL4U2, UBL4U1*a/(a**2 + r**2 - 2*r) + UBL4U3]
uKS4U = ixp.zerorank1(DIM=4)
for i in range(4):
for j in range(4):
uKS4U[i] += transformBLtoKS[i][j]*uBL4U[j]
```
<a id='ks_metric'></a>
## Step 2.h: Define Kerr-Schild metric $g_{\mu\nu}$ and extrinsic curvature $K_{ij}$ \[Back to [top](#toc)\]
$$\label{ks_metric}$$
We will also adopt the Kerr-Schild metric for Fishbone-Moncrief disks. Further details can be found in [Cook's Living Review](http://gravity.psu.edu/numrel/jclub/jc/Cook___LivRev_2000-5.pdf) article on initial data, or in the appendix of [this](https://arxiv.org/pdf/1704.00599.pdf) article. So, in KS coordinates,
\begin{align}
\rho^2 &= r^2 + a^2 \cos^2 \theta \\
\Delta &= r^2 - 2Mr + a^2 \\
\alpha &= \left(1 + \frac{2Mr}{\rho^2}\right)^{-1/2} \\
\beta^0 &= \frac{2 \alpha^2 Mr}{\rho^2} \\
\gamma_{00} &= 1 + \frac{2Mr}{\rho^2} \\
\gamma_{02} = \gamma_{20} &= -\left(1+\frac{2Mr}{\rho^2}\right) a \sin^2 \theta \\
\gamma_{11} &= \rho^2 \\
\gamma_{22} &= \left(r^2+a^2+\frac{2Mr}{\rho^2} a^2 \sin^2 \theta\right) \sin^2 \theta.
\end{align}
(Note that only the non-zero components of $\beta^i$ and $\gamma_{ij}$ are defined here.)
```python
# Adopt the Kerr-Schild metric for Fishbone-Moncrief disks
# http://gravity.psu.edu/numrel/jclub/jc/Cook___LivRev_2000-5.pdf
# Alternatively, Appendix of https://arxiv.org/pdf/1704.00599.pdf
rhoKS2 = r**2 + a**2*sp.cos(th)**2 # Eq 79 of Cook's Living Review article
DeltaKS = r**2 - 2*M*r + a**2 # Eq 79 of Cook's Living Review article
alphaKS = 1/sp.sqrt(1 + 2*M*r/rhoKS2)
betaKSU = ixp.zerorank1()
betaKSU[0] = alphaKS**2*2*M*r/rhoKS2
gammaKSDD = ixp.zerorank2()
gammaKSDD[0][0] = 1 + 2*M*r/rhoKS2
gammaKSDD[0][2] = gammaKSDD[2][0] = -(1 + 2*M*r/rhoKS2)*a*sp.sin(th)**2
gammaKSDD[1][1] = rhoKS2
gammaKSDD[2][2] = (r**2 + a**2 + 2*M*r/rhoKS2 * a**2*sp.sin(th)**2) * sp.sin(th)**2
```
We can also define the following useful quantities, continuing in KS coordinates:
\begin{align}
A &= a^2 \cos (2 \theta) + a^2 +2r^2 \\
B &= A + 4Mr \\
D &= \sqrt{\frac{2Mr}{a^2 \cos^2 \theta +r^2}+1};
\end{align}
we will also define the extrinsic curvature:
\begin{align}
K_{00} &= D\frac{A+2Mr}{A^2 B} (4M(a^2 \cos(2 \theta)+a^2-2r^2)) \\
K_{01} = K_{10} &= \frac{D}{AB} (8a^2Mr\sin \theta \cos \theta) \\
K_{02} = K_{20} &= \frac{D}{A^2} (-2aM \sin^2 \theta (a^2\cos(2 \theta)+a^2-2r^2)) \\
K_{11} &= \frac{D}{B} (4Mr^2) \\
K_{12} = K_{21} &= \frac{D}{AB} (-8a^3Mr \sin^3 \theta \cos \theta) \\
K_{22} &= \frac{D}{A^2 B} (2Mr \sin^2 \theta (a^4(r-M) \cos(4 \theta) + a^4 (M+3r) + 4a^2 r^2 (2r-M) + 4a^2 r \cos(2 \theta) (a^2 + r(M+2r)) + 8r^5)). \\
\end{align}
Note that the indexing for extrinsic curvature only runs from 0 to 2, since there are no time components to the tensor.
```python
AA = a**2 * sp.cos(2*th) + a**2 + 2*r**2
BB = AA + 4*M*r
DD = sp.sqrt(2*M*r / (a**2 * sp.cos(th)**2 + r**2) + 1)
KDD[0][0] = DD*(AA + 2*M*r)/(AA**2*BB) * (4*M*(a**2 * sp.cos(2*th) + a**2 - 2*r**2))
KDD[0][1] = KDD[1][0] = DD/(AA*BB) * 8*a**2*M*r*sp.sin(th)*sp.cos(th)
KDD[0][2] = KDD[2][0] = DD/AA**2 * (-2*a*M*sp.sin(th)**2 * (a**2 * sp.cos(2*th) + a**2 - 2*r**2))
KDD[1][1] = DD/BB * 4*M*r**2
KDD[1][2] = KDD[2][1] = DD/(AA*BB) * (-8*a**3*M*r*sp.sin(th)**3*sp.cos(th))
KDD[2][2] = DD/(AA**2*BB) * \
(2*M*r*sp.sin(th)**2 * (a**4*(r-M)*sp.cos(4*th) + a**4*(M+3*r) +
4*a**2*r**2*(2*r-M) + 4*a**2*r*sp.cos(2*th)*(a**2 + r*(M+2*r)) + 8*r**5))
```
We must also compute the inverse and determinant of the KS metric. We can use the NRPy+ [indexedexp.py](../edit/indexedexp.py) function to do this easily for the inverse physical 3-metric $\gamma^{ij}$, and then use the lapse $\alpha$ and the shift $\beta^i$ to find the full, inverse 4-dimensional metric, $g^{ij}$. We use the general form relating the 3- and 4- metric from (B&S 2.122)
\begin{equation}
g_{\mu\nu} = \begin{pmatrix}
-\alpha^2 + \beta\cdot\beta & \beta_i \\
\beta_j & \gamma_{ij}
\end{pmatrix},
\end{equation}
and invert it. That is,
\begin{align}
g^{00} &= -\frac{1}{\alpha^2} \\
g^{0i} = g^{i0} &= \frac{\beta^{i-1}}{\alpha^2} \\
g^{ij} = g^{ji} &= \gamma^{(i-1) (j-1)} - \frac{\beta^{i-1} \beta^{j-1}}{\alpha^2},
\end{align}
keeping careful track of the differences in the indexing conventions for 3-dimensional quantities and 4-dimensional quantities (Python always indexes lists from 0, but in four dimensions, the 0 direction corresponds to time, while in 3+1, the connection to time is handled by other variables).
```python
# For compatibility, we must compute gPhys4UU
gammaKSUU,gammaKSDET = ixp.symm_matrix_inverter3x3(gammaKSDD)
# See, e.g., Eq. 4.49 of https://arxiv.org/pdf/gr-qc/0703035.pdf , where N = alpha
gPhys4UU[0][0] = -1 / alphaKS**2
for i in range(1,4):
    # if the quantity does not have a "4", then it is assumed to be a 3D quantity.
    # E.g., betaKSU[] is a spatial vector, with indices ranging from 0 to 2:
    gPhys4UU[0][i] = gPhys4UU[i][0] = betaKSU[i-1]/alphaKS**2
for i in range(1,4):
for j in range(1,4):
# if the quantity does not have a "4", then it is assumed to be a 3D quantity.
# E.g., betaKSU[] is a spatial vector, with indices ranging from 0 to 2,
# and gammaKSUU[][] is a spatial tensor, with indices again ranging from 0 to 2.
gPhys4UU[i][j] = gPhys4UU[j][i] = gammaKSUU[i-1][j-1] - betaKSU[i-1]*betaKSU[j-1]/alphaKS**2
```
<a id='magnetic_field'></a>
## Step 2.i: Seed poloidal magnetic field $B^i$ \[Back to [top](#toc)\]
$$\label{magnetic_field}$$
The original Fishbone-Moncrief initial data prescription describes a non-self-gravitating accretion disk in hydrodynamical equilibrium about a black hole. The following assumes that a very weak magnetic field seeded into this disk will not significantly disturb this equilibrium, at least on a dynamical (free-fall) timescale.
Now, we will set up the magnetic field that, when simulated with a GRMHD code, will give us insight into the electromagnetic emission from the disk. We define the vector potential $A_i$ to be proportional to $\rho_0$, and, as usual, let the magnetic field $B^i$ be the curl of the vector potential.
```python
A_b = par.Cparameters("REAL",thismodule,"A_b",1.0)
A_3vecpotentialD = ixp.zerorank1()
# Set A_phi = A_b*rho_initial FIXME: why is there a sign error?
A_3vecpotentialD[2] = -A_b * rho_initial
BtildeU = ixp.register_gridfunctions_for_single_rank1("EVOL","BtildeU")
# Eq 15 of https://arxiv.org/pdf/1501.07276.pdf:
# B = curl A -> B^r = d_th A_ph - d_ph A_th
BtildeU[0] = sp.diff(A_3vecpotentialD[2],th) - sp.diff(A_3vecpotentialD[1],ph)
# B = curl A -> B^th = d_ph A_r - d_r A_ph
BtildeU[1] = sp.diff(A_3vecpotentialD[0],ph) - sp.diff(A_3vecpotentialD[2],r)
# B = curl A -> B^ph = d_r A_th - d_th A_r
BtildeU[2] = sp.diff(A_3vecpotentialD[1],r) - sp.diff(A_3vecpotentialD[0],th)
```
<a id='adm_metric'></a>
## Step 2.j: Set the ADM quantities $\alpha$, $\beta^i$, and $\gamma_{ij}$ from the spacetime metric $g_{\mu\nu}$ \[Back to [top](#toc)\]
$$\label{adm_metric}$$
Now, we wish to build the 3+1-dimensional variables in terms of the inverse 4-dimensional spacetime metric $g^{ij},$ as demonstrated in eq. 4.49 of [Gourgoulhon's lecture notes on 3+1 formalisms](https://arxiv.org/pdf/gr-qc/0703035.pdf) (letting $N=\alpha$). So,
\begin{align}
\alpha &= \sqrt{-\frac{1}{g^{00}}} \\
\beta^i &= \alpha^2 g^{0 (i+1)} \\
\gamma^{ij} &= g^{(i+1) (j+1)} + \frac{\beta^i \beta_j}{\alpha^2},
\end{align}
again keeping careful track of the differences in the indexing conventions for 3-dimensional quantities and 4-dimensional quantities. We will also take the inverse of $\gamma^{ij}$, obtaining (naturally) $\gamma_{ij}$ and its determinant $|\gamma|$. (Note that the function we use gives the determinant of $\gamma^{ij}$, which is the reciprocal of $|\gamma|$.)
```python
# Construct spacetime metric in 3+1 form:
# See, e.g., Eq. 4.49 of https://arxiv.org/pdf/gr-qc/0703035.pdf , where N = alpha
alpha = gri.register_gridfunctions("EVOL",["alpha"])
betaU = ixp.register_gridfunctions_for_single_rank1("EVOL","betaU")
alpha = sp.sqrt(1/(-gPhys4UU[0][0]))
betaU = ixp.zerorank1()
for i in range(3):
betaU[i] = alpha**2 * gPhys4UU[0][i+1]
gammaUU = ixp.zerorank2()
for i in range(3):
for j in range(3):
gammaUU[i][j] = gPhys4UU[i+1][j+1] + betaU[i]*betaU[j]/alpha**2
gammaDD = ixp.register_gridfunctions_for_single_rank2("EVOL","gammaDD","sym01")
gammaDD,igammaDET = ixp.symm_matrix_inverter3x3(gammaUU)
gammaDET = 1/igammaDET
```
Now, we will lower the index on the shift vector, $\beta_j = \gamma_{ij} \beta^i$, and use that to calculate the 4-dimensional metric tensor $g_{\mu\nu}$. So, we have
\begin{align}
g_{00} &= -\alpha^2 + \beta^2 \\
g_{0 (i+1)} = g_{(i+1) 0} &= \beta_i \\
g_{(i+1) (j+1)} &= \gamma_{ij},
\end{align}
where $\beta^2 \equiv \beta^i \beta_i$.
```python
###############
# Next compute g_{\alpha \beta} from lower 3-metric, using
# Eq 4.47 of https://arxiv.org/pdf/gr-qc/0703035.pdf
betaD = ixp.zerorank1()
for i in range(3):
for j in range(3):
betaD[i] += gammaDD[i][j]*betaU[j]
beta2 = sp.sympify(0)
for i in range(3):
beta2 += betaU[i]*betaD[i]
gPhys4DD = ixp.zerorank2(DIM=4)
gPhys4DD[0][0] = -alpha**2 + beta2
for i in range(3):
gPhys4DD[0][i+1] = gPhys4DD[i+1][0] = betaD[i]
for j in range(3):
gPhys4DD[i+1][j+1] = gammaDD[i][j]
```
<a id='magnetic_field_comoving_frame'></a>
## Step 2.k: Set the magnetic field components in the comoving frame $b^\mu$, and $b^2$, which is twice the magnetic pressure \[Back to [top](#toc)\]
$$\label{magnetic_field_comoving_frame}$$
Next compute $b^{\mu}$ using Eqs 23, 24, 27 and 31 of [this paper](https://arxiv.org/pdf/astro-ph/0503420.pdf):
\begin{align}
B^i &= \frac{\tilde{B}^i}{\sqrt{|\gamma|}} \\
B^0_{(u)} &= \frac{u_{i+1} B^i}{\alpha} \\
b^0 &= \frac{B^0_{(u)}}{\sqrt{4 \pi}} \\
b^{i+1} &= \frac{\frac{B^i}{\alpha} + B^0_{(u)} u^{i+1}}{u^0 \sqrt{4 \pi}}
\end{align}
```python
###############
# Next compute b^{\mu} using Eqs 23 and 31 of https://arxiv.org/pdf/astro-ph/0503420.pdf
uKS4D = ixp.zerorank1(DIM=4)
for i in range(4):
for j in range(4):
uKS4D[i] += gPhys4DD[i][j] * uKS4U[j]
# Eq 27 of https://arxiv.org/pdf/astro-ph/0503420.pdf
BU = ixp.zerorank1()
for i in range(3):
BU[i] = BtildeU[i]/sp.sqrt(gammaDET)
# Eq 23 of https://arxiv.org/pdf/astro-ph/0503420.pdf
BU0_u = sp.sympify(0)
for i in range(3):
BU0_u += uKS4D[i+1]*BU[i]/alpha
smallbU = ixp.zerorank1(DIM=4)
smallbU[0] = BU0_u / sp.sqrt(4 * sp.pi)
# Eqs 24 and 31 of https://arxiv.org/pdf/astro-ph/0503420.pdf
for i in range(3):
smallbU[i+1] = (BU[i]/alpha + BU0_u*uKS4U[i+1])/(sp.sqrt(4*sp.pi)*uKS4U[0])
smallbD = ixp.zerorank1(DIM=4)
for i in range(4):
for j in range(4):
smallbD[i] += gPhys4DD[i][j]*smallbU[j]
smallb2 = sp.sympify(0)
for i in range(4):
smallb2 += smallbU[i]*smallbD[i]
```
<a id='lorentz_fac_valencia'></a>
## Step 2.l: Lorentz factor $\Gamma = \alpha u^0$ and Valencia 3-velocity $v^i_{(n)}$ \[Back to [top](#toc)\]
$$\label{lorentz_fac_valencia}$$
Now, we will define the Lorentz factor ($= \alpha u^0$) and the Valencia 3-velocity $v^i_{(n)}$, which sets the 3-velocity as measured by normal observers to the spatial slice:
\begin{align}
v^i_{(n)} &= \frac{u^i}{u^0 \alpha} + \frac{\beta^i}{\alpha}, \\
\end{align}
as shown in eq 11 of [this](https://arxiv.org/pdf/1501.07276.pdf) paper. We will also compute the product of the square root of the determinant of the 3-metric with the lapse.
```python
###############
LorentzFactor = alpha * uKS4U[0]
# Define Valencia 3-velocity v^i_(n), which sets the 3-velocity as measured by normal observers to the spatial slice:
# v^i_(n) = u^i/(u^0*alpha) + beta^i/alpha. See eq 11 of https://arxiv.org/pdf/1501.07276.pdf
Valencia3velocityU = ixp.zerorank1()
for i in range(3):
Valencia3velocityU[i] = uKS4U[i + 1] / (alpha * uKS4U[0]) + betaU[i] / alpha
sqrtgamma4DET = sp.symbols("sqrtgamma4DET")
sqrtgamma4DET = sp.sqrt(gammaDET)*alpha
```
<a id='output_to_c'></a>
## Step 3: Output above-generated expressions to C code, using NRPy+ \[Back to [top](#toc)\]
$$\label{output_to_c}$$
Finally, we have constructed the underlying expressions necessary for the Fishbone-Moncrief initial data. As a demonstration, we will use NRPy+'s `FD_outputC()` to print the expressions. (The actual output statements are commented out right now, to save time in testing.)
```python
KerrSchild_CKernel = [\
lhrh(lhs=gri.gfaccess("out_gfs","alpha"),rhs=alpha),\
lhrh(lhs=gri.gfaccess("out_gfs","betaU0"),rhs=betaU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","betaU1"),rhs=betaU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","betaU2"),rhs=betaU[2]),\
lhrh(lhs=gri.gfaccess("out_gfs","gammaDD00"),rhs=gammaDD[0][0]),\
lhrh(lhs=gri.gfaccess("out_gfs","gammaDD01"),rhs=gammaDD[0][1]),\
lhrh(lhs=gri.gfaccess("out_gfs","gammaDD02"),rhs=gammaDD[0][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","gammaDD11"),rhs=gammaDD[1][1]),\
lhrh(lhs=gri.gfaccess("out_gfs","gammaDD12"),rhs=gammaDD[1][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","gammaDD22"),rhs=gammaDD[2][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","KDD00"),rhs=KDD[0][0]),\
lhrh(lhs=gri.gfaccess("out_gfs","KDD01"),rhs=KDD[0][1]),\
lhrh(lhs=gri.gfaccess("out_gfs","KDD02"),rhs=KDD[0][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","KDD11"),rhs=KDD[1][1]),\
lhrh(lhs=gri.gfaccess("out_gfs","KDD12"),rhs=KDD[1][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","KDD22"),rhs=KDD[2][2]),\
]
#fin.FD_outputC("stdout",KerrSchild_CKernel)
FMdisk_Lorentz_uUs_CKernel = [\
lhrh(lhs=gri.gfaccess("out_gfs","LorentzFactor"),rhs=LorentzFactor),\
# lhrh(lhs=gri.gfaccess("out_gfs","uKS4U1"),rhs=uKS4U[1]),\
# lhrh(lhs=gri.gfaccess("out_gfs","uKS4U2"),rhs=uKS4U[2]),\
# lhrh(lhs=gri.gfaccess("out_gfs","uKS4U3"),rhs=uKS4U[3]),\
]
#fin.FD_outputC("stdout",FMdisk_Lorentz_uUs_CKernel)
FMdisk_hm1_rho_P_CKernel = [\
# lhrh(lhs=gri.gfaccess("out_gfs","hm1"),rhs=hm1),\
lhrh(lhs=gri.gfaccess("out_gfs","rho_initial"),rhs=rho_initial),\
lhrh(lhs=gri.gfaccess("out_gfs","Pressure_initial"),rhs=Pressure_initial),\
]
#fin.FD_outputC("stdout",FMdisk_hm1_rho_P_CKernel)
udotu = sp.sympify(0)
for i in range(4):
udotu += uKS4U[i]*uKS4D[i]
#NRPy_file_output(OUTDIR+"/standalone-spherical_coords/NRPy_codegen/FMdisk_Btildes.h", [],[],[],
# ID_protected_variables + ["r","th","ph"],
# [],[uKS4U[0], "uKS4Ut", uKS4U[1],"uKS4Ur", uKS4U[2],"uKS4Uth", uKS4U[3],"uKS4Uph",
# uKS4D[0], "uKS4Dt", uKS4D[1],"uKS4Dr", uKS4D[2],"uKS4Dth", uKS4D[3],"uKS4Dph",
# uKS4D[1] * BU[0] / alpha, "Bur", uKS4D[2] * BU[1] / alpha, "Buth", uKS4D[3] * BU[2] / alpha, "Buph",
# gPhys4DD[0][0], "g4DD00", gPhys4DD[0][1], "g4DD01",gPhys4DD[0][2], "g4DD02",gPhys4DD[0][3], "g4DD03",
# BtildeU[0], "BtildeUr", BtildeU[1], "BtildeUth",BtildeU[2], "BtildeUph",
# smallbU[0], "smallbUt", smallbU[1], "smallbUr", smallbU[2], "smallbUth",smallbU[3], "smallbUph",
# smallb2,"smallb2",udotu,"udotu"])
FMdisk_Btildes_CKernel = [\
lhrh(lhs=gri.gfaccess("out_gfs","BtildeU0"),rhs=BtildeU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","BtildeU1"),rhs=BtildeU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","BtildeU2"),rhs=BtildeU[2]),\
]
#fin.FD_outputC("stdout",FMdisk_Btildes_CKernel)
```
We will now use the relationships between coordinate systems provided by [reference_metric.py](../edit/reference_metric.py) to convert our expressions to Cartesian coordinates. See [Tutorial-Reference_Metric](Tutorial-Reference_Metric.ipynb) for more detail.
```python
# Now that all derivatives of ghat and gbar have been computed,
# we may now substitute the definitions r = rfm.xxSph[0], th=rfm.xxSph[1],...
# WARNING: Substitution only works when the variable is not an integer. Hence the if not isinstance(...,...) stuff.
# If the variable isn't an integer, we revert transcendental functions inside to normal variables. E.g., sin(x2) -> sinx2
# Reverting to normal variables in this way makes expressions simpler in NRPy, and enables transcendental functions
# to be pre-computed in SENR.
alpha = alpha.subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
for i in range(DIM):
betaU[i] = betaU[i].subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
for j in range(DIM):
gammaDD[i][j] = gammaDD[i][j].subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
KDD[i][j] = KDD[i][j].subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
# GRMHD variables:
# Density and pressure:
hm1 = hm1.subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
rho_initial = rho_initial.subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
Pressure_initial = Pressure_initial.subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
LorentzFactor = LorentzFactor.subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
# "Valencia" three-velocity
for i in range(DIM):
BtildeU[i] = BtildeU[i].subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
uKS4U[i+1] = uKS4U[i+1].subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
uBL4U[i+1] = uBL4U[i+1].subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
Valencia3velocityU[i] = Valencia3velocityU[i].subs(r,rfm.xxSph[0]).subs(th,rfm.xxSph[1]).subs(ph,rfm.xxSph[2])
```
At last, we will use our reference metric formalism and the Jacobian associated with the two coordinate systems to convert the spherical initial data to Cartesian coordinates. The module reference_metric.py provides us with the definition of $r, \theta, \phi$ in Cartesian coordinates. To find the Jacobian needed to transform from spherical to Cartesian, we must find the tensor \begin{equation} \frac{\partial x_i}{\partial y_j}, \end{equation} where $x_i \in \{r,\theta,\phi\}$ and $y_j \in \{x,y,z\}$. We will also compute its inverse.
```python
# uUphi = uKS4U[3]
# uUphi = sympify_integers__replace_rthph(uUphi,r,th,ph,rfm.xxSph[0],rfm.xxSph[1],rfm.xxSph[2])
# uUt = uKS4U[0]
# uUt = sympify_integers__replace_rthph(uUt,r,th,ph,rfm.xxSph[0],rfm.xxSph[1],rfm.xxSph[2])
# Transform initial data to our coordinate system:
# First compute Jacobian and its inverse
drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0],rfm.xx[0]), sp.diff( rfm.xxSph[0],rfm.xx[1]), sp.diff( rfm.xxSph[0],rfm.xx[2])],
[sp.diff(rfm.xxSph[1],rfm.xx[0]), sp.diff(rfm.xxSph[1],rfm.xx[1]), sp.diff(rfm.xxSph[1],rfm.xx[2])],
[sp.diff(rfm.xxSph[2],rfm.xx[0]), sp.diff(rfm.xxSph[2],rfm.xx[1]), sp.diff(rfm.xxSph[2],rfm.xx[2])]])
dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv()
# Declare as gridfunctions the final quantities we will output for the initial data
IDalpha = gri.register_gridfunctions("EVOL","IDalpha")
IDgammaDD = ixp.register_gridfunctions_for_single_rank2("EVOL","IDgammaDD","sym01")
IDKDD = ixp.register_gridfunctions_for_single_rank2("EVOL","IDKDD","sym01")
IDbetaU = ixp.register_gridfunctions_for_single_rank1("EVOL","IDbetaU")
IDValencia3velocityU = ixp.register_gridfunctions_for_single_rank1("EVOL","IDValencia3velocityU")
IDalpha = alpha
for i in range(3):
IDbetaU[i] = 0
IDValencia3velocityU[i] = 0
for j in range(3):
# Matrices are stored in row, column format, so (i,j) <-> (row,column)
IDbetaU[i] += dx__drrefmetric_0UDmatrix[(i,j)]*betaU[j]
IDValencia3velocityU[i] += dx__drrefmetric_0UDmatrix[(i,j)]*Valencia3velocityU[j]
IDgammaDD[i][j] = 0
IDKDD[i][j] = 0
for k in range(3):
for l in range(3):
IDgammaDD[i][j] += drrefmetric__dx_0UDmatrix[(k,i)]*drrefmetric__dx_0UDmatrix[(l,j)]*gammaDD[k][l]
IDKDD[i][j] += drrefmetric__dx_0UDmatrix[(k,i)]*drrefmetric__dx_0UDmatrix[(l,j)]* KDD[k][l]
# -={ Spacetime quantities: Generate C code from expressions and output to file }=-
KerrSchild_to_print = [\
lhrh(lhs=gri.gfaccess("out_gfs","IDalpha"),rhs=IDalpha),\
lhrh(lhs=gri.gfaccess("out_gfs","IDbetaU0"),rhs=IDbetaU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDbetaU1"),rhs=IDbetaU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDbetaU2"),rhs=IDbetaU[2]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDgammaDD00"),rhs=IDgammaDD[0][0]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDgammaDD01"),rhs=IDgammaDD[0][1]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDgammaDD02"),rhs=IDgammaDD[0][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDgammaDD11"),rhs=IDgammaDD[1][1]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDgammaDD12"),rhs=IDgammaDD[1][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDgammaDD22"),rhs=IDgammaDD[2][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDKDD00"),rhs=IDKDD[0][0]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDKDD01"),rhs=IDKDD[0][1]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDKDD02"),rhs=IDKDD[0][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDKDD11"),rhs=IDKDD[1][1]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDKDD12"),rhs=IDKDD[1][2]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDKDD22"),rhs=IDKDD[2][2]),\
]
# -={ GRMHD quantities: Generate C code from expressions and output to file }=-
FMdisk_GRHD_hm1_to_print = [lhrh(lhs=gri.gfaccess("out_gfs","rho_initial"),rhs=rho_initial)]
FMdisk_GRHD_velocities_to_print = [\
lhrh(lhs=gri.gfaccess("out_gfs","IDValencia3velocityU0"),rhs=IDValencia3velocityU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDValencia3velocityU1"),rhs=IDValencia3velocityU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","IDValencia3velocityU2"),rhs=IDValencia3velocityU[2]),\
]
```
To verify this against the old version of FishboneMoncriefID from the old version of NRPy, we use the `mathematica_code()` output function.
```python
# Comment out debug code for now, to reduce this file's size.
#from mathematica_output import *
# print("ID1alpha = " + sp.mathematica_code(IDalpha) + ";")
# print("ID1beta0 = " + sp.mathematica_code(IDbetaU[0]) + ";")
# print("ID1beta1 = " + sp.mathematica_code(IDbetaU[1]) + ";")
# print("ID1beta2 = " + sp.mathematica_code(IDbetaU[2]) + ";")
# print("ID1gamma00 = " + sp.mathematica_code(IDgammaDD[0][0]) + ";")
# print("ID1gamma01 = " + sp.mathematica_code(IDgammaDD[0][1]) + ";")
# print("ID1gamma02 = " + sp.mathematica_code(IDgammaDD[0][2]) + ";")
# print("ID1gamma11 = " + sp.mathematica_code(IDgammaDD[1][1]) + ";")
# print("ID1gamma12 = " + sp.mathematica_code(IDgammaDD[1][2]) + ";")
# print("ID1gamma22 = " + sp.mathematica_code(IDgammaDD[2][2]) + ";")
# print("ID1K00 = " + sp.mathematica_code(IDKDD[0][0]) + ";")
# print("ID1K01 = " + sp.mathematica_code(IDKDD[0][1]) + ";")
# print("ID1K02 = " + sp.mathematica_code(IDKDD[0][2]) + ";")
# print("ID1K11 = " + sp.mathematica_code(IDKDD[1][1]) + ";")
# print("ID1K12 = " + sp.mathematica_code(IDKDD[1][2]) + ";")
# print("ID1K22 = " + sp.mathematica_code(IDKDD[2][2]) + ";")
# print("hm11 = " + sp.mathematica_code(hm1) + ";")
# print("ID1Valencia3velocityU0 = " + sp.mathematica_code(IDValencia3velocityU[0]) + ";")
# print("ID1Valencia3velocityU1 = " + sp.mathematica_code(IDValencia3velocityU[1]) + ";")
# print("ID1Valencia3velocityU2 = " + sp.mathematica_code(IDValencia3velocityU[2]) + ";")
```
<a id='code_validation'></a>
# Step 4: Code Validation against `FishboneMoncriefID.FishboneMoncriefID` NRPy+ module \[Back to [top](#toc)\]
$$\label{code_validation}$$
Here, as a code validation check, we verify agreement in the SymPy expressions for these Fishbone-Moncrief initial data between
1. this tutorial and
2. the NRPy+ [FishboneMoncriefID.FishboneMoncriefID](../edit/FishboneMoncriefID/FishboneMoncriefID.py) module.
```python
gri.glb_gridfcs_list = []
import FishboneMoncriefID.FishboneMoncriefID as fmid
fmid.FishboneMoncriefID()
print("IDalpha - fmid.IDalpha = " + str(IDalpha - fmid.IDalpha))
print("rho_initial - fmid.rho_initial = " + str(rho_initial - fmid.rho_initial))
print("hm1 - fmid.hm1 = " + str(hm1 - fmid.hm1))
for i in range(DIM):
print("IDbetaU["+str(i)+"] - fmid.IDbetaU["+str(i)+"] = " + str(IDbetaU[i] - fmid.IDbetaU[i]))
print("IDValencia3velocityU["+str(i)+"] - fmid.IDValencia3velocityU["+str(i)+"] = "\
+ str(IDValencia3velocityU[i] - fmid.IDValencia3velocityU[i]))
for j in range(DIM):
print("IDgammaDD["+str(i)+"]["+str(j)+"] - fmid.IDgammaDD["+str(i)+"]["+str(j)+"] = "
+ str(IDgammaDD[i][j] - fmid.IDgammaDD[i][j]))
print("IDKDD["+str(i)+"]["+str(j)+"] - fmid.IDKDD["+str(i)+"]["+str(j)+"] = "
+ str(IDKDD[i][j] - fmid.IDKDD[i][j]))
```
IDalpha - fmid.IDalpha = 0
rho_initial - fmid.rho_initial = 0
hm1 - fmid.hm1 = 0
IDbetaU[0] - fmid.IDbetaU[0] = 0
IDValencia3velocityU[0] - fmid.IDValencia3velocityU[0] = 0
IDgammaDD[0][0] - fmid.IDgammaDD[0][0] = 0
IDKDD[0][0] - fmid.IDKDD[0][0] = 0
IDgammaDD[0][1] - fmid.IDgammaDD[0][1] = 0
IDKDD[0][1] - fmid.IDKDD[0][1] = 0
IDgammaDD[0][2] - fmid.IDgammaDD[0][2] = 0
IDKDD[0][2] - fmid.IDKDD[0][2] = 0
IDbetaU[1] - fmid.IDbetaU[1] = 0
IDValencia3velocityU[1] - fmid.IDValencia3velocityU[1] = 0
IDgammaDD[1][0] - fmid.IDgammaDD[1][0] = 0
IDKDD[1][0] - fmid.IDKDD[1][0] = 0
IDgammaDD[1][1] - fmid.IDgammaDD[1][1] = 0
IDKDD[1][1] - fmid.IDKDD[1][1] = 0
IDgammaDD[1][2] - fmid.IDgammaDD[1][2] = 0
IDKDD[1][2] - fmid.IDKDD[1][2] = 0
IDbetaU[2] - fmid.IDbetaU[2] = 0
IDValencia3velocityU[2] - fmid.IDValencia3velocityU[2] = 0
IDgammaDD[2][0] - fmid.IDgammaDD[2][0] = 0
IDKDD[2][0] - fmid.IDKDD[2][0] = 0
IDgammaDD[2][1] - fmid.IDgammaDD[2][1] = 0
IDKDD[2][1] - fmid.IDKDD[2][1] = 0
IDgammaDD[2][2] - fmid.IDgammaDD[2][2] = 0
IDKDD[2][2] - fmid.IDKDD[2][2] = 0
<a id='latex_pdf_output'></a>
# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-FishboneMoncriefID.pdf](Tutorial-FishboneMoncriefID.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```python
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-FishboneMoncriefID.ipynb
!pdflatex -interaction=batchmode Tutorial-FishboneMoncriefID.tex
!pdflatex -interaction=batchmode Tutorial-FishboneMoncriefID.tex
!pdflatex -interaction=batchmode Tutorial-FishboneMoncriefID.tex
!rm -f Tut*.out Tut*.aux Tut*.log
```
[NbConvertApp] Converting notebook Tutorial-FishboneMoncriefID.ipynb to latex
[pandoc warning] Duplicate link reference `[comment]' "source" (line 17, column 1)
[NbConvertApp] Writing 129778 bytes to Tutorial-FishboneMoncriefID.tex
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
|
module Language.JSON.Data
import Data.Bits
import Data.List
import Data.Nat
import Data.String.Extra
import Data.String
%default total
public export
data JSON
= JNull
| JBoolean Bool
| JNumber Double
| JString String
| JArray (List JSON)
| JObject (List (String, JSON))
%name JSON json
private
b16ToHexString : Bits16 -> String
b16ToHexString n =
case n of
0 => "0"
1 => "1"
2 => "2"
3 => "3"
4 => "4"
5 => "5"
6 => "6"
7 => "7"
8 => "8"
9 => "9"
10 => "A"
11 => "B"
12 => "C"
13 => "D"
14 => "E"
15 => "F"
other => assert_total $
b16ToHexString (n `shiftR` fromNat 4) ++
b16ToHexString (n .&. 15)
private
showChar : Char -> String
showChar c
= case c of
'\b' => "\\b"
'\f' => "\\f"
'\n' => "\\n"
'\r' => "\\r"
'\t' => "\\t"
'\\' => "\\\\"
'"' => "\\\""
c => if isControl c || c >= '\127'
then let hex = b16ToHexString (cast $ ord c)
in "\\u" ++ justifyRight 4 '0' hex
else singleton c
private
showString : String -> String
showString x = "\"" ++ concatMap showChar (unpack x) ++ "\""
||| Convert a JSON value into its string representation.
||| No whitespace is added.
private
stringify : JSON -> String
stringify JNull = "null"
stringify (JBoolean x) = if x then "true" else "false"
stringify (JNumber x) = show x
stringify (JString x) = showString x
stringify (JArray xs) = "[" ++ stringifyValues xs ++ "]"
where
stringifyValues : List JSON -> String
stringifyValues [] = ""
stringifyValues (x :: xs) = stringify x
++ if isNil xs
then ""
else "," ++ stringifyValues xs
stringify (JObject xs) = "{" ++ stringifyProps xs ++ "}"
where
stringifyProp : (String, JSON) -> String
stringifyProp (key, value) = showString key ++ ":" ++ stringify value
stringifyProps : List (String, JSON) -> String
stringifyProps [] = ""
stringifyProps (x :: xs) = stringifyProp x
++ if isNil xs
then ""
else "," ++ stringifyProps xs
export
Show JSON where
show = stringify
||| Format a JSON value, indenting by `n` spaces per nesting level.
|||
||| @curr The current indentation amount, measured in spaces.
||| @n The amount of spaces to indent per nesting level.
export
format : {default 0 curr : Nat} -> (n : Nat) -> JSON -> String
format {curr} n json = indent curr $ formatValue curr n json
where
formatValue : (curr, n : Nat) -> JSON -> String
formatValue _ _ (JArray []) = "[]"
formatValue curr n (JArray xs@(_ :: _)) = "[\n" ++ formatValues xs
++ indent curr "]"
where
formatValues : (xs : List JSON) -> {auto ok : NonEmpty xs} -> String
formatValues (x :: xs) = format {curr=(curr + n)} n x
++ case xs of
_ :: _ => ",\n" ++ formatValues xs
[] => "\n"
formatValue _ _ (JObject []) = "{}"
formatValue curr n (JObject xs@(_ :: _)) = "{\n" ++ formatProps xs
++ indent curr "}"
where
formatProp : (String, JSON) -> String
formatProp (key, value) = indent (curr + n) (showString key ++ ": ")
++ formatValue (curr + n) n value
formatProps : (xs : List (String, JSON)) -> {auto ok : NonEmpty xs} -> String
formatProps (x :: xs) = formatProp x
++ case xs of
_ :: _ => ",\n" ++ formatProps xs
[] => "\n"
formatValue _ _ x = stringify x
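-- Illustrative usage (a sketch, not part of the original module):
--   show (JObject [("a", JNumber 1), ("b", JArray [JBoolean True, JNull])])
-- yields the compact string {"a":1.0,"b":[true,null]}, while `format 2` on
-- the same value pretty-prints it with two-space indentation per level.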
|
# A simple SymPy example
First we import SymPy and initialize printing:
```
from sympy import init_printing
from sympy import *
init_printing()
```
Create a few symbols:
```
x,y,z = symbols('x y z')
```
Here is a basic expression:
```
e = x**2 + 2.0*y + sin(z); e
```
```
diff(e, x)
```
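Differentiating with respect to `x` gives `2*x` (the `2.0*y` and `sin(z)` terms vanish).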
```
integrate(e, z)
```
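Integrating with respect to `z` gives `x**2*z + 2.0*y*z - cos(z)`.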
|
function [c ceq gradc gradceq] = hs71C(x)
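% Nonlinear constraints for Hock-Schittkowski problem 71 (explanatory note):
% c <= 0 encodes the inequality prod(x) >= 25, and ceq = 0 encodes the
% equality sum(x.^2) = 40. The gradient below uses the identity
% d/dx_i prod(x) = prod(x)/x_i, which holds for nonzero x_i.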
c = -(prod(x)-25);
ceq = sum(x.^2)-40;
if(nargout > 2)
gradc = -(prod(x)./x')';
gradceq = 2*x;
end
|
/*
* SOCI_ConvertersLong.hpp
*
* Created on: 29 Aug 2016
* Author: gishara
*/
//Copyright (c) 2016 Singapore-MIT Alliance for Research and Technology
//Licensed under the terms of the MIT License, as described in the file:
// license.txt (http://opensource.org/licenses/MIT)
#pragma once
#include <boost/algorithm/string.hpp>
#include <boost/tokenizer.hpp>
#include <database/entity/JobsByIndustryTypeByTaz.hpp>
#include <database/entity/WorkersGrpByLogsumParams.hpp>
#include <map>
#include <soci/soci.h>
#include <string>
#include "database/entity/HedonicCoeffs.hpp"
#include "database/entity/LagPrivateT.hpp"
#include "database/entity/LtVersion.hpp"
#include "database/entity/HedonicLogsums.hpp"
#include "database/entity/TAOByUnitType.hpp"
#include "database/entity/StudyArea.hpp"
#include "database/entity/JobsWithIndustryTypeAndTazId.hpp"
#include "database/entity/ResidentialWTP_Coefs.hpp"
#include "database/entity/StudentStop.hpp"
#include "database/entity/EzLinkStop.hpp"
using namespace sim_mob;
using namespace long_term;
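// Note: every type_conversion specialisation below follows the same pattern --
// from_base() copies the columns of a result-set row into the corresponding
// long-term entity, falling back to the supplied default when a column is null.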
namespace soci
{
template<>
struct type_conversion<sim_mob::long_term::HedonicCoeffs>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::HedonicCoeffs& hedonicCoeffs)
{
hedonicCoeffs.setPropertyTypeId(values.get<BigSerial>("property_type_id", INVALID_ID));
hedonicCoeffs.setIntercept(values.get<double>("intercept", 0));
hedonicCoeffs.setLogSqrtArea(values.get<double>("log_area", 0));
hedonicCoeffs.setFreehold( values.get<double>("freehold", 0));
hedonicCoeffs.setLogsumWeighted(values.get<double>("logsum_weighted", 0));
hedonicCoeffs.setPms1km(values.get<double>("pms_1km", 0));
hedonicCoeffs.setDistanceMallKm(values.get<double>("distance_mall_km", 0));
hedonicCoeffs.setMrt200m(values.get<double>("mrt_200m", 0));
hedonicCoeffs.setMrt_2_400m(values.get<double>("mrt_2_400m", 0));
hedonicCoeffs.setExpress200m(values.get<double>("express_200m", 0));
hedonicCoeffs.setBus400m(values.get<double>("bus2_400m", 0));
hedonicCoeffs.setBusGt400m(values.get<double>("bus_gt400m", 0));
hedonicCoeffs.setAge(values.get<double>("age", 0));
hedonicCoeffs.setLogAgeSquared(values.get<double>("age_squared", 0));
hedonicCoeffs.setMisage(values.get<double>("misage", 0));
hedonicCoeffs.setStorey(values.get<double>("storey", 0));
hedonicCoeffs.setStoreySquared(values.get<double>("storey_squared", 0));
}
};
template<>
struct type_conversion<sim_mob::long_term::HedonicCoeffsByUnitType>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::HedonicCoeffsByUnitType& hedonicCoeffsByUT)
{
hedonicCoeffsByUT.setUnitTypeId(values.get<BigSerial>("unit_type_id", INVALID_ID));
hedonicCoeffsByUT.setIntercept(values.get<double>("intercept", 0));
hedonicCoeffsByUT.setLogArea(values.get<double>("log_area", 0));
hedonicCoeffsByUT.setFreehold( values.get<double>("freehold", 0));
hedonicCoeffsByUT.setLogsumWeighted(values.get<double>("logsum_weighted", 0));
hedonicCoeffsByUT.setPms1km(values.get<double>("pms_1km", 0));
hedonicCoeffsByUT.setDistanceMallKm(values.get<double>("distance_mall_km", 0));
hedonicCoeffsByUT.setMrt200m(values.get<double>("mrt_200m", 0));
hedonicCoeffsByUT.setMrt2400m(values.get<double>("mrt_2_400m", 0));
hedonicCoeffsByUT.setExpress200m(values.get<double>("express_200m", 0));
hedonicCoeffsByUT.setBus2400m(values.get<double>("bus2_400m", 0));
hedonicCoeffsByUT.setBusGt400m(values.get<double>("bus_gt400m", 0));
hedonicCoeffsByUT.setAge(values.get<double>("age", 0));
hedonicCoeffsByUT.setAgeSquared(values.get<double>("age_squared", 0));
hedonicCoeffsByUT.setMisage(values.get<double>("misage", 0));
hedonicCoeffsByUT.setNonMature(values.get<double>("non_mature", 0));
hedonicCoeffsByUT.setOtherMature(values.get<double>("other_mature", 0));
hedonicCoeffsByUT.setStorey(values.get<double>("storey", 0));
hedonicCoeffsByUT.setStoreySquared(values.get<double>("storey_squared", 0));
}
};
template<>
struct type_conversion<sim_mob::long_term::LagPrivateT>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::LagPrivateT& lagPrivateT)
{
lagPrivateT.setPropertyTypeId(values.get<BigSerial>("property_type_id", INVALID_ID));
lagPrivateT.setIntercept(values.get<double>("intercept", 0));
lagPrivateT.setT4(values.get<double>("t4", 0));
}
};
template<>
struct type_conversion<sim_mob::long_term::LtVersion>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::LtVersion& ltVersion)
{
ltVersion.setId(values.get<long long>("id", INVALID_ID));
ltVersion.setBase_version(values.get<string>("base_version", ""));
ltVersion.setChange_date(values.get<std::tm>("change_date", tm()));
ltVersion.setComments(values.get<string>("comments", ""));
ltVersion.setUser_id(values.get<string>("userid", ""));
}
};
template<>
struct type_conversion<sim_mob::long_term::HedonicLogsums>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::HedonicLogsums& hedonicLogsums)
{
hedonicLogsums.setTazId(values.get<long long>("taz_id", INVALID_ID));
hedonicLogsums.setLogsumWeighted(values.get<double>("logsum_weighted", 0));
}
};
template<>
struct type_conversion<sim_mob::long_term::WorkersGrpByLogsumParams>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::WorkersGrpByLogsumParams& workersGrpByLogsumParams)
{
workersGrpByLogsumParams.setIndividualId(values.get<long long>("id", INVALID_ID));
workersGrpByLogsumParams.setLogsumCharacteristicsGroupId(values.get<int>("rowid", 0));
}
};
template<>
struct type_conversion<sim_mob::long_term::BuildingMatch>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::BuildingMatch& buildingMatch)
{
buildingMatch.setFm_building(values.get<long long>("fm_building_id", INVALID_ID));
buildingMatch.setFm_building_id_2008(values.get<long long>("fm_building_id_2008", INVALID_ID));
buildingMatch.setSla_building_id(values.get<string>("sla_building_id", ""));
buildingMatch.setSla_inc_cnc(values.get<string>("sla_inc_cnc",""));
buildingMatch.setMatch_code(values.get<int>("match_code",0));
buildingMatch.setMatch_date(values.get<tm>("match_date",tm()));
}
};
template<>
struct type_conversion<sim_mob::long_term::SlaBuilding>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::SlaBuilding& slaBuilding)
{
slaBuilding.setSla_address_id(values.get<long long>("sla_address_id",0));
slaBuilding.setSla_building_id(values.get<string>("sla_building_id",""));
slaBuilding.setSla_inc_crc(values.get<string>("sla_inc_crc",""));
}
};
template<>
struct type_conversion<sim_mob::long_term::TAOByUnitType>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::TAOByUnitType& taoByUT)
{
taoByUT.setId(values.get<BigSerial>("id",0));
taoByUT.setQuarter(values.get<std::string>("quarter",std::string()));
taoByUT.setTreasuryBillYield1Year(values.get<double>("treasury_bill_yield_1year",0));
taoByUT.setGdpRate(values.get<double>("gdp_rate",0));
taoByUT.setInflation(values.get<double>("inflation",0));
taoByUT.setTApartment7(values.get<double>("tapt_7",0));
taoByUT.setTApartment8(values.get<double>("tapt_8",0));
taoByUT.setTApartment9(values.get<double>("tapt_9",0));
taoByUT.setTApartment10(values.get<double>("tapt_10",0));
taoByUT.setTApartment11(values.get<double>("tapt_11",0));
taoByUT.setTCondo12(values.get<double>("tcondo_12",0));
taoByUT.setTCondo13(values.get<double>("tcondo_13",0));
taoByUT.setTCondo14(values.get<double>("tcondo_14",0));
taoByUT.setTCondo15(values.get<double>("tcondo_15",0));
taoByUT.setTCondo16(values.get<double>("tcondo_16",0));
}
};
template<>
struct type_conversion<sim_mob::long_term::LagPrivate_TByUnitType>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::LagPrivate_TByUnitType& lagPvtTByUT)
{
lagPvtTByUT.setUnitTypeId(values.get<BigSerial>("unit_type_id",0));
lagPvtTByUT.setIntercept(values.get<double>("intercept",0));
lagPvtTByUT.setT4(values.get<double>("t4",0));
lagPvtTByUT.setT5(values.get<double>("t5",0));
lagPvtTByUT.setT6(values.get<double>("t6",0));
lagPvtTByUT.setT7(values.get<double>("t7",0));
lagPvtTByUT.setGdpRate(values.get<double>("gdp_rate",0));
}
};
template<>
struct type_conversion<sim_mob::long_term::StudyArea>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::StudyArea& studyArea)
{
studyArea.setId(values.get<BigSerial>("id",0));
studyArea.setFmTazId(values.get<BigSerial>("fm_taz_id",0));
studyArea.setStudyCode(values.get<std::string>("study_code",std::string()));
}
};
template<>
struct type_conversion<sim_mob::long_term::JobAssignmentCoeffs>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::JobAssignmentCoeffs& jobAssignmentCoeff)
{
jobAssignmentCoeff.setId(values.get<int>("id",0));
jobAssignmentCoeff.setBetaInc1(values.get<double>("beta_inc1",0));
jobAssignmentCoeff.setBetaInc2(values.get<double>("beta_inc2",0));
jobAssignmentCoeff.setBetaInc3(values.get<double>("beta_inc3",0));
jobAssignmentCoeff.setBetaLgs(values.get<double>("beta_lgs",0));
jobAssignmentCoeff.setBetaS1(values.get<double>("beta_s1",0));
jobAssignmentCoeff.setBetaS2(values.get<double>("beta_s2",0));
jobAssignmentCoeff.setBetaS3(values.get<double>("beta_s3",0));
jobAssignmentCoeff.setBetaS4(values.get<double>("beta_s4",0));
jobAssignmentCoeff.setBetaS5(values.get<double>("beta_s5",0));
jobAssignmentCoeff.setBetaS6(values.get<double>("beta_s6",0));
jobAssignmentCoeff.setBetaS7(values.get<double>("beta_s7",0));
jobAssignmentCoeff.setBetaS8(values.get<double>("beta_s8",0));
jobAssignmentCoeff.setBetaS9(values.get<double>("beta_s9",0));
jobAssignmentCoeff.setBetaS10(values.get<double>("beta_s10",0));
jobAssignmentCoeff.setBetaS11(values.get<double>("beta_s11",0));
jobAssignmentCoeff.setBetaS98(values.get<double>("beta_s98",0));
jobAssignmentCoeff.setBetaLnJob(values.get<double>("beta_lnjob",0));
}
};
template<>
struct type_conversion<sim_mob::long_term::JobsByIndustryTypeByTaz>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::JobsByIndustryTypeByTaz& jobsByIndustryByTaz)
{
jobsByIndustryByTaz.setTazId(values.get<BigSerial>("taz_id",0));
jobsByIndustryByTaz.setIndustryType1(values.get<int>("industry1",0));
jobsByIndustryByTaz.setIndustryType2(values.get<int>("industry2",0));
jobsByIndustryByTaz.setIndustryType3(values.get<int>("industry3",0));
jobsByIndustryByTaz.setIndustryType4(values.get<int>("industry4",0));
jobsByIndustryByTaz.setIndustryType5(values.get<int>("industry5",0));
jobsByIndustryByTaz.setIndustryType6(values.get<int>("industry6",0));
jobsByIndustryByTaz.setIndustryType7(values.get<int>("industry7",0));
jobsByIndustryByTaz.setIndustryType8(values.get<int>("industry8",0));
jobsByIndustryByTaz.setIndustryType9(values.get<int>("industry9",0));
jobsByIndustryByTaz.setIndustryType10(values.get<int>("industry10",0));
jobsByIndustryByTaz.setIndustryType11(values.get<int>("industry11",0));
jobsByIndustryByTaz.setIndustryType98(values.get<int>("industry98",0));
}
};
template<>
struct type_conversion<sim_mob::long_term::IndLogsumJobAssignment>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::IndLogsumJobAssignment& indLogsumJobAssignment)
{
indLogsumJobAssignment.setIndividualId(values.get<BigSerial>("individual_id",0));
indLogsumJobAssignment.setTazId(values.get<std::string>("taz_id", std::string()));
indLogsumJobAssignment.setLogsum(values.get<float>("logsum", 0.0f));
}
};
template<>
struct type_conversion<sim_mob::long_term::JobsWithIndustryTypeAndTazId>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::JobsWithIndustryTypeAndTazId& jobsWithIndustryTypeAndTazId)
{
jobsWithIndustryTypeAndTazId.setJobId(values.get<BigSerial>("job_id",0));
jobsWithIndustryTypeAndTazId.setIndustryTypeId(values.get<int>("industry_type_id",0));
jobsWithIndustryTypeAndTazId.setTazId(values.get<BigSerial>("taz_id",0));
}
};
template<>
struct type_conversion<sim_mob::long_term::School>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::School& school)
{
school.setId(values.get<BigSerial>("id",0));
school.setFmBuildingId(values.get<BigSerial>("fm_building_id",0));
school.setFloorArea(values.get<double>("floor_area",0));
school.setSchoolSlot(values.get<double>("school_slot",0));
school.setCentroidX(values.get<double>("centroid_x",0));
school.setCentroidY(values.get<double>("centroid_y",0));
school.setGiftedProgram(values.get<int>("gifted_program",0));
school.setSapProgram(values.get<int>("sap_program",0));
school.setPlanningArea(values.get<std::string>("planning_area",std::string()));
school.setTazName(values.get<BigSerial>("taz_name",0));
school.setSchoolType(values.get<std::string>("school_type",std::string()));
school.setArtProgram(values.get<int>("art_program",0));
school.setMusicProgram(values.get<int>("music_program",0));
school.setLangProgram(values.get<int>("lang_program",0));
school.setStudentDensity(values.get<double>("student_den",0));
school.setExpressTest(values.get<int>("express_test",0));
}
};
template<>
struct type_conversion<sim_mob::long_term::EzLinkStop>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::EzLinkStop& ezLinkStop)
{
ezLinkStop.setId(values.get<BigSerial>("id",0));
ezLinkStop.setXCoord(values.get<double>("x_coord",0));
ezLinkStop.setYCoord(values.get<double>("y_coord",0));
}
};
template<>
struct type_conversion<sim_mob::long_term::StudentStop>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::StudentStop& studentStop)
{
studentStop.setHomeStopEzLinkId(values.get<int>("home_stop_ez_link_id",0));
studentStop.setSchoolStopEzLinkId(values.get<int>("school_stop_ez_link_id",0));
}
};
template<>
struct type_conversion<sim_mob::long_term::SchoolDesk>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::SchoolDesk& schoolDesk)
{
schoolDesk.setSchoolDeskId(values.get<BigSerial>("school_desk_id",0));
schoolDesk.setSchoolId(values.get<BigSerial>("school_id",0));
}
};
template<>
struct type_conversion<sim_mob::long_term::ResidentialWTP_Coefs>
{
typedef values base_type;
static void
from_base(soci::values const & values, soci::indicator & indicator, sim_mob::long_term::ResidentialWTP_Coefs& wtpCoeffs)
{
wtpCoeffs.setId(values.get<int>("id",0));
wtpCoeffs.setPropertyType(values.get<std::string>("property_type",std::string()));
wtpCoeffs.setSde(values.get<double>("sde",0));
wtpCoeffs.setM2(values.get<double>("m2",0));
wtpCoeffs.setS2(values.get<double>("s2",0));
wtpCoeffs.setConstant(values.get<double>("constant",0));
wtpCoeffs.setLogArea(values.get<double>("log_area",0));
wtpCoeffs.setLogsumTaz(values.get<double>("logsum_taz",0));
wtpCoeffs.setAge(values.get<double>("age",0));
wtpCoeffs.setAgeSquared(values.get<double>("age_squared",0));
wtpCoeffs.setMissingAgeDummy(values.get<double>("missing_age_dummy",0));
wtpCoeffs.setCarDummy(values.get<double>("car_dummy",0));
wtpCoeffs.setCarIntoLogsumTaz(values.get<double>("car_into_logsum_taz",0));
wtpCoeffs.setDistanceMall(values.get<double>("distance_mall",0));
wtpCoeffs.setMrt200m400m(values.get<double>("mrt_200_400m_dummy",0));
wtpCoeffs.setMatureDummy(values.get<double>("mature_dummy",0));
wtpCoeffs.setMatureOtherDummy(values.get<double>("mature_other_dummy",0));
wtpCoeffs.setFloorNumber(values.get<double>("floor_number",0));
wtpCoeffs.setLogIncome(values.get<double>("log_income",0));
wtpCoeffs.setLogIncomeIntoLogArea(values.get<double>("log_income_into_log_area",0));
wtpCoeffs.setFreeholdApartment(values.get<double>("freehold_apartment",0));
wtpCoeffs.setFreeholdCondo(values.get<double>("freehold_condo",0));
wtpCoeffs.setFreeholdTerrace(values.get<double>("freehold_terrace",0));
wtpCoeffs.setFreeholdDetached(values.get<double>("freehold_detached",0));
wtpCoeffs.setBus200m400mDummy(values.get<double>("bus_200_400m_dummy",0));
wtpCoeffs.setOneTwoFullTimeWorkerDummy(values.get<double>("one_two_fulltime_worker_dummy",0));
wtpCoeffs.setFullTimeWorkersTwoIntoLogArea(values.get<double>("fulltime_workers2_into_log_area",0));
wtpCoeffs.setHhSizeworkersDiff(values.get<double>("hh_size_wrokers_diff",0));
}
};
} //namespace soci
|
= Why Does It Hurt So Bad =
|
{-# OPTIONS --without-K --exact-split --safe #-}
module Fragment.Setoid.Morphism.Properties where
open import Fragment.Setoid.Morphism.Base
open import Fragment.Setoid.Morphism.Setoid
open import Level using (Level)
open import Relation.Binary using (Setoid)
private
variable
a b c d ℓ₁ ℓ₂ ℓ₃ ℓ₄ : Level
A : Setoid a ℓ₁
B : Setoid b ℓ₂
C : Setoid c ℓ₃
D : Setoid d ℓ₄
id-unitˡ : ∀ {f : A ↝ B} → id · f ≗ f
id-unitˡ {B = B} = Setoid.refl B
id-unitʳ : ∀ {f : A ↝ B} → f · id ≗ f
id-unitʳ {B = B} = Setoid.refl B
·-assoc : ∀ (h : C ↝ D) (g : B ↝ C) (f : A ↝ B)
→ (h · g) · f ≗ h · (g · f)
·-assoc {D = D} _ _ _ = Setoid.refl D
·-congˡ : ∀ (h : B ↝ C) (f g : A ↝ B)
→ f ≗ g
→ h · f ≗ h · g
·-congˡ h _ _ f≗g = ∣ h ∣-cong f≗g
·-congʳ : ∀ (h : A ↝ B) (f g : B ↝ C)
→ f ≗ g
→ f · h ≗ g · h
·-congʳ _ _ _ f≗g = f≗g
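-- Observation (not used elsewhere in this module): id-unitˡ, id-unitʳ and
-- ·-assoc together show that setoids and their morphisms form a category.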
|
(* Title: HOL/Quotient.thy
Author: Cezary Kaliszyk and Christian Urban
*)
section \<open>Definition of Quotient Types\<close>
theory Quotient
imports Lifting
keywords
"print_quotmapsQ3" "print_quotientsQ3" "print_quotconsts" :: diag and
"quotient_type" :: thy_goal and "/" and
"quotient_definition" :: thy_goal
begin
text \<open>
Basic definition for equivalence relations
that are represented by predicates.
\<close>
text \<open>Composition of Relations\<close>
abbreviation
rel_conj :: "('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> bool" (infixr "OOO" 75)
where
"r1 OOO r2 \<equiv> r1 OO r2 OO r1"
lemma eq_comp_r:
shows "((op =) OOO R) = R"
by (auto simp add: fun_eq_iff)
context includes lifting_syntax
begin
subsection \<open>Quotient Predicate\<close>
definition
"Quotient3 R Abs Rep \<longleftrightarrow>
(\<forall>a. Abs (Rep a) = a) \<and> (\<forall>a. R (Rep a) (Rep a)) \<and>
(\<forall>r s. R r s \<longleftrightarrow> R r r \<and> R s s \<and> Abs r = Abs s)"
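(* Illustrative remark, not part of the original theory: Quotient3 R Abs Rep
   states that Abs is a left inverse of Rep, that Rep lands in the domain of
   R, and that Abs identifies exactly the R-related elements; when R is a
   total equivalence this reduces to "R r s \<longleftrightarrow> Abs r = Abs s". *)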
lemma Quotient3I:
assumes "\<And>a. Abs (Rep a) = a"
and "\<And>a. R (Rep a) (Rep a)"
and "\<And>r s. R r s \<longleftrightarrow> R r r \<and> R s s \<and> Abs r = Abs s"
shows "Quotient3 R Abs Rep"
using assms unfolding Quotient3_def by blast
context
fixes R Abs Rep
assumes a: "Quotient3 R Abs Rep"
begin
lemma Quotient3_abs_rep:
"Abs (Rep a) = a"
using a
unfolding Quotient3_def
by simp
lemma Quotient3_rep_reflp:
"R (Rep a) (Rep a)"
using a
unfolding Quotient3_def
by blast
lemma Quotient3_rel:
"R r r \<and> R s s \<and> Abs r = Abs s \<longleftrightarrow> R r s" \<comment> \<open>orientation does not loop on rewriting\<close>
using a
unfolding Quotient3_def
by blast
lemma Quotient3_refl1:
"R r s \<Longrightarrow> R r r"
using a unfolding Quotient3_def
by fast
lemma Quotient3_refl2:
"R r s \<Longrightarrow> R s s"
using a unfolding Quotient3_def
by fast
lemma Quotient3_rel_rep:
"R (Rep a) (Rep b) \<longleftrightarrow> a = b"
using a
unfolding Quotient3_def
by metis
lemma Quotient3_rep_abs:
"R r r \<Longrightarrow> R (Rep (Abs r)) r"
using a unfolding Quotient3_def
by blast
lemma Quotient3_rel_abs:
"R r s \<Longrightarrow> Abs r = Abs s"
using a unfolding Quotient3_def
by blast
lemma Quotient3_symp:
"symp R"
using a unfolding Quotient3_def using sympI by metis
lemma Quotient3_transp:
"transp R"
using a unfolding Quotient3_def using transpI by (metis (full_types))
lemma Quotient3_part_equivp:
"part_equivp R"
by (metis Quotient3_rep_reflp Quotient3_symp Quotient3_transp part_equivpI)
lemma abs_o_rep:
"Abs o Rep = id"
unfolding fun_eq_iff
by (simp add: Quotient3_abs_rep)
lemma equals_rsp:
assumes b: "R xa xb" "R ya yb"
shows "R xa ya = R xb yb"
using b Quotient3_symp Quotient3_transp
by (blast elim: sympE transpE)
lemma rep_abs_rsp:
assumes b: "R x1 x2"
shows "R x1 (Rep (Abs x2))"
using b Quotient3_rel Quotient3_abs_rep Quotient3_rep_reflp
by metis
lemma rep_abs_rsp_left:
assumes b: "R x1 x2"
shows "R (Rep (Abs x1)) x2"
using b Quotient3_rel Quotient3_abs_rep Quotient3_rep_reflp
by metis
end
lemma identity_quotient3:
"Quotient3 (op =) id id"
unfolding Quotient3_def id_def
by blast
lemma fun_quotient3:
assumes q1: "Quotient3 R1 abs1 rep1"
and q2: "Quotient3 R2 abs2 rep2"
shows "Quotient3 (R1 ===> R2) (rep1 ---> abs2) (abs1 ---> rep2)"
proof -
have "\<And>a.(rep1 ---> abs2) ((abs1 ---> rep2) a) = a"
using q1 q2 by (simp add: Quotient3_def fun_eq_iff)
moreover
have "\<And>a.(R1 ===> R2) ((abs1 ---> rep2) a) ((abs1 ---> rep2) a)"
by (rule rel_funI)
(insert q1 q2 Quotient3_rel_abs [of R1 abs1 rep1] Quotient3_rel_rep [of R2 abs2 rep2],
simp (no_asm) add: Quotient3_def, simp)
moreover
{
fix r s
have "(R1 ===> R2) r s = ((R1 ===> R2) r r \<and> (R1 ===> R2) s s \<and>
(rep1 ---> abs2) r = (rep1 ---> abs2) s)"
proof -
have "(R1 ===> R2) r s \<Longrightarrow> (R1 ===> R2) r r" unfolding rel_fun_def
using Quotient3_part_equivp[OF q1] Quotient3_part_equivp[OF q2]
by (metis (full_types) part_equivp_def)
moreover have "(R1 ===> R2) r s \<Longrightarrow> (R1 ===> R2) s s" unfolding rel_fun_def
using Quotient3_part_equivp[OF q1] Quotient3_part_equivp[OF q2]
by (metis (full_types) part_equivp_def)
moreover have "(R1 ===> R2) r s \<Longrightarrow> (rep1 ---> abs2) r = (rep1 ---> abs2) s"
apply(auto simp add: rel_fun_def fun_eq_iff) using q1 q2 unfolding Quotient3_def by metis
moreover have "((R1 ===> R2) r r \<and> (R1 ===> R2) s s \<and>
(rep1 ---> abs2) r = (rep1 ---> abs2) s) \<Longrightarrow> (R1 ===> R2) r s"
apply(auto simp add: rel_fun_def fun_eq_iff) using q1 q2 unfolding Quotient3_def
by (metis map_fun_apply)
ultimately show ?thesis by blast
qed
}
ultimately show ?thesis by (intro Quotient3I) (assumption+)
qed
lemma lambda_prs:
assumes q1: "Quotient3 R1 Abs1 Rep1"
and q2: "Quotient3 R2 Abs2 Rep2"
shows "(Rep1 ---> Abs2) (\<lambda>x. Rep2 (f (Abs1 x))) = (\<lambda>x. f x)"
unfolding fun_eq_iff
using Quotient3_abs_rep[OF q1] Quotient3_abs_rep[OF q2]
by simp
lemma lambda_prs1:
assumes q1: "Quotient3 R1 Abs1 Rep1"
and q2: "Quotient3 R2 Abs2 Rep2"
shows "(Rep1 ---> Abs2) (\<lambda>x. (Abs1 ---> Rep2) f x) = (\<lambda>x. f x)"
unfolding fun_eq_iff
using Quotient3_abs_rep[OF q1] Quotient3_abs_rep[OF q2]
by simp
text\<open>
  In the following theorem, R1 can be instantiated with anything; however,
  we know some of the types of the Rep and Abs functions, so by solving the
  Quotient assumptions we can obtain a unique R1 that is provable. This is
  why we need to use \<open>apply_rsp\<close> and not the primed version.\<close>
lemma apply_rspQ3:
fixes f g::"'a \<Rightarrow> 'c"
assumes q: "Quotient3 R1 Abs1 Rep1"
and a: "(R1 ===> R2) f g" "R1 x y"
shows "R2 (f x) (g y)"
using a by (auto elim: rel_funE)
lemma apply_rspQ3'':
assumes "Quotient3 R Abs Rep"
and "(R ===> S) f f"
shows "S (f (Rep x)) (f (Rep x))"
proof -
from assms(1) have "R (Rep x) (Rep x)" by (rule Quotient3_rep_reflp)
then show ?thesis using assms(2) by (auto intro: apply_rsp')
qed
subsection \<open>lemmas for regularisation of ball and bex\<close>
lemma ball_reg_eqv:
fixes P :: "'a \<Rightarrow> bool"
assumes a: "equivp R"
shows "Ball (Respects R) P = (All P)"
using a
unfolding equivp_def
by (auto simp add: in_respects)
lemma bex_reg_eqv:
fixes P :: "'a \<Rightarrow> bool"
assumes a: "equivp R"
shows "Bex (Respects R) P = (Ex P)"
using a
unfolding equivp_def
by (auto simp add: in_respects)
lemma ball_reg_right:
assumes a: "\<And>x. x \<in> R \<Longrightarrow> P x \<longrightarrow> Q x"
shows "All P \<longrightarrow> Ball R Q"
using a by fast
lemma bex_reg_left:
assumes a: "\<And>x. x \<in> R \<Longrightarrow> Q x \<longrightarrow> P x"
shows "Bex R Q \<longrightarrow> Ex P"
using a by fast
lemma ball_reg_left:
assumes a: "equivp R"
shows "(\<And>x. (Q x \<longrightarrow> P x)) \<Longrightarrow> Ball (Respects R) Q \<longrightarrow> All P"
using a by (metis equivp_reflp in_respects)
lemma bex_reg_right:
assumes a: "equivp R"
shows "(\<And>x. (Q x \<longrightarrow> P x)) \<Longrightarrow> Ex Q \<longrightarrow> Bex (Respects R) P"
using a by (metis equivp_reflp in_respects)
lemma ball_reg_eqv_range:
fixes P::"'a \<Rightarrow> bool"
and x::"'a"
assumes a: "equivp R2"
shows "(Ball (Respects (R1 ===> R2)) (\<lambda>f. P (f x)) = All (\<lambda>f. P (f x)))"
apply(rule iffI)
apply(rule allI)
apply(drule_tac x="\<lambda>y. f x" in bspec)
apply(simp add: in_respects rel_fun_def)
apply(rule impI)
using a equivp_reflp_symp_transp[of "R2"]
apply (auto elim: equivpE reflpE)
done
lemma bex_reg_eqv_range:
assumes a: "equivp R2"
shows "(Bex (Respects (R1 ===> R2)) (\<lambda>f. P (f x)) = Ex (\<lambda>f. P (f x)))"
apply(auto)
apply(rule_tac x="\<lambda>y. f x" in bexI)
apply(simp)
apply(simp add: Respects_def in_respects rel_fun_def)
apply(rule impI)
using a equivp_reflp_symp_transp[of "R2"]
apply (auto elim: equivpE reflpE)
done
(* Next four lemmas are unused *)
lemma all_reg:
assumes a: "!x :: 'a. (P x --> Q x)"
and b: "All P"
shows "All Q"
using a b by fast
lemma ex_reg:
assumes a: "!x :: 'a. (P x --> Q x)"
and b: "Ex P"
shows "Ex Q"
using a b by fast
lemma ball_reg:
assumes a: "!x :: 'a. (x \<in> R --> P x --> Q x)"
and b: "Ball R P"
shows "Ball R Q"
using a b by fast
lemma bex_reg:
assumes a: "!x :: 'a. (x \<in> R --> P x --> Q x)"
and b: "Bex R P"
shows "Bex R Q"
using a b by fast
lemma ball_all_comm:
assumes "\<And>y. (\<forall>x\<in>P. A x y) \<longrightarrow> (\<forall>x. B x y)"
shows "(\<forall>x\<in>P. \<forall>y. A x y) \<longrightarrow> (\<forall>x. \<forall>y. B x y)"
using assms by auto
lemma bex_ex_comm:
assumes "(\<exists>y. \<exists>x. A x y) \<longrightarrow> (\<exists>y. \<exists>x\<in>P. B x y)"
shows "(\<exists>x. \<exists>y. A x y) \<longrightarrow> (\<exists>x\<in>P. \<exists>y. B x y)"
using assms by auto
subsection \<open>Bounded abstraction\<close>
definition
Babs :: "'a set \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'b"
where
"x \<in> p \<Longrightarrow> Babs p m x = m x"
lemma babs_rsp:
assumes q: "Quotient3 R1 Abs1 Rep1"
and a: "(R1 ===> R2) f g"
shows "(R1 ===> R2) (Babs (Respects R1) f) (Babs (Respects R1) g)"
apply (auto simp add: Babs_def in_respects rel_fun_def)
apply (subgoal_tac "x \<in> Respects R1 \<and> y \<in> Respects R1")
using a apply (simp add: Babs_def rel_fun_def)
apply (simp add: in_respects rel_fun_def)
using Quotient3_rel[OF q]
by metis
lemma babs_prs:
assumes q1: "Quotient3 R1 Abs1 Rep1"
and q2: "Quotient3 R2 Abs2 Rep2"
shows "((Rep1 ---> Abs2) (Babs (Respects R1) ((Abs1 ---> Rep2) f))) = f"
apply (rule ext)
apply (simp add:)
apply (subgoal_tac "Rep1 x \<in> Respects R1")
apply (simp add: Babs_def Quotient3_abs_rep[OF q1] Quotient3_abs_rep[OF q2])
apply (simp add: in_respects Quotient3_rel_rep[OF q1])
done
lemma babs_simp:
assumes q: "Quotient3 R1 Abs Rep"
shows "((R1 ===> R2) (Babs (Respects R1) f) (Babs (Respects R1) g)) = ((R1 ===> R2) f g)"
apply(rule iffI)
apply(simp_all only: babs_rsp[OF q])
apply(auto simp add: Babs_def rel_fun_def)
apply (subgoal_tac "x \<in> Respects R1 \<and> y \<in> Respects R1")
apply(metis Babs_def)
apply (simp add: in_respects)
using Quotient3_rel[OF q]
by metis
(* If a user proves that a particular functional relation
is an equivalence this may be useful in regularising *)
lemma babs_reg_eqv:
shows "equivp R \<Longrightarrow> Babs (Respects R) P = P"
by (simp add: fun_eq_iff Babs_def in_respects equivp_reflp)
(* 3 lemmas needed for proving repabs_inj *)
lemma ball_rsp:
assumes a: "(R ===> (op =)) f g"
shows "Ball (Respects R) f = Ball (Respects R) g"
using a by (auto simp add: Ball_def in_respects elim: rel_funE)
lemma bex_rsp:
assumes a: "(R ===> (op =)) f g"
shows "(Bex (Respects R) f = Bex (Respects R) g)"
using a by (auto simp add: Bex_def in_respects elim: rel_funE)
lemma bex1_rsp:
assumes a: "(R ===> (op =)) f g"
shows "Ex1 (\<lambda>x. x \<in> Respects R \<and> f x) = Ex1 (\<lambda>x. x \<in> Respects R \<and> g x)"
using a by (auto elim: rel_funE simp add: Ex1_def in_respects)
(* 2 lemmas needed for cleaning of quantifiers *)
lemma all_prs:
assumes a: "Quotient3 R absf repf"
shows "Ball (Respects R) ((absf ---> id) f) = All f"
using a unfolding Quotient3_def Ball_def in_respects id_apply comp_def map_fun_def
by metis
lemma ex_prs:
assumes a: "Quotient3 R absf repf"
shows "Bex (Respects R) ((absf ---> id) f) = Ex f"
using a unfolding Quotient3_def Bex_def in_respects id_apply comp_def map_fun_def
by metis
subsection \<open>\<open>Bex1_rel\<close> quantifier\<close>
definition
Bex1_rel :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> bool"
where
"Bex1_rel R P \<longleftrightarrow> (\<exists>x \<in> Respects R. P x) \<and> (\<forall>x \<in> Respects R. \<forall>y \<in> Respects R. ((P x \<and> P y) \<longrightarrow> (R x y)))"
lemma bex1_rel_aux:
"\<lbrakk>\<forall>xa ya. R xa ya \<longrightarrow> x xa = y ya; Bex1_rel R x\<rbrakk> \<Longrightarrow> Bex1_rel R y"
unfolding Bex1_rel_def
apply (erule conjE)+
apply (erule bexE)
apply rule
apply (rule_tac x="xa" in bexI)
apply metis
apply metis
apply rule+
apply (erule_tac x="xaa" in ballE)
prefer 2
apply (metis)
apply (erule_tac x="ya" in ballE)
prefer 2
apply (metis)
apply (metis in_respects)
done
lemma bex1_rel_aux2:
"\<lbrakk>\<forall>xa ya. R xa ya \<longrightarrow> x xa = y ya; Bex1_rel R y\<rbrakk> \<Longrightarrow> Bex1_rel R x"
unfolding Bex1_rel_def
apply (erule conjE)+
apply (erule bexE)
apply rule
apply (rule_tac x="xa" in bexI)
apply metis
apply metis
apply rule+
apply (erule_tac x="xaa" in ballE)
prefer 2
apply (metis)
apply (erule_tac x="ya" in ballE)
prefer 2
apply (metis)
apply (metis in_respects)
done
lemma bex1_rel_rsp:
assumes a: "Quotient3 R absf repf"
shows "((R ===> op =) ===> op =) (Bex1_rel R) (Bex1_rel R)"
apply (simp add: rel_fun_def)
apply clarify
apply rule
apply (simp_all add: bex1_rel_aux bex1_rel_aux2)
apply (erule bex1_rel_aux2)
apply assumption
done
lemma ex1_prs:
assumes a: "Quotient3 R absf repf"
shows "((absf ---> id) ---> id) (Bex1_rel R) f = Ex1 f"
apply (simp add:)
apply (subst Bex1_rel_def)
apply (subst Bex_def)
apply (subst Ex1_def)
apply simp
apply rule
apply (erule conjE)+
apply (erule_tac exE)
apply (erule conjE)
apply (subgoal_tac "\<forall>y. R y y \<longrightarrow> f (absf y) \<longrightarrow> R x y")
apply (rule_tac x="absf x" in exI)
apply (simp)
apply rule+
using a unfolding Quotient3_def
apply metis
apply rule+
apply (erule_tac x="x" in ballE)
apply (erule_tac x="y" in ballE)
apply simp
apply (simp add: in_respects)
apply (simp add: in_respects)
apply (erule_tac exE)
apply rule
apply (rule_tac x="repf x" in exI)
apply (simp only: in_respects)
apply rule
apply (metis Quotient3_rel_rep[OF a])
using a unfolding Quotient3_def apply (simp)
apply rule+
using a unfolding Quotient3_def in_respects
apply metis
done
lemma bex1_bexeq_reg:
shows "(\<exists>!x\<in>Respects R. P x) \<longrightarrow> (Bex1_rel R (\<lambda>x. P x))"
by (auto simp add: Ex1_def Bex1_rel_def Bex_def Ball_def in_respects)
lemma bex1_bexeq_reg_eqv:
assumes a: "equivp R"
shows "(\<exists>!x. P x) \<longrightarrow> Bex1_rel R P"
using equivp_reflp[OF a]
apply (intro impI)
apply (elim ex1E)
apply (rule mp[OF bex1_bexeq_reg])
apply (rule_tac a="x" in ex1I)
apply (subst in_respects)
apply (rule conjI)
apply assumption
apply assumption
apply clarify
apply (erule_tac x="xa" in allE)
apply simp
done
subsection \<open>Various respects and preserve lemmas\<close>
lemma quot_rel_rsp:
assumes a: "Quotient3 R Abs Rep"
shows "(R ===> R ===> op =) R R"
apply(rule rel_funI)+
apply(rule equals_rsp[OF a])
apply(assumption)+
done
lemma o_prs:
assumes q1: "Quotient3 R1 Abs1 Rep1"
and q2: "Quotient3 R2 Abs2 Rep2"
and q3: "Quotient3 R3 Abs3 Rep3"
shows "((Abs2 ---> Rep3) ---> (Abs1 ---> Rep2) ---> (Rep1 ---> Abs3)) op \<circ> = op \<circ>"
and "(id ---> (Abs1 ---> id) ---> Rep1 ---> id) op \<circ> = op \<circ>"
using Quotient3_abs_rep[OF q1] Quotient3_abs_rep[OF q2] Quotient3_abs_rep[OF q3]
by (simp_all add: fun_eq_iff)
lemma o_rsp:
"((R2 ===> R3) ===> (R1 ===> R2) ===> (R1 ===> R3)) op \<circ> op \<circ>"
"(op = ===> (R1 ===> op =) ===> R1 ===> op =) op \<circ> op \<circ>"
by (force elim: rel_funE)+
lemma cond_prs:
assumes a: "Quotient3 R absf repf"
shows "absf (if a then repf b else repf c) = (if a then b else c)"
using a unfolding Quotient3_def by auto
lemma if_prs:
assumes q: "Quotient3 R Abs Rep"
shows "(id ---> Rep ---> Rep ---> Abs) If = If"
using Quotient3_abs_rep[OF q]
by (auto simp add: fun_eq_iff)
lemma if_rsp:
assumes q: "Quotient3 R Abs Rep"
shows "(op = ===> R ===> R ===> R) If If"
by force
lemma let_prs:
assumes q1: "Quotient3 R1 Abs1 Rep1"
and q2: "Quotient3 R2 Abs2 Rep2"
shows "(Rep2 ---> (Abs2 ---> Rep1) ---> Abs1) Let = Let"
using Quotient3_abs_rep[OF q1] Quotient3_abs_rep[OF q2]
by (auto simp add: fun_eq_iff)
lemma let_rsp:
shows "(R1 ===> (R1 ===> R2) ===> R2) Let Let"
by (force elim: rel_funE)
lemma id_rsp:
shows "(R ===> R) id id"
by auto
lemma id_prs:
assumes a: "Quotient3 R Abs Rep"
shows "(Rep ---> Abs) id = id"
by (simp add: fun_eq_iff Quotient3_abs_rep [OF a])
end
locale quot_type =
fixes R :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
and Abs :: "'a set \<Rightarrow> 'b"
and Rep :: "'b \<Rightarrow> 'a set"
assumes equivp: "part_equivp R"
and rep_prop: "\<And>y. \<exists>x. R x x \<and> Rep y = Collect (R x)"
and rep_inverse: "\<And>x. Abs (Rep x) = x"
and abs_inverse: "\<And>c. (\<exists>x. ((R x x) \<and> (c = Collect (R x)))) \<Longrightarrow> (Rep (Abs c)) = c"
and rep_inject: "\<And>x y. (Rep x = Rep y) = (x = y)"
begin
definition
abs :: "'a \<Rightarrow> 'b"
where
"abs x = Abs (Collect (R x))"
definition
rep :: "'b \<Rightarrow> 'a"
where
"rep a = (SOME x. x \<in> Rep a)"
lemma some_collect:
assumes "R r r"
shows "R (SOME x. x \<in> Collect (R r)) = R r"
apply simp
by (metis assms exE_some equivp[simplified part_equivp_def])
lemma Quotient:
shows "Quotient3 R abs rep"
unfolding Quotient3_def abs_def rep_def
proof (intro conjI allI)
fix a r s
show x: "R (SOME x. x \<in> Rep a) (SOME x. x \<in> Rep a)" proof -
obtain x where r: "R x x" and rep: "Rep a = Collect (R x)" using rep_prop[of a] by auto
have "R (SOME x. x \<in> Rep a) x" using r rep some_collect by metis
then have "R x (SOME x. x \<in> Rep a)" using part_equivp_symp[OF equivp] by fast
then show "R (SOME x. x \<in> Rep a) (SOME x. x \<in> Rep a)"
using part_equivp_transp[OF equivp] by (metis \<open>R (SOME x. x \<in> Rep a) x\<close>)
qed
have "Collect (R (SOME x. x \<in> Rep a)) = (Rep a)" by (metis some_collect rep_prop)
then show "Abs (Collect (R (SOME x. x \<in> Rep a))) = a" using rep_inverse by auto
have "R r r \<Longrightarrow> R s s \<Longrightarrow> Abs (Collect (R r)) = Abs (Collect (R s)) \<longleftrightarrow> R r = R s"
proof -
assume "R r r" and "R s s"
then have "Abs (Collect (R r)) = Abs (Collect (R s)) \<longleftrightarrow> Collect (R r) = Collect (R s)"
by (metis abs_inverse)
also have "Collect (R r) = Collect (R s) \<longleftrightarrow> (\<lambda>A x. x \<in> A) (Collect (R r)) = (\<lambda>A x. x \<in> A) (Collect (R s))"
by rule simp_all
finally show "Abs (Collect (R r)) = Abs (Collect (R s)) \<longleftrightarrow> R r = R s" by simp
qed
then show "R r s \<longleftrightarrow> R r r \<and> R s s \<and> (Abs (Collect (R r)) = Abs (Collect (R s)))"
using equivp[simplified part_equivp_def] by metis
qed
end
subsection \<open>Quotient composition\<close>
lemma OOO_quotient3:
fixes R1 :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
fixes Abs1 :: "'a \<Rightarrow> 'b" and Rep1 :: "'b \<Rightarrow> 'a"
fixes Abs2 :: "'b \<Rightarrow> 'c" and Rep2 :: "'c \<Rightarrow> 'b"
fixes R2' :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
fixes R2 :: "'b \<Rightarrow> 'b \<Rightarrow> bool"
assumes R1: "Quotient3 R1 Abs1 Rep1"
assumes R2: "Quotient3 R2 Abs2 Rep2"
assumes Abs1: "\<And>x y. R2' x y \<Longrightarrow> R1 x x \<Longrightarrow> R1 y y \<Longrightarrow> R2 (Abs1 x) (Abs1 y)"
assumes Rep1: "\<And>x y. R2 x y \<Longrightarrow> R2' (Rep1 x) (Rep1 y)"
shows "Quotient3 (R1 OO R2' OO R1) (Abs2 \<circ> Abs1) (Rep1 \<circ> Rep2)"
apply (rule Quotient3I)
apply (simp add: o_def Quotient3_abs_rep [OF R2] Quotient3_abs_rep [OF R1])
apply simp
apply (rule_tac b="Rep1 (Rep2 a)" in relcomppI)
apply (rule Quotient3_rep_reflp [OF R1])
apply (rule_tac b="Rep1 (Rep2 a)" in relcomppI [rotated])
apply (rule Quotient3_rep_reflp [OF R1])
apply (rule Rep1)
apply (rule Quotient3_rep_reflp [OF R2])
apply safe
apply (rename_tac x y)
apply (drule Abs1)
apply (erule Quotient3_refl2 [OF R1])
apply (erule Quotient3_refl1 [OF R1])
apply (drule Quotient3_refl1 [OF R2], drule Rep1)
apply (subgoal_tac "R1 r (Rep1 (Abs1 x))")
apply (rule_tac b="Rep1 (Abs1 x)" in relcomppI, assumption)
apply (erule relcomppI)
apply (erule Quotient3_symp [OF R1, THEN sympD])
apply (rule Quotient3_rel[symmetric, OF R1, THEN iffD2])
apply (rule conjI, erule Quotient3_refl1 [OF R1])
apply (rule conjI, rule Quotient3_rep_reflp [OF R1])
apply (subst Quotient3_abs_rep [OF R1])
apply (erule Quotient3_rel_abs [OF R1])
apply (rename_tac x y)
apply (drule Abs1)
apply (erule Quotient3_refl2 [OF R1])
apply (erule Quotient3_refl1 [OF R1])
apply (drule Quotient3_refl2 [OF R2], drule Rep1)
apply (subgoal_tac "R1 s (Rep1 (Abs1 y))")
apply (rule_tac b="Rep1 (Abs1 y)" in relcomppI, assumption)
apply (erule relcomppI)
apply (erule Quotient3_symp [OF R1, THEN sympD])
apply (rule Quotient3_rel[symmetric, OF R1, THEN iffD2])
apply (rule conjI, erule Quotient3_refl2 [OF R1])
apply (rule conjI, rule Quotient3_rep_reflp [OF R1])
apply (subst Quotient3_abs_rep [OF R1])
apply (erule Quotient3_rel_abs [OF R1, THEN sym])
apply simp
apply (rule Quotient3_rel_abs [OF R2])
apply (rule Quotient3_rel_abs [OF R1, THEN ssubst], assumption)
apply (rule Quotient3_rel_abs [OF R1, THEN subst], assumption)
apply (erule Abs1)
apply (erule Quotient3_refl2 [OF R1])
apply (erule Quotient3_refl1 [OF R1])
apply (rename_tac a b c d)
apply simp
apply (rule_tac b="Rep1 (Abs1 r)" in relcomppI)
apply (rule Quotient3_rel[symmetric, OF R1, THEN iffD2])
apply (rule conjI, erule Quotient3_refl1 [OF R1])
apply (simp add: Quotient3_abs_rep [OF R1] Quotient3_rep_reflp [OF R1])
apply (rule_tac b="Rep1 (Abs1 s)" in relcomppI [rotated])
apply (rule Quotient3_rel[symmetric, OF R1, THEN iffD2])
apply (simp add: Quotient3_abs_rep [OF R1] Quotient3_rep_reflp [OF R1])
apply (erule Quotient3_refl2 [OF R1])
apply (rule Rep1)
apply (drule Abs1)
apply (erule Quotient3_refl2 [OF R1])
apply (erule Quotient3_refl1 [OF R1])
apply (drule Abs1)
apply (erule Quotient3_refl2 [OF R1])
apply (erule Quotient3_refl1 [OF R1])
apply (drule Quotient3_rel_abs [OF R1])
apply (drule Quotient3_rel_abs [OF R1])
apply (drule Quotient3_rel_abs [OF R1])
apply (drule Quotient3_rel_abs [OF R1])
apply simp
apply (rule Quotient3_rel[symmetric, OF R2, THEN iffD2])
apply simp
done
lemma OOO_eq_quotient3:
fixes R1 :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
fixes Abs1 :: "'a \<Rightarrow> 'b" and Rep1 :: "'b \<Rightarrow> 'a"
fixes Abs2 :: "'b \<Rightarrow> 'c" and Rep2 :: "'c \<Rightarrow> 'b"
assumes R1: "Quotient3 R1 Abs1 Rep1"
assumes R2: "Quotient3 op= Abs2 Rep2"
shows "Quotient3 (R1 OOO op=) (Abs2 \<circ> Abs1) (Rep1 \<circ> Rep2)"
using assms
by (rule OOO_quotient3) auto
subsection \<open>Quotient3 to Quotient\<close>
lemma Quotient3_to_Quotient:
assumes "Quotient3 R Abs Rep"
and "T \<equiv> \<lambda>x y. R x x \<and> Abs x = y"
shows "Quotient R Abs Rep T"
using assms unfolding Quotient3_def by (intro QuotientI) blast+
lemma Quotient3_to_Quotient_equivp:
assumes q: "Quotient3 R Abs Rep"
and T_def: "T \<equiv> \<lambda>x y. Abs x = y"
and eR: "equivp R"
shows "Quotient R Abs Rep T"
proof (intro QuotientI)
fix a
show "Abs (Rep a) = a" using q by(rule Quotient3_abs_rep)
next
fix a
show "R (Rep a) (Rep a)" using q by(rule Quotient3_rep_reflp)
next
fix r s
show "R r s = (R r r \<and> R s s \<and> Abs r = Abs s)" using q by(rule Quotient3_rel[symmetric])
next
show "T = (\<lambda>x y. R x x \<and> Abs x = y)" using T_def equivp_reflp[OF eR] by simp
qed
subsection \<open>ML setup\<close>
text \<open>Auxiliary data for the quotient package\<close>
named_theorems quot_equiv "equivalence relation theorems"
and quot_respect "respectfulness theorems"
and quot_preserve "preservation theorems"
and id_simps "identity simp rules for maps"
and quot_thm "quotient theorems"
ML_file "Tools/Quotient/quotient_info.ML"
declare [[mapQ3 "fun" = (rel_fun, fun_quotient3)]]
lemmas [quot_thm] = fun_quotient3
lemmas [quot_respect] = quot_rel_rsp if_rsp o_rsp let_rsp id_rsp
lemmas [quot_preserve] = if_prs o_prs let_prs id_prs
lemmas [quot_equiv] = identity_equivp
text \<open>Lemmas about simplifying id's.\<close>
lemmas [id_simps] =
id_def[symmetric]
map_fun_id
id_apply
id_o
o_id
eq_comp_r
vimage_id
text \<open>Translation functions for the lifting process.\<close>
ML_file "Tools/Quotient/quotient_term.ML"
text \<open>Definitions of the quotient types.\<close>
ML_file "Tools/Quotient/quotient_type.ML"
text \<open>Definitions for quotient constants.\<close>
ML_file "Tools/Quotient/quotient_def.ML"
text \<open>
An auxiliary constant for recording some information
about the lifted theorem in a tactic.
\<close>
definition
Quot_True :: "'a \<Rightarrow> bool"
where
"Quot_True x \<longleftrightarrow> True"
lemma
shows QT_all: "Quot_True (All P) \<Longrightarrow> Quot_True P"
and QT_ex: "Quot_True (Ex P) \<Longrightarrow> Quot_True P"
and QT_ex1: "Quot_True (Ex1 P) \<Longrightarrow> Quot_True P"
and QT_lam: "Quot_True (\<lambda>x. P x) \<Longrightarrow> (\<And>x. Quot_True (P x))"
and QT_ext: "(\<And>x. Quot_True (a x) \<Longrightarrow> f x = g x) \<Longrightarrow> (Quot_True a \<Longrightarrow> f = g)"
by (simp_all add: Quot_True_def ext)
lemma QT_imp: "Quot_True a \<equiv> Quot_True b"
by (simp add: Quot_True_def)
context includes lifting_syntax
begin
text \<open>Tactics for proving the lifted theorems\<close>
ML_file "Tools/Quotient/quotient_tacs.ML"
end
subsection \<open>Methods / Interface\<close>
method_setup lifting =
\<open>Attrib.thms >> (fn thms => fn ctxt =>
SIMPLE_METHOD' (Quotient_Tacs.lift_tac ctxt [] thms))\<close>
\<open>lift theorems to quotient types\<close>
method_setup lifting_setup =
\<open>Attrib.thm >> (fn thm => fn ctxt =>
SIMPLE_METHOD' (Quotient_Tacs.lift_procedure_tac ctxt [] thm))\<close>
\<open>set up the three goals for the quotient lifting procedure\<close>
method_setup descending =
\<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Quotient_Tacs.descend_tac ctxt []))\<close>
\<open>descend theorems to the raw level\<close>
method_setup descending_setup =
\<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Quotient_Tacs.descend_procedure_tac ctxt []))\<close>
\<open>set up the three goals for the descending theorems\<close>
method_setup partiality_descending =
\<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Quotient_Tacs.partiality_descend_tac ctxt []))\<close>
\<open>descend theorems to the raw level\<close>
method_setup partiality_descending_setup =
\<open>Scan.succeed (fn ctxt =>
SIMPLE_METHOD' (Quotient_Tacs.partiality_descend_procedure_tac ctxt []))\<close>
\<open>set up the three goals for the descending theorems\<close>
method_setup regularize =
\<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Quotient_Tacs.regularize_tac ctxt))\<close>
\<open>prove the regularization goals from the quotient lifting procedure\<close>
method_setup injection =
\<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Quotient_Tacs.all_injection_tac ctxt))\<close>
\<open>prove the rep/abs injection goals from the quotient lifting procedure\<close>
method_setup cleaning =
\<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Quotient_Tacs.clean_tac ctxt))\<close>
\<open>prove the cleaning goals from the quotient lifting procedure\<close>
attribute_setup quot_lifted =
\<open>Scan.succeed Quotient_Tacs.lifted_attrib\<close>
\<open>lift theorems to quotient types\<close>
no_notation
rel_conj (infixr "OOO" 75)
end
|
\chapter*{Preface}
\label{sec:preface}
\addcontentsline{toc}{chapter}{\nameref{sec:preface}}
% I am privileged.
% insert 'especially' if more text before this
I am very grateful that I got the opportunity to do my master's thesis at NERF\footnote{Neuro-Electronics Research Flanders. A collaborative research initiative between the University of Leuven, the Flemish life sciences institute VIB, and imec (a ``boutique, not-for-profit microelectronics shop'', in the \href{https://www.economist.com/science-and-technology/2017/11/09/a-new-nerve-cell-monitor-will-help-those-studying-brains}{words} of The Economist).}, where I was introduced to the wonderful world of circuit-level neuroscience.\footnote{I watched the fluorescent neural fireworks through a laser confocal microscope; I heard live the crackling firing of neurons in a sleeping brain; and I saw beautiful brain tissue photographs where virally infected cells fluoresced in bright colours. I learned about animal surgery, 3D printing, behavioural experiments, cluster and GPU computing, and the manufacture of bespoke, electromechanical scientific tools.}
I want to thank Alexander Bertrand and Fabian Kloosterman, whose collaboration made this possible. They, and my thesis supervisors Jasper Wouters and Davide Ciliberti, spent many hours on very helpful feedback, guidance, and support.
I also want to thank Jo\~ao Couto, \c{C}a\u{g}atay Ayd{\i}n, and Luis Hoffman for setting up the NERF computing cluster, writing a comprehensive usage guide for it, and helping me to get started with it. I thank Luis Hoffman specifically for his magnanimous help with various imec networking issues.
Finally, a big thanks to my family and friends for the continued support throughout the thesis work. You know who y'all are.
% todo: make link a proper reference
|
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test EmbeddingND."""
import pytest
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Embedding
from psiz.keras.layers import EmbeddingND
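# These tests exercise EmbeddingND which, as used here, wraps a standard
# Embedding of `input_dim` entries so it can be addressed by a stacked
# multi-index whose dimensions multiply to `input_dim` (e.g. 12 = 3 * 2 * 2).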
def test_init():
phys_emb = Embedding(
input_dim=12, output_dim=3
)
# Raise no error, since shapes are compatible.
nd_emb = EmbeddingND(
embedding=phys_emb
)
nd_emb = EmbeddingND(
embedding=phys_emb, input_dims=[12]
)
nd_emb = EmbeddingND(
embedding=phys_emb, input_dims=[2, 6]
)
nd_emb = EmbeddingND(
embedding=phys_emb, input_dims=[2, 2, 3]
)
# Raise ValueError for incompatible shapes.
with pytest.raises(Exception) as e_info:
nd_emb = EmbeddingND(
embedding=phys_emb, input_dims=[3, 5]
)
assert str(e_info.value) == (
'The provided `input_dims` and `embedding` are not shape '
'compatible. The provided embedding has input_dim=12, which '
'cannot be reshaped to ([3, 5]).'
)
def test_standard_2d_call(flat_embeddings):
"""Test 2D input embedding call."""
phys_emb = Embedding(
input_dim=12, output_dim=3,
embeddings_initializer=tf.keras.initializers.Constant(
flat_embeddings
)
)
nd_emb = EmbeddingND(
embedding=phys_emb, input_dims=[6, 2]
)
reshaped_embeddings = np.reshape(flat_embeddings, [6, 2, 3])
# Test reshape.
inputs = [
tf.constant(np.array(2, dtype=np.int32)),
tf.constant(np.array(1, dtype=np.int32))
]
multi_index = tf.stack(inputs, axis=0)
output = nd_emb(multi_index).numpy()
desired_output = reshaped_embeddings[2, 1]
np.testing.assert_array_almost_equal(output, desired_output)
inputs = [
tf.constant(np.array(5, dtype=np.int32)),
tf.constant(np.array(1, dtype=np.int32))
]
multi_index = tf.stack(inputs, axis=0)
output = nd_emb(multi_index).numpy()
desired_output = reshaped_embeddings[5, 1]
np.testing.assert_array_almost_equal(output, desired_output)
inputs_0 = tf.constant(
np.array([
[1, 2],
[1, 2]
], dtype=np.int32)
)
inputs_1 = tf.constant(
np.array([
[0, 0],
[1, 1]
], dtype=np.int32)
)
inputs = [inputs_0, inputs_1]
multi_index = tf.stack(inputs, axis=0)
output = nd_emb(multi_index).numpy()
desired_output = np.array([
[[2.0, 2.1, 2.2], [4.0, 4.1, 4.2]],
[[3.0, 3.1, 3.2], [5.0, 5.1, 5.2]],
])
# Assert almost equal because of TF 32 bit casting.
np.testing.assert_array_almost_equal(output, desired_output)
def test_standard_3d_call(flat_embeddings):
"""Test 3D input Embedding call."""
phys_emb = Embedding(
input_dim=12, output_dim=3,
embeddings_initializer=tf.keras.initializers.Constant(
flat_embeddings
)
)
nd_emb = EmbeddingND(
embedding=phys_emb, input_dims=[3, 2, 2]
)
reshaped_embeddings = np.reshape(flat_embeddings, [3, 2, 2, 3])
# Test reshape.
inputs = [
tf.constant(np.array(0, dtype=np.int32)),
tf.constant(np.array(0, dtype=np.int32)),
tf.constant(np.array(0, dtype=np.int32))
]
multi_index = tf.stack(inputs, axis=0)
output = nd_emb(multi_index).numpy()
desired_output = reshaped_embeddings[0, 0, 0]
np.testing.assert_array_almost_equal(output, desired_output)
inputs = [
tf.constant(np.array(2, dtype=np.int32)),
tf.constant(np.array(1, dtype=np.int32)),
tf.constant(np.array(1, dtype=np.int32))
]
multi_index = tf.stack(inputs, axis=0)
output = nd_emb(multi_index).numpy()
desired_output = reshaped_embeddings[2, 1, 1]
np.testing.assert_array_almost_equal(output, desired_output)
inputs = [
tf.constant(np.array(1, dtype=np.int32)),
tf.constant(np.array(1, dtype=np.int32)),
tf.constant(np.array(0, dtype=np.int32))
]
multi_index = tf.stack(inputs, axis=0)
output = nd_emb(multi_index).numpy()
desired_output = reshaped_embeddings[1, 1, 0]
np.testing.assert_array_almost_equal(output, desired_output)
inputs_0 = tf.constant(
np.array([
[1, 2],
[1, 2]
], dtype=np.int32)
)
inputs_1 = tf.constant(
np.array([
[0, 0],
[1, 1]
], dtype=np.int32)
)
inputs_2 = tf.constant(
np.array([
[0, 1],
[0, 1]
], dtype=np.int32)
)
inputs = (inputs_0, inputs_1, inputs_2)
multi_index = tf.stack(inputs, axis=0)
output = nd_emb(multi_index).numpy()
desired_output = np.array([
[[4.0, 4.1, 4.2], [9.0, 9.1, 9.2]],
[[6.0, 6.1, 6.2], [11.0, 11.1, 11.2]],
])
np.testing.assert_array_almost_equal(output, desired_output)
def test_serialization(flat_embeddings):
# Build ND embedding.
phys_emb = Embedding(
input_dim=12, output_dim=3,
embeddings_initializer=tf.keras.initializers.Constant(
flat_embeddings
)
)
nd_emb = EmbeddingND(
embedding=phys_emb, input_dims=[3, 2, 2]
)
# Get configuration.
config = nd_emb.get_config()
# Reconstruct layer from configuration.
nd_emb_reconstructed = EmbeddingND.from_config(config)
# Assert reconstructed attributes are the same as original.
np.testing.assert_array_equal(
nd_emb.input_dims,
nd_emb_reconstructed.input_dims
)
assert nd_emb.output_dim == nd_emb_reconstructed.output_dim
# Assert calls are equal (given constant initializer).
inputs = [
tf.constant(np.array(1, dtype=np.int32)),
tf.constant(np.array(1, dtype=np.int32)),
tf.constant(np.array(0, dtype=np.int32))
]
multi_index = tf.stack(inputs, axis=0)
output_orig = nd_emb(multi_index).numpy()
    output_recon = nd_emb_reconstructed(multi_index).numpy()
np.testing.assert_array_almost_equal(output_orig, output_recon)
|
The interior of a singleton set is empty.
|
#ifdef _WIN32
#define _WIN32_WINNT 0x0601
#endif
#include <iostream>
#include <chrono>
#include <string>
#include <vector>
#include <utility>
#include <sstream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
using namespace boost;
using namespace std::chrono_literals;
constexpr uint16_t port = 54'000;
std::vector<uint8_t> input_buffer(1024 * 20);
std::vector<uint8_t> output_buffer(1024 * 20);
std::vector<std::shared_ptr<asio::ip::tcp::socket>> clients;
enum class MessageType : uint8_t {
Ping,
Hello,
Text,
Exit
};
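// Wire format used by this client (as implemented below): every message
// starts with the numeric value of its MessageType rendered via
// std::to_string (so "0" for Ping, "1" for Hello, ...), followed by the
// payload; the Ping, Text, and Exit messages are additionally terminated
// with '\n'.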
void ReadData(asio::ip::tcp::socket&);
void WriteData(asio::ip::tcp::socket& sock, size_t mess_size) {
sock.async_write_some(asio::buffer(output_buffer.data(), mess_size),
[&](system::error_code err, size_t length)
{
if (err) {
                std::cout << err.message() << std::endl;
}
else {
ReadData(sock);
}
}
);
}
void ReadData(asio::ip::tcp::socket& sock) {
sock.async_read_some(asio::buffer(input_buffer.data(), input_buffer.size()),
[&](system::error_code err, size_t length)
{
if (err) {
                std::cout << err.message() << std::endl;
}
else {
                for (size_t i = 0; i < length; ++i) {
std::cout.put(input_buffer[i]);
}
std::cout << std::endl;
}
}
);
}
int main()
{
asio::io_context context;
asio::ip::tcp::socket sock(context);
sock.connect(asio::ip::tcp::endpoint(asio::ip::make_address("127.0.0.1"), port));
std::string str;
do {
std::cout << "Enter the option (1 - ping, 2 - hello, 3 - text, 4 - exit)" << std::endl;
std::cin >> str;
std::string message;
switch (std::stoi(str))
{
case 1:
{
uint64_t time = std::chrono::steady_clock::now().time_since_epoch().count();
message = std::to_string(static_cast<uint8_t>(MessageType::Ping)) +
std::to_string(time) + "\n";
}
break;
case 2:
message = std::to_string(static_cast<uint8_t>(MessageType::Hello)) + "Hello";
break;
case 3:
{
std::string mess;
std::cout << "enter a message: " << std::endl;
std::cin >> mess;
message = std::to_string(static_cast<uint8_t>(MessageType::Text)) + mess + "\n";
}
break;
case 4:
{
std::cout << "Disconnecting..." << std::endl;
std::string str = std::to_string(static_cast<uint8_t>(MessageType::Exit)) + "Bye\n";
sock.write_some(asio::buffer(str.data(), str.size()));
sock.close();
exit(0);
}
default:
break;
}
std::copy(message.begin(), message.end(), output_buffer.begin());
WriteData(sock, message.size());
context.run();
context.restart();
} while (true);
return 0;
}
|
-- Andreas, 2017-01-12
data D {A : Set} : A → Set where
c : (a : A) → D {!!} -- fill with a
d : (a : A) → {!!} -- fill with D a
|
If $f$ does not have an essential singularity at $z$ and $f$ has an isolated singularity at $z$, then $1/f$ does not have an essential singularity at $z$.
|
/-
Copyright (c) 2021 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Eric Wieser
-/
import algebra.order.module
import data.real.basic
/-!
# Pointwise operations on sets of reals
This file relates `Inf (a • s)`/`Sup (a • s)` with `a • Inf s`/`a • Sup s` for `s : set ℝ`.
From these, it relates `⨅ i, a • f i` / `⨆ i, a • f i` with `a • (⨅ i, f i)` / `a • (⨆ i, f i)`,
and provides lemmas about distributing `*` over `⨅` and `⨆`.
## TODO
This is true more generally for conditionally complete linear orders whose default value is `0`.
We don't have those yet.
-/
open set
open_locale pointwise
variables {ι : Sort*} {α : Type*} [linear_ordered_field α]
section mul_action_with_zero
variables [mul_action_with_zero α ℝ] [ordered_smul α ℝ] {a : α}
lemma real.Inf_smul_of_nonneg (ha : 0 ≤ a) (s : set ℝ) : Inf (a • s) = a • Inf s :=
begin
obtain rfl | hs := s.eq_empty_or_nonempty,
{ rw [smul_set_empty, real.Inf_empty, smul_zero'] },
obtain rfl | ha' := ha.eq_or_lt,
{ rw [zero_smul_set hs, zero_smul],
exact cInf_singleton 0 },
by_cases bdd_below s,
{ exact ((order_iso.smul_left ℝ ha').map_cInf' hs h).symm },
{ rw [real.Inf_of_not_bdd_below (mt (bdd_below_smul_iff_of_pos ha').1 h),
real.Inf_of_not_bdd_below h, smul_zero'] }
end
lemma real.smul_infi_of_nonneg (ha : 0 ≤ a) (f : ι → ℝ) :
a • (⨅ i, f i) = ⨅ i, a • f i :=
(real.Inf_smul_of_nonneg ha _).symm.trans $ congr_arg Inf $ (range_comp _ _).symm
lemma real.Sup_smul_of_nonneg (ha : 0 ≤ a) (s : set ℝ) : Sup (a • s) = a • Sup s :=
begin
obtain rfl | hs := s.eq_empty_or_nonempty,
{ rw [smul_set_empty, real.Sup_empty, smul_zero'] },
obtain rfl | ha' := ha.eq_or_lt,
{ rw [zero_smul_set hs, zero_smul],
exact cSup_singleton 0 },
by_cases bdd_above s,
{ exact ((order_iso.smul_left ℝ ha').map_cSup' hs h).symm },
{ rw [real.Sup_of_not_bdd_above (mt (bdd_above_smul_iff_of_pos ha').1 h),
real.Sup_of_not_bdd_above h, smul_zero'] }
end
lemma real.smul_supr_of_nonneg (ha : 0 ≤ a) (f : ι → ℝ) :
a • (⨆ i, f i) = ⨆ i, a • f i :=
(real.Sup_smul_of_nonneg ha _).symm.trans $ congr_arg Sup $ (range_comp _ _).symm
end mul_action_with_zero
section module
variables [module α ℝ] [ordered_smul α ℝ] {a : α}
lemma real.Inf_smul_of_nonpos (ha : a ≤ 0) (s : set ℝ) : Inf (a • s) = a • Sup s :=
begin
obtain rfl | hs := s.eq_empty_or_nonempty,
{ rw [smul_set_empty, real.Inf_empty, real.Sup_empty, smul_zero'] },
obtain rfl | ha' := ha.eq_or_lt,
{ rw [zero_smul_set hs, zero_smul],
exact cInf_singleton 0 },
by_cases bdd_above s,
{ exact ((order_iso.smul_left_dual ℝ ha').map_cSup' hs h).symm },
{ rw [real.Inf_of_not_bdd_below (mt (bdd_below_smul_iff_of_neg ha').1 h),
real.Sup_of_not_bdd_above h, smul_zero'] }
end
lemma real.smul_supr_of_nonpos (ha : a ≤ 0) (f : ι → ℝ) :
a • (⨆ i, f i) = ⨅ i, a • f i :=
(real.Inf_smul_of_nonpos ha _).symm.trans $ congr_arg Inf $ (range_comp _ _).symm
lemma real.Sup_smul_of_nonpos (ha : a ≤ 0) (s : set ℝ) : Sup (a • s) = a • Inf s :=
begin
obtain rfl | hs := s.eq_empty_or_nonempty,
{ rw [smul_set_empty, real.Sup_empty, real.Inf_empty, smul_zero] },
obtain rfl | ha' := ha.eq_or_lt,
{ rw [zero_smul_set hs, zero_smul],
exact cSup_singleton 0 },
by_cases bdd_below s,
{ exact ((order_iso.smul_left_dual ℝ ha').map_cInf' hs h).symm },
{ rw [real.Sup_of_not_bdd_above (mt (bdd_above_smul_iff_of_neg ha').1 h),
real.Inf_of_not_bdd_below h, smul_zero] }
end
lemma real.smul_infi_of_nonpos (ha : a ≤ 0) (f : ι → ℝ) :
a • (⨅ i, f i) = ⨆ i, a • f i :=
(real.Sup_smul_of_nonpos ha _).symm.trans $ congr_arg Sup $ (range_comp _ _).symm
end module
/-! ## Special cases for real multiplication -/
section mul
variables {r : ℝ}
lemma real.mul_infi_of_nonneg (ha : 0 ≤ r) (f : ι → ℝ) : r * (⨅ i, f i) = ⨅ i, r * f i :=
real.smul_infi_of_nonneg ha f
lemma real.mul_supr_of_nonneg (ha : 0 ≤ r) (f : ι → ℝ) : r * (⨆ i, f i) = ⨆ i, r * f i :=
real.smul_supr_of_nonneg ha f
lemma real.mul_infi_of_nonpos (ha : r ≤ 0) (f : ι → ℝ) : r * (⨅ i, f i) = ⨆ i, r * f i :=
real.smul_infi_of_nonpos ha f
lemma real.mul_supr_of_nonpos (ha : r ≤ 0) (f : ι → ℝ) : r * (⨆ i, f i) = ⨅ i, r * f i :=
real.smul_supr_of_nonpos ha f
lemma real.infi_mul_of_nonneg (ha : 0 ≤ r) (f : ι → ℝ) : (⨅ i, f i) * r = ⨅ i, f i * r :=
by simp only [real.mul_infi_of_nonneg ha, mul_comm]
lemma real.supr_mul_of_nonneg (ha : 0 ≤ r) (f : ι → ℝ) : (⨆ i, f i) * r = ⨆ i, f i * r :=
by simp only [real.mul_supr_of_nonneg ha, mul_comm]
lemma real.infi_mul_of_nonpos (ha : r ≤ 0) (f : ι → ℝ) : (⨅ i, f i) * r = ⨆ i, f i * r :=
by simp only [real.mul_infi_of_nonpos ha, mul_comm]
lemma real.supr_mul_of_nonpos (ha : r ≤ 0) (f : ι → ℝ) : (⨆ i, f i) * r = ⨅ i, f i * r :=
by simp only [real.mul_supr_of_nonpos ha, mul_comm]
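-- A minimal usage sketch (illustrative, not part of the original file):
-- distributing multiplication by the nonnegative constant `2` over an infimum.
example (f : ℕ → ℝ) : 2 * (⨅ i, f i) = ⨅ i, 2 * f i :=
real.mul_infi_of_nonneg (by norm_num : (0:ℝ) ≤ 2) f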
end mul
|
\section{Tricks}
\begin{enumerate}
\item
In a script, line breaking is allowed provided the line breaks occur immediately after operators.
The scanner will automatically go to the next line after an operator.
\item
Setting \verb$trace=1$ in a script causes each line to be printed just before it is evaluated.
This is useful for debugging.
\item
The last result is stored in the symbol $last$.
\item
Use \verb$contract(A)$ to get the mathematical trace of matrix $A$.
\item
Use \verb$binding(s)$ to get the unevaluated binding of symbol $s$.
\item
Use \verb$s=quote(s)$ to clear symbol $s$.
\item
Use \verb$float(pi)$ to get the floating point value of $\pi$.
Set \verb$pi=float(pi)$ to evaluate expressions with a numerical value for $\pi$.
Set \verb$pi=quote(pi)$ to make $\pi$ symbolic again.
\item
Assign strings to unit names so they are printed normally.
For example, setting \verb$meter="meter"$ causes the symbol {\it meter}
to be printed as meter instead of $m_{eter}$.
\item
Use \verb$expsin$ and \verb$expcos$ instead of \verb$sin$ and \verb$cos$.
Trigonometric simplifications occur automatically when exponentials are used.
\item
Use \verb$A==B$ or \verb$A-B==0$ to test for equality of $A$ and $B$.
The equality operator \verb$==$ uses a cross multiply algorithm to eliminate denominators.
Hence \verb$==$ can typically determine equality even when the unsimplified result of $A-B$ is nonzero (see the example following this list).
Note: Equality tests involving floating point numbers can be problematic
due to roundoff error.
\item
If local symbols are needed in a function, they can be appended to {\it arg-list}.
(The caller does not have to supply all the arguments.)
The following example uses Rodrigues's formula to
compute an associated Legendre function of $\cos\theta$.
\begin{equation*}
P_n^m(x)=\frac{1}{2^n\,n!}(1-x^2)^{m/2}\frac{d^{n+m}}{dx^{n+m}}(x^2-1)^n
\end{equation*}
Function $P$ below first computes $P_n^m(x)$ for local variable
$x$ and then uses {\it eval} to replace $x$ with $f$.
In this case, $f=\cos\theta$.
\begin{Verbatim}[formatcom=\color{blue}]
x = 123 -- global x in use, need local x in P
P(f,n,m,x) = eval(1/(2^n n!) (1 - x^2)^(m/2) d((x^2 - 1)^n,x,n + m),x,f)
P(cos(theta),2,0) -- arguments f, n, m, but not x
\end{Verbatim}
\noindent
$\displaystyle \tfrac{3}{2} \cos(\theta)^2-\tfrac{1}{2}$
\bigskip
\noindent
Note: The maximum number of arguments is nine.
\end{enumerate}
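\noindent
The following script (an illustrative sketch; the particular expressions are arbitrary)
shows the cross multiply equality test in action.
\begin{Verbatim}[formatcom=\color{blue}]
A = 1/(x + 1)
B = (x - 1)/(x^2 - 1)
A - B -- the unsimplified difference need not print as 0
A == B -- cross multiplication verifies the equality
\end{Verbatim}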
|
"""
===========
Path Editor
===========
Sharing events across GUIs.
This example demonstrates a cross-GUI application using Matplotlib event
handling to interact with and modify objects on the canvas.
"""
import numpy as np
from matplotlib.backend_bases import MouseButton
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
pathdata = [
(Path.MOVETO, (1.58, -2.57)),
(Path.CURVE4, (0.35, -1.1)),
(Path.CURVE4, (-1.75, 2.0)),
(Path.CURVE4, (0.375, 2.0)),
(Path.LINETO, (0.85, 1.15)),
(Path.CURVE4, (2.2, 3.2)),
(Path.CURVE4, (3, 0.05)),
(Path.CURVE4, (2.0, -0.5)),
(Path.CLOSEPOLY, (1.58, -2.57)),
]
codes, verts = zip(*pathdata)
path = Path(verts, codes)
patch = PathPatch(
path, facecolor='green', edgecolor='yellow', alpha=0.5)
ax.add_patch(patch)
class PathInteractor:
"""
    A path editor.
Press 't' to toggle vertex markers on and off. When vertex markers are on,
they can be dragged with the mouse.
"""
showverts = True
epsilon = 5 # max pixel distance to count as a vertex hit
def __init__(self, pathpatch):
self.ax = pathpatch.axes
canvas = self.ax.figure.canvas
self.pathpatch = pathpatch
self.pathpatch.set_animated(True)
x, y = zip(*self.pathpatch.get_path().vertices)
        self.line, = self.ax.plot(
            x, y, marker='o', markerfacecolor='r', animated=True)
self._ind = None # the active vertex
canvas.mpl_connect('draw_event', self.on_draw)
canvas.mpl_connect('button_press_event', self.on_button_press)
canvas.mpl_connect('key_press_event', self.on_key_press)
canvas.mpl_connect('button_release_event', self.on_button_release)
canvas.mpl_connect('motion_notify_event', self.on_mouse_move)
self.canvas = canvas
def get_ind_under_point(self, event):
"""
        Return the index of the point closest to the event position, or *None*
        if no point is within ``self.epsilon`` of the event position.
"""
# display coords
xy = np.asarray(self.pathpatch.get_path().vertices)
xyt = self.pathpatch.get_transform().transform(xy)
xt, yt = xyt[:, 0], xyt[:, 1]
d = np.sqrt((xt - event.x)**2 + (yt - event.y)**2)
ind = d.argmin()
if d[ind] >= self.epsilon:
ind = None
return ind
def on_draw(self, event):
"""Callback for draws."""
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.ax.draw_artist(self.pathpatch)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
def on_button_press(self, event):
"""Callback for mouse button presses."""
if (event.inaxes is None
or event.button != MouseButton.LEFT
or not self.showverts):
return
self._ind = self.get_ind_under_point(event)
def on_button_release(self, event):
"""Callback for mouse button releases."""
if (event.button != MouseButton.LEFT
or not self.showverts):
return
self._ind = None
def on_key_press(self, event):
"""Callback for key presses."""
if not event.inaxes:
return
if event.key == 't':
self.showverts = not self.showverts
self.line.set_visible(self.showverts)
if not self.showverts:
self._ind = None
self.canvas.draw()
def on_mouse_move(self, event):
"""Callback for mouse movements."""
if (self._ind is None
or event.inaxes is None
or event.button != MouseButton.LEFT
or not self.showverts):
return
vertices = self.pathpatch.get_path().vertices
vertices[self._ind] = event.xdata, event.ydata
self.line.set_data(zip(*vertices))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.pathpatch)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
interactor = PathInteractor(patch)
ax.set_title('drag vertices to update path')
ax.set_xlim(-3, 4)
ax.set_ylim(-3, 4)
plt.show()
|
-- Given two abelian groups A, B
-- the set of all group homomorphisms from A to B
-- is itself an abelian group.
-- In other words, Ab is enriched over itself (monoidal closed, though not cartesian closed).
-- This is needed to show Ab is an abelian category.
{-# OPTIONS --safe #-}
module Cubical.Algebra.AbGroup.Instances.Hom where
open import Cubical.Algebra.AbGroup.Base
open import Cubical.Algebra.Group.Morphisms
open import Cubical.Algebra.Group.MorphismProperties
open import Cubical.Algebra.Group.Properties
open import Cubical.Foundations.Prelude
private
variable
ℓ ℓ' : Level
module _ (A : AbGroup ℓ) (B : AbGroup ℓ') where
-- These names are useful for the proofs
private
open IsGroupHom
open AbGroupStr (A .snd) using () renaming (0g to 0A; _+_ to _⋆_; -_ to inv)
open AbGroupStr (B .snd) using (_+_; -_; +Comm; +Assoc; +IdR ; +InvR)
renaming (0g to 0B)
open GroupTheory (AbGroup→Group B) using (invDistr) renaming (inv1g to inv0B)
-- Some lemmas
idrB : (b : B .fst) → b + 0B ≡ b
idrB b = +IdR b
invrB : (b : B .fst) → b + (- b) ≡ 0B
invrB b = +InvR b
hom0AB : (f : AbGroupHom A B) → f .fst 0A ≡ 0B
hom0AB f = hom1g (AbGroupStr→GroupStr (A .snd)) (f .fst)
(AbGroupStr→GroupStr (B .snd)) (f .snd .pres·)
homInvAB : (f : AbGroupHom A B) → (a : A .fst) → f .fst (inv a) ≡ (- f .fst a)
homInvAB f a = homInv (AbGroupStr→GroupStr (A .snd)) (f .fst)
(AbGroupStr→GroupStr (B .snd)) (f .snd .pres·) a
-- Zero morphism
zero : AbGroupHom A B
zero .fst a = 0B
zero .snd .pres· a a' = sym (idrB _)
zero .snd .pres1 = refl
zero .snd .presinv a = sym (inv0B)
-- Pointwise addition of morphisms
module _ (f* g* : AbGroupHom A B) where
private
f = f* .fst
g = g* .fst
HomAdd : AbGroupHom A B
HomAdd .fst = λ a → f a + g a
HomAdd .snd .pres· a a' =
f (a ⋆ a') + g (a ⋆ a') ≡⟨ cong (_+ g(a ⋆ a')) (f* .snd .pres· _ _) ⟩
(f a + f a') + g (a ⋆ a') ≡⟨ cong ((f a + f a') +_) (g* .snd .pres· _ _) ⟩
(f a + f a') + (g a + g a') ≡⟨ sym (+Assoc _ _ _) ⟩
f a + (f a' + (g a + g a')) ≡⟨ cong (f a +_) (+Assoc _ _ _) ⟩
f a + ((f a' + g a) + g a') ≡⟨ cong (λ b → (f a + b + g a')) (+Comm _ _) ⟩
f a + ((g a + f a') + g a') ≡⟨ cong (f a +_) (sym (+Assoc _ _ _)) ⟩
f a + (g a + (f a' + g a')) ≡⟨ +Assoc _ _ _ ⟩
(f a + g a) + (f a' + g a') ∎
HomAdd .snd .pres1 =
f 0A + g 0A ≡⟨ cong (_+ g 0A) (hom0AB f*) ⟩
0B + g 0A ≡⟨ cong (0B +_) (hom0AB g*) ⟩
0B + 0B ≡⟨ idrB _ ⟩
0B ∎
HomAdd .snd .presinv a =
f (inv a) + g (inv a) ≡⟨ cong (_+ g (inv a)) (homInvAB f* _) ⟩
(- f a) + g (inv a) ≡⟨ cong ((- f a) +_) (homInvAB g* _) ⟩
(- f a) + (- g a) ≡⟨ +Comm _ _ ⟩
(- g a) + (- f a) ≡⟨ sym (invDistr _ _) ⟩
- (f a + g a) ∎
-- Pointwise inverse of morphism
module _ (f* : AbGroupHom A B) where
private
f = f* .fst
HomInv : AbGroupHom A B
HomInv .fst = λ a → - f a
HomInv .snd .pres· a a' =
- f (a ⋆ a') ≡⟨ cong -_ (f* .snd .pres· _ _) ⟩
- (f a + f a') ≡⟨ invDistr _ _ ⟩
(- f a') + (- f a) ≡⟨ +Comm _ _ ⟩
(- f a) + (- f a') ∎
HomInv .snd .pres1 =
- (f 0A) ≡⟨ cong -_ (f* .snd .pres1) ⟩
- 0B ≡⟨ inv0B ⟩
0B ∎
HomInv .snd .presinv a =
- f (inv a) ≡⟨ cong -_ (homInvAB f* _) ⟩
- (- f a) ∎
-- Group laws for morphisms
private
0ₕ = zero
_+ₕ_ = HomAdd
-ₕ_ = HomInv
-- Morphism addition is associative
HomAdd-assoc : (f g h : AbGroupHom A B) → (f +ₕ (g +ₕ h)) ≡ ((f +ₕ g) +ₕ h)
HomAdd-assoc f g h = GroupHom≡ (funExt λ a → +Assoc _ _ _)
-- Morphism addition is commutative
HomAdd-comm : (f g : AbGroupHom A B) → (f +ₕ g) ≡ (g +ₕ f)
HomAdd-comm f g = GroupHom≡ (funExt λ a → +Comm _ _)
-- zero is right identity
HomAdd-zero : (f : AbGroupHom A B) → (f +ₕ zero) ≡ f
HomAdd-zero f = GroupHom≡ (funExt λ a → idrB _)
-- -ₕ is right inverse
HomInv-invr : (f : AbGroupHom A B) → (f +ₕ (-ₕ f)) ≡ zero
HomInv-invr f = GroupHom≡ (funExt λ a → invrB _)
-- Abelian group structure on AbGroupHom A B
open AbGroupStr
HomAbGroupStr : (A : AbGroup ℓ) → (B : AbGroup ℓ') → AbGroupStr (AbGroupHom A B)
HomAbGroupStr A B .0g = zero A B
HomAbGroupStr A B ._+_ = HomAdd A B
HomAbGroupStr A B .-_ = HomInv A B
HomAbGroupStr A B .isAbGroup = makeIsAbGroup isSetGroupHom
(HomAdd-assoc A B) (HomAdd-zero A B) (HomInv-invr A B) (HomAdd-comm A B)
HomAbGroup : (A : AbGroup ℓ) → (B : AbGroup ℓ') → AbGroup (ℓ-max ℓ ℓ')
HomAbGroup A B = AbGroupHom A B , HomAbGroupStr A B
|
-- Tasty makes it easy to test your code. It is a test framework that can
-- combine many different types of tests into one suite. See its website for
-- help: <http://documentup.com/feuerbach/tasty>.
import Test.Tasty
-- Hspec is one of the providers for Tasty. It provides a nice syntax for
-- writing tests. Its website has more info: <https://hspec.github.io>.
-- import Test.Tasty.Hspec
import Test.Tasty.HUnit
import ExplicitSimplexStream
import Persistence
import Numeric.LinearAlgebra
-- EXAMPLE STREAMS --
stream1 :: Stream Int
stream1 = addSimplex (addVertex (addVertex (addVertex initializeStream 1) 2) 3) (Simplex [1,2])
stream2 :: Stream Int
stream2 = addVertex (addSimplex (addVertex (addVertex initializeStream 1) 2) (Simplex [2,1])) 3
stream3 :: Stream Int
stream3 = addVertex (addVertex (addVertex (addVertex initializeStream 1) 2) 3) 4
stream4 :: Stream Int
stream4 = initializeStream
stream5 :: Stream Int
stream5 = addSimplex stream4 (Simplex [1,2,3,4])
----
main :: IO ()
main = do
defaultMain (testGroup "SimplexStream tests" [addSingleVertexTest, initializeStreamTest, streamEqualityTest, streamNumVerticesEmptyTest, streamNumVertices4CellTest, streamGetSizeEmptyTest, streamGetSize4CellTest, streamGetSize4VertexTest, streamToOrderedSimplexListFourVerticesTest, streamToOrderedSimplexListThreeVerticesOneEdgeTest, getBoundaryMapTest, getBoundaryMapTest2, getBoundaryMapTest3, getHomologyDimensionTest, persistenceTest, persistenceTest2, persistenceTest3, persistenceTest4])
addSingleVertexTest :: TestTree
addSingleVertexTest = testCase "Testing addition of single vertex"
(assertEqual "Should return true for search of vertex 5" (True) (isVertexInStream (addVertex (initializeStream :: Stream Int) 5) 5))
initializeStreamTest :: TestTree
initializeStreamTest = testCase "Testing initialization of stream"
(assertEqual "Should return Simplices []" (Simplices [Simplex []] :: Stream Int) (initializeStream :: Stream Int))
-- Testing stream equality.
-- Order of Simplex in Stream data type _should not matter_
streamEqualityTest :: TestTree
streamEqualityTest = testCase "Testing equality of streams"
  (assertEqual "Should return True" (True) (stream1 == stream2))
-- Test numVertices
streamNumVerticesEmptyTest :: TestTree
streamNumVerticesEmptyTest = testCase "Testing number of vertices in empty stream"
(assertEqual "Should return 0" (0) (numVertices stream4))
streamNumVertices4CellTest :: TestTree
streamNumVertices4CellTest = testCase "Testing number of vertices in a 4-cell"
(assertEqual "Should return 4" (4) (numVertices stream5))
-- Test getSize
streamGetSizeEmptyTest :: TestTree
streamGetSizeEmptyTest = testCase "Testing get size on empty stream."
(assertEqual "Should return 1 (the null-cell)" (1) (getSize stream4))
streamGetSize4CellTest :: TestTree
streamGetSize4CellTest = testCase "Testing get size on 4-cell stream."
(assertEqual "Should return 16" (16) (getSize stream5))
streamGetSize4VertexTest :: TestTree
streamGetSize4VertexTest = testCase "Testing get size on stream with 4 vertices."
(assertEqual "Should return 5 (four vertices + 1 null cell)." (5) (getSize stream3))
-- Test streamToOrderedSimplexList
streamToOrderedSimplexListFourVerticesTest :: TestTree
streamToOrderedSimplexListFourVerticesTest = testCase "Testing streamToOrderedSimplexList on stream with 4 vertices."
(assertEqual "Should return object with lengths 0 and 1 with four vertices inside the 1 key." (OrderedSimplexList [SimplexListByDegree 0 [(Simplex [])], SimplexListByDegree 1 [(Simplex [1]), (Simplex [2]), (Simplex [3]), (Simplex [4])]]) (streamToOrderedSimplexList stream3))
streamToOrderedSimplexListThreeVerticesOneEdgeTest :: TestTree
streamToOrderedSimplexListThreeVerticesOneEdgeTest = testCase "Testing streamToOrderedSimplexList on stream with 3 vertices and one edge."
(assertEqual "Should return object with lengths 0, 1, and 2 with three vertices inside the 1 key and 1 edge inside the 2 key." (OrderedSimplexList [SimplexListByDegree 0 [(Simplex [])], SimplexListByDegree 1 [(Simplex [1]), (Simplex [2]), (Simplex [3])], SimplexListByDegree 2 [(Simplex [1,2])]]) (streamToOrderedSimplexList stream1))
-- Test getBoundaryMap
simplexList1 :: SimplexListByDegree Int
simplexList2 :: SimplexListByDegree Int
simplexList1 = SimplexListByDegree 3 [(Simplex [1,2,3]), (Simplex [2,3,4])]
simplexList2 = SimplexListByDegree 2 [(Simplex [1,2]), (Simplex [2,3]), (Simplex [1,3]), (Simplex [2,4]), (Simplex [3,4])]
getBoundaryMapTest :: TestTree
getBoundaryMapTest = testCase "Testing getBoundaryMap."
(assertEqual "Should return ..." (fromLists [[1,0],[1,1],[-1,0],[0,-1],[0,1]]) (getBoundaryMap simplexList2 simplexList1))
simplexList3 :: SimplexListByDegree Int
simplexList4 :: SimplexListByDegree Int
simplexList3 = SimplexListByDegree 1 [(Simplex [1]), (Simplex [2]), (Simplex [3]), (Simplex [4])]
simplexList4 = SimplexListByDegree 2 [(Simplex [1,2]), (Simplex [1,3]), (Simplex [1,4]), (Simplex [2,3]), (Simplex [3,4])]
getBoundaryMapTest2 :: TestTree
getBoundaryMapTest2 = testCase "Testing getBoundaryMap."
(assertEqual "Should return ..." (fromLists [[-1,-1,-1,0,0],[1,0,0,-1,0],[0,1,0,1,-1],[0,0,1,0,1]]) (getBoundaryMap simplexList3 simplexList4))
simplexList5 :: SimplexListByDegree Int
simplexList6 :: SimplexListByDegree Int
simplexList5 = SimplexListByDegree 0 [(Simplex [])]
simplexList6 = SimplexListByDegree 1 [(Simplex [1]), (Simplex [2])]
getBoundaryMapTest3 :: TestTree
getBoundaryMapTest3 = testCase "Testing trivial case for getBoundaryMap."
(assertEqual "Should return ..." (fromLists [[1, 1]]) (getBoundaryMap simplexList5 simplexList6))
-- Test getHomologyDimension
simplexList7 :: SimplexListByDegree Int
simplexList8 :: SimplexListByDegree Int
simplexList9 :: SimplexListByDegree Int
simplexList7 = SimplexListByDegree 0 [(Simplex [])]
simplexList8 = SimplexListByDegree 1 [(Simplex [1]), (Simplex [2]), (Simplex [3])]
simplexList9 = SimplexListByDegree 2 [(Simplex [1,2]), (Simplex [2,3]), (Simplex [1,3])]
map1 :: Matrix Double
map2 :: Matrix Double
map1 = getBoundaryMap simplexList7 simplexList8
map2 = getBoundaryMap simplexList8 simplexList9
getHomologyDimensionTest :: TestTree
getHomologyDimensionTest = testCase "Testing getHomologyDimension function."
(assertEqual "Should return ..." (1) (getHomologyDimension map2 map1 0))
-- Test persistence
stream6 :: Stream Int
stream6 = addSimplex (addSimplex (addSimplex initializeStream (Simplex [1,2])) (Simplex [2,3])) (Simplex [1,3])
persistenceTest :: TestTree
persistenceTest = testCase "Testing persistence function."
(assertEqual "Should return ..." (BettiVector [1,1]) (persistence stream6 1))
persistenceTest2 :: TestTree
persistenceTest2 = testCase "Testing persistence function on filled tetrahedron."
(assertEqual "Should return ..." (BettiVector [1,0,0,0]) (persistence stream5 1))
persistenceTest3 :: TestTree
persistenceTest3 = testCase "Testing persistence function on four vertices."
(assertEqual "Should return ..." (BettiVector [4]) (persistence stream3 1))
stream7 :: Stream Int
stream7 = subtractSimplex (addSimplex initializeStream (Simplex [1,2,3,4])) (Simplex [1,2,3,4])
persistenceTest4 :: TestTree
persistenceTest4 = testCase "Testing persistence function."
(assertEqual "Should return ..." (BettiVector [1,0,1]) (persistence stream7 1))
|
#==============================================================================
# Test for cforest
#==============================================================================
source("tests.r")
test.data <- list(
call = list(
substitute(
cforest(
Sepal.Length ~ ., data = iris,
controls = cforest_control(ntree = 10, mtry = 3)
)
),
substitute(
cforest(
Species ~ ., data = iris,
controls = cforest_control(ntree = 10, mtry = 3)
)
)
),
formula = list(
Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width + Species,
Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width
),
model.type = list("regression", "classification")
)
test.model.adapter("cforest", iris, test.data, object.has.call = FALSE)
rm(test.data)
|
Formal statement is: proposition isolated_zeros: assumes holf: "f holomorphic_on S" and "open S" "connected S" "\<xi> \<in> S" "f \<xi> = 0" "\<beta> \<in> S" "f \<beta> \<noteq> 0" obtains r where "0 < r" and "ball \<xi> r \<subseteq> S" and "\<And>z. z \<in> ball \<xi> r - {\<xi>} \<Longrightarrow> f z \<noteq> 0" Informal statement is: If $f$ is holomorphic on an open connected set $S$, $f(\xi) = 0$ for some $\xi \in S$, and $f(\beta) \neq 0$ for some $\beta \in S$, then there is an $r > 0$ with $B(\xi, r) \subseteq S$ such that $f$ is nonzero on the punctured ball $B(\xi, r) \setminus \{\xi\}$.
|
Require Import Bool Arith List Omega.
Require Import Recdef Morphisms.
Require Import Program.Tactics.
Require Import Relation_Operators.
Require FMapList.
Require FMapFacts.
Require Import Classical.
Require Import Coq.Classes.RelationClasses.
Require Import OrderedType OrderedTypeEx DecidableType.
Require Import Sorting.Permutation.
Import ListNotations.
Module NatMap := FMapList.Make Nat_as_OT.
Definition address := nat.
Definition version := nat.
Definition value := nat.
Definition lock := bool.
Definition variable := nat.
Definition store := NatMap.t value.
Definition heap := address -> option (value * lock * version).
Definition tid := nat.
Ltac myauto :=
repeat match goal with
| |- context[_] =>
auto 100; intuition; cbn in *; simpl in *; auto 100
| |- context[_] =>
try contradiction; try discriminate
end.
Inductive action:=
|dummy: action
|start_txn: action
|read_item: version -> action
|write_item: value -> action
|try_commit_txn: action
|lock_write_item: action
|validate_read_item: Prop -> action
|abort_txn: Prop -> action (*True means abort needs to unlock before abort, False means the transaction about to be aborted does not contain locks*)
(*|restart_txn: action*)
|complete_write_item: (*value -> action*)version -> action
(*|unlock_write_item: version -> action*)
(*|invalid_write_item: value -> action*)
|commit_txn: action
|seq_point: action.
(*|obtain_global_tid: action.*)
(*sp later than last lock, but must before the first commit*)
Definition trace := list (tid * action).
(*
Returns all tid*action with tid tid in the trace.
*)
Definition trace_filter_tid tid tr : trace :=
filter (fun pr => fst pr =? tid) tr.
(*
Returns all commit actions in the trace.
*)
(*
Definition trace_filter_commit tr: trace :=
filter (fun pr => match snd pr with
| commit_txn => true
| _ => false
end) tr.
*)
(*
Returns all write actions in the trace.
*)
Definition trace_filter_write tr: trace :=
filter (fun pr => match snd pr with
| write_item _ => true
| _ => false
end) tr.
(*
Returns all read actions in the trace.
*)
Definition trace_filter_read tr: trace :=
filter (fun pr => match snd pr with
| read_item _ => true
| _ => false
end) tr.
(*
Returns all complete_write actions in the trace for updating version number of reads
*)
Definition trace_filter_complete tr: trace :=
filter (fun pr => match snd pr with
| complete_write_item _ => true
| _ => false
end) tr.
(*
Returns the last action of the transaction tid
Returns dummy if there is no transaction with id tid.
*)
Definition trace_tid_last tid t: action :=
hd dummy (map snd (trace_filter_tid tid t)).
Ltac remove_unrelevant_last_txn :=
repeat match goal with
| [H: context[trace_tid_last ?tid ((?tid0, _) :: ?t) = _] |-_] =>
unfold trace_tid_last in H; inversion H;
destruct (Nat.eq_dec tid tid0); subst
| [H: context[hd _
(map snd
(if ?tid0 =? ?tid0
then (?tid0, ?x) :: trace_filter_tid ?tid0 ?t
else trace_filter_tid ?tid0 ?t)) = ?y] |-_] =>
rewrite <- beq_nat_refl in H; simpl in H; inversion H
| [H: context[?tid <> ?tid0] |-_] =>
apply not_eq_sym in H; apply Nat.eqb_neq in H
end.
(*
Returns the version number of the last commit
If there is no commit in the trace, returns 0.
*)
(*
Definition trace_commit_last t: version :=
match hd dummy (map snd (trace_filter_commit t)) with
| commit_txn n => n
| _ => 0
end.
*)
(*
Returns the value of the last write
If there is no write in the trace, returns 0.
*)
(*
PROBLEMATIC: last write cannot be 0 in this case.
SUGGESTION: Use "option"
*)
Definition trace_write_last t: value :=
match hd dummy (map snd (trace_filter_write t)) with
| write_item n => n
| _ => 0
end.
(*
Returns the version number of the last commit/complete_write
If there is no commit in the trace, returns 0.
*)
(*
Definition trace_commit_complete_last t: version :=
match hd dummy (map snd (trace_filter_commit_complete t)) with
| commit_txn n => n
| complete_write_item n => n
| _ => 0
end.
*)
Definition trace_complete_last t: version :=
match hd dummy (map snd (trace_filter_complete t)) with
| complete_write_item n => n
| _ => 0
end.
(*
Returns all lock_write_item actions in the trace.
*)
Definition trace_filter_lock tr: trace :=
filter (fun pr => match snd pr with
| lock_write_item => true
| _ => false
end) tr.
(*
A complete_write_item or a lock-holding abort_txn releases the lock.
If a transaction contains either of these actions, it has unlocked.
*)
Definition trace_filter_unlock tr: trace :=
filter (fun pr => match snd pr with
| complete_write_item _ => true
| abort_txn True => true
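                    (* caution: `True` in the pattern above is a fresh binder,
                       not the proposition True (a Prop argument cannot be
                       matched on), so that branch covers every abort_txn *)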
| _ => false
end) tr.
(*
Returns the length of a trace
*)
Function length (tr: trace) : nat :=
match tr with
| [] => 0
| _ :: tr' => S (length tr')
end.
(*
Returns True if every lock_write_item is matched by an unlock, i.e. no lock is currently held.
Returns False if some lock is still held.
*)
Definition check_lock_or_unlock tr: Prop :=
match
length (trace_filter_lock tr) <=? length (trace_filter_unlock tr)
with
| true => True
| false => False
end.
(*
Returns true if an action is not a write action
Returns false if an action is a write action.
*)
Definition action_no_write e : Prop :=
match e with
| write_item _ => False
| _ => True
end.
Definition action_no_read e : Prop :=
match e with
| read_item _ => False
| _ => True
end.
(*
Returns true if an action is not a commit_txn action
Returns false if an action is a commit_txn action.
*)
Definition action_no_commit e : Prop :=
match e with
| commit_txn => False
| _ => True
end.
Definition action_no_seq_point e: Prop:=
match e with
| seq_point => False
| _ => True
end.
Definition action_is_lock e: Prop:=
match e with
| lock_write_item => True
| _ => False
end.
(*
Returns true if the transaction tid's trace has no writes
Returns false otherwise.
*)
Definition trace_no_writes tid t: Prop :=
Forall action_no_write (map snd (trace_filter_tid tid t)).
Definition trace_no_reads tid t: Prop :=
Forall action_no_read (map snd (trace_filter_tid tid t)).
(*
Returns true if the transaction tid's trace has no commits
Returns false otherwise.
*)
Definition trace_no_commits tid t: Prop :=
Forall action_no_commit (map snd (trace_filter_tid tid t)).
Definition trace_no_seq_points tid t: Prop :=
Forall action_no_seq_point (map snd (trace_filter_tid tid t)).
Definition trace_has_locks tid t: Prop :=
Forall action_is_lock (map snd (trace_filter_tid tid t)).
(*
Returns all read_item versions in a trace of a particular transaction.
*)
Function read_versions tr: list version :=
match tr with
| [] => []
| (read_item v) :: tail => v :: read_versions tail
| _ :: tail => read_versions tail
end.
(*
Returns all read_item versions of tid in a trace tr.
*)
Definition read_versions_tid tid tr: list version :=
read_versions (map snd (trace_filter_tid tid tr)).
(*
Returns True if reads are valid
Returns False otherwise
*)
Function check_version (vs : list version) (v : version) : Prop :=
match vs with
| [] => True
| hd :: tail => if hd =? v then check_version tail v
else False
end.
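(* Illustrative check: a read set whose versions all match the current
   version validates; this computes to True. *)
Eval compute in check_version [0; 0] 0.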
(*
Returns all start/read/write actions in the trace.
*)
(*
Definition trace_filter_startreadwrite tid tr: trace :=
filter (fun pr => (fst pr =? tid) &&
(match snd pr with
| read_item _ => true
| write_item _ => true
| start_txn => true
| _ => false
end)) tr.
*)
(*
Returns all newtid with start/read/write actions in the trace.
*)
(*
Function trace_rollback (newtid: nat) (newver: version) (tr:trace): trace :=
match tr with
| [] => []
| (tid, read_item _) :: tr'
=> [(newtid, read_item newver)] ++ trace_rollback newtid newver tr'
| (tid, write_item val) :: tr'
=> [(newtid, write_item val)] ++ trace_rollback newtid newver tr'
| (tid, start_txn) :: tr'
=> [(newtid, start_txn)] ++ trace_rollback newtid newver tr'
| _ :: tr' => trace_rollback newtid newver tr'
end.
*)
Inductive sto_trace : trace -> Prop :=
| empty_step : sto_trace []
| start_txn_step: forall t tid,
(*trace_tid_last tid t = dummy*)
(* this tid doesn’t have start_txn*)
trace_filter_tid tid t = []
-> sto_trace t
-> sto_trace ((tid, start_txn)::t)
| read_item_step: forall t tid val oldver,
trace_tid_last tid t = start_txn (* start/read/write before read*)
\/ trace_tid_last tid t = read_item oldver
\/ trace_tid_last tid t = write_item val
(*check locked or not*)
-> check_lock_or_unlock t
-> sto_trace t
-> sto_trace ((tid, read_item (trace_complete_last t)) :: t)
| write_item_step: forall t tid oldval val ver,
trace_tid_last tid t = start_txn
\/ trace_tid_last tid t = read_item ver
\/ trace_tid_last tid t = write_item oldval
-> sto_trace t
-> sto_trace ((tid, write_item val) :: t)
| try_commit_txn_step: forall t tid ver val,
trace_tid_last tid t = read_item ver
\/ trace_tid_last tid t = write_item val
-> sto_trace t
-> sto_trace ((tid, try_commit_txn)::t)
| lock_write_item_step: forall t tid,
~ trace_no_writes tid t
-> check_lock_or_unlock t
-> trace_tid_last tid t = try_commit_txn
-> sto_trace t
-> sto_trace ((tid, lock_write_item) :: t)
| validate_read_item_step: forall t tid,
~ trace_no_reads tid t
-> (trace_tid_last tid t = try_commit_txn /\ trace_no_writes tid t ) (*read only*)
\/ trace_tid_last tid t = lock_write_item (*/\ ~ trace_no_writes tid t*)(*lock write*)
-> sto_trace t
-> sto_trace ((tid, validate_read_item (check_version (read_versions_tid tid t) (trace_complete_last t) ))::t)
(*
<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>
The other abort condition is not included here
If when trying to lock memory location to write (in lock_write_item_step), there already exists a lock on that location,
then the transaction should be aborted: lock_write_item -> abort
The other similar situation that is not encoded is: read_item -> abort
THIS MODEL IS NOT COMPLETE
*)
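(* A hypothetical sketch (not in the original development) of the first
   missing rule, mirroring lock_write_item_step: abort when the write
   lock cannot be acquired.
| abort_lock_unavailable_step: forall t tid,
    ~ trace_no_writes tid t
    -> ~ check_lock_or_unlock t
    -> trace_tid_last tid t = try_commit_txn
    -> sto_trace t
    -> sto_trace ((tid, abort_txn False) :: t)
*)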
| abort_txn_step: forall t tid,
trace_tid_last tid t = validate_read_item False
-> sto_trace t
-> sto_trace ((tid, abort_txn (trace_has_locks tid t)) :: t) (*abort will contain unlock....*)
(*sequential point*)
| seq_point_step: forall t tid,
(trace_tid_last tid t = validate_read_item True (*/\ ~ trace_no_reads tid t*))
\/ (trace_tid_last tid t = lock_write_item /\ trace_no_reads tid t)
(*-> trace_no_commits tid t*)
-> trace_no_seq_points tid t
-> sto_trace t
-> sto_trace ((tid, seq_point) :: t)
| complete_write_item_step: forall t tid,
~ trace_no_writes tid t
-> trace_tid_last tid t = seq_point
-> sto_trace t
-> sto_trace ((tid, complete_write_item (S (trace_complete_last t))) :: t)
| commit_txn_step: forall t tid ver,
(trace_tid_last tid t = seq_point /\ trace_no_writes tid t)
\/ (trace_tid_last tid t = complete_write_item ver)
-> sto_trace t
-> sto_trace ((tid, commit_txn) :: t).
Hint Constructors sto_trace.
Definition example_txn:=
[(2, commit_txn); (2, complete_write_item 1); (2, seq_point); (2, validate_read_item True); (2, lock_write_item); (2, try_commit_txn); (2, write_item 4); (2, read_item 0); (2, start_txn); (1, commit_txn); (1, seq_point); (1, validate_read_item True); (1, try_commit_txn); (1, read_item 0); (1, start_txn)].
Definition example_txn2:=
[(3, commit_txn); (3, seq_point); (3, validate_read_item True); (3, try_commit_txn); (3, read_item 1); (3, start_txn); (1, abort_txn False); (1, validate_read_item False); (1, try_commit_txn); (2, commit_txn); (2, complete_write_item 1); (2, seq_point); (2, validate_read_item True); (2, lock_write_item); (2, try_commit_txn); (2, write_item 4); (1, read_item 0); (2, read_item 0); (2, start_txn); (1, start_txn)].
(*
Returns the serialized sequence of transactions in the STO trace based on seq_point of each transaction
The first element (tid) of the sequence is the first transaction that completes in the serial trace
Note that STO-trace is constructed in a reverse order: the first (tid * action) pair is the last operation in the trace
*)
Function seq_list (sto_trace: trace): list nat:=
match sto_trace with
| [] => []
| (tid, seq_point) :: tail => tid :: seq_list tail
| (tid, dummy) :: tail => seq_list tail
| (tid, start_txn) :: tail => seq_list tail
| (tid, read_item n) :: tail => seq_list tail
| (tid, write_item n) :: tail => seq_list tail
| (tid, try_commit_txn) :: tail => seq_list tail
| (tid, lock_write_item) :: tail => seq_list tail
| (tid, validate_read_item b) :: tail => seq_list tail
| (tid, abort_txn _) :: tail => seq_list tail
| (tid, complete_write_item n) :: tail => seq_list tail
| (tid, commit_txn) :: tail => seq_list tail
end.
Eval compute in seq_list example_txn.
Eval compute in seq_list example_txn2.
Function create_serialized_trace (sto_trace: trace) (seqls : list nat): trace:=
match seqls with
| [] => []
| head :: tail
=> trace_filter_tid head sto_trace ++ create_serialized_trace sto_trace tail
end.
(*
Check whether an element a is in the list l
*)
Fixpoint In_bool (a:nat) (l:list nat) : bool :=
match l with
| [] => false
| b :: m => (b =? a) || In_bool a m
end.
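(* Quick sanity check: 2 occurs in [1; 2; 3]; this computes to true. *)
Eval compute in In_bool 2 [1; 2; 3].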
Fixpoint does_not_contain_tid tid (tr:trace) : Prop :=
match tr with
| [] => True
| (tid', _) :: rest => tid <> tid' /\ does_not_contain_tid tid rest
end.
(*
The function checks if a trace is a serial trace by making sure that
tid is only increasing as we traverse the trace.
In this function, we assume that the trace is in the correct order.
That is, the first (tid*action) in the trace is actually the first one that gets to be executed
*)
Function check_is_serial_trace (tr: trace) : Prop :=
match tr with
| [] => True
| (tid, x) :: rest =>
match rest with
| [] => True
| (tid', y) :: _ => (tid = tid' \/ trace_filter_tid tid rest = [])
/\ check_is_serial_trace rest
end
end.
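(* Illustrative evaluation: unfolds the per-step serial-order conditions
   for the serialized version of example_txn. *)
Eval compute in check_is_serial_trace (create_serialized_trace example_txn (seq_list example_txn)).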
(*
This function executes STO trace in the reverse order
The goal is to record all read and write actions of all *committed* transactions
*)
Function exec (sto_trace: trace) (commit_tid: list nat) : list (tid * action) :=
match sto_trace with
| [] => []
| (tid, action) :: tail => if (In_bool tid commit_tid)
then match action with
| read_item _ => (tid, action) :: exec tail commit_tid
| write_item _ => (tid, action) :: exec tail commit_tid
| _ => exec tail commit_tid
end
else exec tail commit_tid
end.
Eval compute in exec example_txn (seq_list example_txn).
(*
This function returns all values that is written in a trace to a memory location
*)
Function tid_write_value (trace: trace) : list value :=
match trace with
| [] => []
| (_, write_item val) :: tail => val :: tid_write_value tail
| _ :: tail => tid_write_value tail
end.
(*
This function returns a list of pairs recording the write values of each tid in the trace:
the first element of a pair is the tid;
the second element is the list of values written by the transaction with id tid.
*)
Function get_write_value (trace: trace) (tids: list nat) : list (nat * (list value)):=
match tids with
| [] => []
| head :: tail => (head, tid_write_value (trace_filter_tid head trace)) :: get_write_value trace tail
end.
Definition get_write_value_out (sto_trace: trace) : list (nat * (list value)) :=
get_write_value sto_trace (seq_list sto_trace).
Eval compute in get_write_value_out example_txn.
(*
This function compares values in two lists one by one
The value at each position in one list should be the same as that in the other list
*)
Function compare_value (ls1: list value) (ls2: list value): bool:=
match ls1, ls2 with
| [], [] => true
| _ , [] => false
| [], _ => false
| h1::t1, h2::t2 => if h1=?h2 then compare_value t1 t2 else false
end.
(*
We compare writes of two traces in this function
If they have the same write sequence, then the function will return True
****************************************************
We assume that two traces have the same tids in the same sequence;
the (tid1 =? tid2) test in the code below guards against a mismatch in tids.
****************************************************
*)
Function compare_write_list (ls1: list (nat * (list value))) (ls2:list (nat * (list value))): Prop :=
match ls1, ls2 with
| [], [] => True
| _ , [] => False
| [], _ => False
| (tid1, ver1)::t1, (tid2, ver2)::t2
=> if (tid1 =? tid2 ) && (compare_value ver1 ver2) then compare_write_list t1 t2
else False
end.
Definition write_synchronization trace1 trace2: Prop:=
compare_write_list (get_write_value_out trace1) (get_write_value_out trace2).
(*
The function returns the last write value of a STO-trace
*)
Definition last_write_value trace: nat:=
trace_write_last (exec trace (seq_list trace)).
Eval compute in last_write_value example_txn.
(*
Definition write_synchronization trace1 trace2: Prop:=
if (last_write_value trace1) =? (last_write_value trace2)
then True
else False.
*)
Eval compute in write_synchronization example_txn (create_serialized_trace example_txn (seq_list example_txn)).
(*
This function returns the version numbers of all the reads in a trace
*)
Function tid_read_version (trace: trace) : list version :=
match trace with
| [] => []
| (_, read_item ver) :: tail => ver :: tid_read_version tail
| _ :: tail => tid_read_version tail
end.
(*
This function groups all versions of reads of a transaction in a trace.
This returns a list of pairs
The first element of a pair is the tid
The second element is a list of all versions of the reads from the transaction tid
*)
Function get_read_version (trace: trace) (tids: list nat) : list (nat * (list version)):=
match tids with
| [] => []
| head :: tail => (head, tid_read_version (trace_filter_tid head trace)) :: get_read_version trace tail
end.
Definition get_read_version_out (trace: trace) : list (nat * (list version)) :=
get_read_version trace (seq_list trace).
Eval compute in get_read_version_out example_txn.
Eval compute in get_read_version_out example_txn2.
(*
This function compares all read versions of two lists.
This is the same as comparing all writes in the previous functions.
*)
Function compare_version (ls1: list version) (ls2: list version): bool:=
match ls1, ls2 with
| [], [] => true
| _ , [] => false
| [], _ => false
| h1::t1, h2::t2 => if h1=?h2 then compare_version t1 t2 else false
end.
Function compare_read_list (ls1: list (nat * (list version))) (ls2:list (nat * (list version))): Prop :=
match ls1, ls2 with
| [], [] => True
| _ , [] => False
| [], _ => False
| (tid1, ver1)::t1, (tid2, ver2)::t2
=> if (tid1 =? tid2 ) && (compare_version ver1 ver2) then compare_read_list t1 t2
else False
end.
Definition read_synchronization trace1 trace2: Prop:=
compare_read_list (get_read_version_out trace1) (get_read_version_out trace2).
Eval compute in read_synchronization example_txn (create_serialized_trace example_txn (seq_list example_txn)).
Eval compute in compare_read_list [(1, [0;0]); (2, [1;1])] [(1, [0]);(2, [1;1])].
(*
Two traces can be considered equivalent in execution if they produce the same reads and writes
*)
Definition Exec_Equivalence trace1 trace2: Prop:=
write_synchronization trace1 trace2 /\ read_synchronization trace1 trace2.
Eval compute in Exec_Equivalence example_txn (create_serialized_trace example_txn (seq_list example_txn)).
(*
Function create_serialized_trace (sto_trace: trace) (sto_trace_copy: trace): trace:=
match sto_trace with
| [] => []
| (tid, seq_point) :: tail
=> trace_filter_tid tid sto_trace_copy ++ create_serialized_trace tail sto_trace_copy
| _ :: tail => create_serialized_trace tail sto_trace_copy
end.
Eval compute in create_serialized_trace example_txn example_txn.
Eval compute in create_serialized_trace example_txn2 example_txn2.
Lemma serial_action_remove tid action t:
sto_trace ((tid, action) :: t) ->
~ In (tid, seq_point) t ->
create_serialized_trace t ((tid, action) :: t) = create_serialized_trace t t.
Proof.
intros.
apply sto_trace_app in H.
induction H.
simpl. auto.
simpl.
Admitted.
*)
Lemma sto_trace_app tid action t:
sto_trace ((tid, action) :: t) -> sto_trace t.
Proof.
intros.
inversion H; subst; auto.
Qed.
Lemma sto_trace_app2 t1 t2:
sto_trace (t1 ++ t2) -> sto_trace t2.
Proof.
intros.
induction t1. rewrite app_nil_l in H. auto.
simpl in *. destruct a.
apply sto_trace_app with (tid0 := t) (action0:= a) in H.
apply IHt1 in H. auto.
Qed.
(* some error with this lemma
Lemma sto_trace_app2 tid tid0 action action0 t:
tid <> tid0 -> sto_trace ((tid, action) :: (tid0, action0) :: t) -> sto_trace ((tid, action) :: t).
Proof.
intros.
inversion H0; subst.
apply sto_trace_app in H5. unfold trace_tid_last in H3; simpl in H3.
apply not_eq_sym in H. apply Nat.eqb_neq in H; rewrite H in H3.
apply start_txn_step with (tid := tid) in H5; [ auto | unfold trace_tid_last; auto ].
apply sto_trace_app in H6. unfold trace_tid_last in H4; simpl in H4.
apply not_eq_sym in H. apply Nat.eqb_neq in H; rewrite H in H4.
unfold check_lock_or_unlock in H5. unfold trace_filter_lock in H5.
unfold trace_filter_unlock in H5.
apply read_item_step with (tid := tid) (val := val) (oldver := oldver) in H6; [ auto | unfold trace_tid_last; auto | auto].
Qed.
*)
Lemma seq_list_not_seqpoint tid action t:
sto_trace ((tid, action) :: t) ->
action <> seq_point ->
~ In tid (seq_list ((tid, action) :: t))
-> ~ In tid (seq_list t).
Proof.
intros; destruct action; simpl in *; auto.
Qed.
Lemma seq_list_not_seqpoint2 tid action t:
sto_trace ((tid, action) :: t) ->
~ In tid (seq_list ((tid, action) :: t))
-> ~ In tid (seq_list t).
Proof.
intros; destruct action; simpl in *; auto.
Qed.
Lemma seq_list_seqpoint tid action t:
sto_trace ((tid, action) :: t) ->
action = seq_point ->
In tid (seq_list ((tid, action) :: t)).
Proof.
intros; subst; simpl.
left. auto.
Qed.
Lemma trace_seqlist_seqpoint t tid:
In (tid, seq_point) t
-> In tid (seq_list t).
Proof.
intros.
functional induction seq_list t.
inversion H.
destruct (Nat.eq_dec tid tid0); subst; simpl.
left. auto.
right. apply IHl. apply in_inv in H. destruct H.
inversion H. apply Nat.eq_sym in H1. contradiction. auto.
all: destruct (Nat.eq_dec tid tid0); subst; apply IHl; apply in_inv in H; destruct H; try inversion H; auto.
Qed.
Lemma trace_seqlist_seqpoint_rev t tid:
In tid (seq_list t)
-> In (tid, seq_point) t.
Proof.
intros.
functional induction seq_list t.
inversion H.
destruct (Nat.eq_dec tid tid0); subst; simpl; simpl in H. left. auto.
destruct H. apply eq_sym in H. congruence. apply IHl in H. right. auto.
all: destruct (Nat.eq_dec tid tid0); subst; apply in_cons; auto.
Qed.
Lemma filter_app (f: nat * action -> bool) l1 l2:
filter f (l1 ++ l2) = filter f l1 ++ filter f l2.
Proof.
induction l1.
- simpl. auto.
- Search (filter).
simpl. remember (f a) as X. destruct X.
simpl. rewrite IHl1. auto. auto.
Qed.
Lemma trace_filter_tid_app tid tr1 tr2:
trace_filter_tid tid (tr1 ++ tr2) =
trace_filter_tid tid tr1 ++ trace_filter_tid tid tr2.
Proof.
unfold trace_filter_tid.
apply filter_app.
Qed.
Lemma Forall_app (P: action -> Prop) l1 l2:
Forall P (l1 ++ l2) <-> Forall P l1 /\ Forall P l2.
Proof.
split.
- intros. split;
rewrite Forall_forall in *.
intros. Search (In _ ( _ ++ _ )).
assert (In x l1 \/ In x l2 -> In x (l1 ++ l2)). apply in_or_app.
assert (In x l1 \/ In x l2). left. auto.
apply H1 in H2. auto.
intros.
assert (In x l1 \/ In x l2 -> In x (l1 ++ l2)). apply in_or_app.
assert (In x l1 \/ In x l2). right. auto.
apply H1 in H2. auto.
- intros. destruct_pairs.
rewrite Forall_forall in *. Search (In _ ( _ ++ _ )).
intros.
apply in_app_or in H1.
(* firstorder. solved here*)
destruct H1.
apply H in H1. auto. apply H0 in H1. auto.
Qed.
Lemma seq_list_no_two_seqpoint t tid:
sto_trace ((tid, seq_point) :: t)
-> ~ In (tid, seq_point) t.
Proof.
intros.
assert (sto_trace t). { apply sto_trace_app with (tid0 := tid) (action0 := seq_point). auto. }
inversion H.
intuition.
unfold trace_no_seq_points in H4.
apply in_split in H6.
destruct H6. destruct H3.
rewrite H3 in H4. simpl in H4.
rewrite trace_filter_tid_app in H4.
simpl in H4.
rewrite <-beq_nat_refl in H4.
rewrite map_app in H4.
rewrite Forall_app in H4.
destruct H4. simpl in H6.
apply Forall_inv in H6. simpl in H6. auto.
unfold trace_no_seq_points in H4.
apply in_split in H6.
destruct H6. destruct H6.
rewrite H6 in H4. simpl in H4.
rewrite trace_filter_tid_app in H4.
simpl in H4.
rewrite <-beq_nat_refl in H4.
rewrite map_app in H4.
rewrite Forall_app in H4.
destruct H4. simpl in H7.
apply Forall_inv in H7. simpl in H7. auto.
Qed.
Lemma seq_list_no_two_seqpoint2 t1 t2 tid:
sto_trace (t1 ++ (tid, seq_point) :: t2)
-> ~ In (tid, seq_point) t1.
Proof.
intros.
intuition.
apply in_split in H0. destruct H0. destruct H0.
rewrite H0 in H.
rewrite <- app_assoc in H.
apply sto_trace_app2 in H.
inversion H; subst.
unfold trace_no_seq_points in H4.
rewrite trace_filter_tid_app in H4.
rewrite map_app in H4. rewrite Forall_app in H4.
destruct H4. simpl in H1. rewrite <- beq_nat_refl in H1.
simpl in H1. apply Forall_inv in H1. simpl in H1. auto.
Qed.
Lemma seq_point_after t1 t2 tid action:
sto_trace ((tid, action) :: t1 ++ (tid, seq_point) :: t2)
-> action = commit_txn \/ action = complete_write_item (S (trace_complete_last (t1 ++ (tid, seq_point) :: t2))).
Proof.
intros.
induction t1.
simpl in H.
- inversion H; subst.
-- simpl in H2. rewrite <- beq_nat_refl in H2. inversion H2.
-- unfold trace_tid_last in H3. simpl in H3. rewrite <- beq_nat_refl in H3. repeat destruct or H3; inversion H3.
-- unfold trace_tid_last in H2. simpl in H2. rewrite <- beq_nat_refl in H2. repeat destruct or H2; inversion H2.
-- unfold trace_tid_last in H2. simpl in H2. rewrite <- beq_nat_refl in H2. repeat destruct or H2; inversion H2.
-- unfold trace_tid_last in H5. simpl in H5. rewrite <- beq_nat_refl in H5. inversion H5.
-- unfold trace_tid_last in H4. simpl in H4. rewrite <- beq_nat_refl in H4. destruct H4. destruct H0. simpl in H0. inversion H0. simpl in H0. inversion H0.
-- unfold trace_tid_last in H2. simpl in H2. rewrite <- beq_nat_refl in H2. inversion H2.
-- unfold trace_tid_last in H3. simpl in H3. rewrite <- beq_nat_refl in H3. destruct H3. inversion H0. inversion H0. inversion H1.
-- unfold trace_tid_last in H4. simpl in H4. rewrite <- beq_nat_refl in H4. simpl in *. right. auto.
-- auto.
- destruct a. destruct a.
-- destruct (Nat.eq_dec tid t); subst; apply sto_trace_app in H; inversion H.
-- destruct (Nat.eq_dec tid t); subst.
admit.
Admitted.
Lemma seq_list_last_tid_start_txn tid t:
sto_trace t ->
In tid (seq_list t) ->
trace_tid_last tid t <> start_txn.
Proof.
intros ST; induction ST; intros.
all: cbn in *.
contradiction.
all: unfold trace_tid_last; simpl; destruct (Nat.eq_dec tid0 tid); [subst |].
apply trace_seqlist_seqpoint_rev in H0.
apply in_split in H0. destruct H0. destruct H0.
rewrite H0 in H. rewrite trace_filter_tid_app in H. simpl in H.
rewrite <- beq_nat_refl in H. apply app_eq_nil in H. destruct H.
inversion H1.
apply IHST in H0. unfold trace_tid_last in H0.
apply Nat.eqb_neq in n; rewrite n. auto.
all: try rewrite <- beq_nat_refl; simpl; try congruence.
all: apply Nat.eqb_neq in n; rewrite n; try apply IHST in H1; auto.
apply Nat.eqb_neq in n. destruct H1. congruence.
apply IHST in H1; auto.
Qed.
(*
Lemma seq_list_last_tid_read_item tid t:
sto_trace t ->
In tid (seq_list t) ->
exists ver, trace_tid_last tid t <> read_item ver.
Proof.
intros ST; induction ST; intros.
all: cbn in *.
contradiction.
all: unfold trace_tid_last; simpl; destruct (Nat.eq_dec tid0 tid); [subst |].
apply trace_seqlist_seqpoint_rev in H0.
apply in_split in H0. destruct H0. destruct H0.
rewrite H0 in H. rewrite trace_filter_tid_app in H. simpl in H.
rewrite <- beq_nat_refl in H. apply app_eq_nil in H. destruct H.
inversion H1.
apply IHST in H0. unfold trace_tid_last in H0.
apply Nat.eqb_neq in n; rewrite n. auto.
all: try rewrite <- beq_nat_refl; simpl; try congruence.
all: apply Nat.eqb_neq in n; rewrite n; try apply IHST in H1; auto.
apply Nat.eqb_neq in n. apply in_app_or in H1. destruct H1.
apply IHST in H1; auto.
simpl in H1. destruct H1. congruence. contradiction.
Qed.*)
Lemma seq_list_seq tid t:
sto_trace t ->
trace_tid_last tid t = seq_point
-> In tid (seq_list t).
Proof.
intros.
induction H; simpl; try discriminate.
1, 3-4, 7, 10 : remove_unrelevant_last_txn; rewrite n in H3; apply IHsto_trace in H3; auto.
1, 3, 5: remove_unrelevant_last_txn; rewrite n in H4; apply IHsto_trace in H4; auto.
1: remove_unrelevant_last_txn; rewrite n in H5; apply IHsto_trace in H5; auto.
remove_unrelevant_last_txn.
left. auto.
rewrite n in H4.
apply IHsto_trace in H4.
right. auto.
Qed.
Lemma same_version v1 v2:
complete_write_item v1 = complete_write_item v2
-> v1 = v2.
Proof.
  intros H; injection H; auto.
Qed.
Lemma seq_list_complete tid t ver:
sto_trace t ->
trace_tid_last tid t = complete_write_item ver
-> In tid (seq_list t).
Proof.
intros.
induction H; simpl; try discriminate.
1, 3-4, 7, 10 : remove_unrelevant_last_txn; rewrite n in H3; apply IHsto_trace in H3; auto.
1, 3, 4: remove_unrelevant_last_txn; rewrite n in H4; apply IHsto_trace in H4; auto.
1: remove_unrelevant_last_txn; rewrite n in H5; apply IHsto_trace in H5; auto.
remove_unrelevant_last_txn.
simpl in *. rewrite <- beq_nat_refl in H0; simpl in *.
apply seq_list_seq in H1. auto. auto.
rewrite n in H4.
apply IHsto_trace in H4. myauto.
Qed.
Lemma seq_point_before_commit (t:trace) (tid: tid):
sto_trace ((tid, commit_txn) :: t) ->
In (tid, seq_point) t.
Proof.
intros.
inversion H.
destruct H2.
destruct H2.
apply seq_list_seq in H2.
apply trace_seqlist_seqpoint_rev in H2.
auto. auto.
apply seq_list_complete in H2.
apply trace_seqlist_seqpoint_rev in H2.
auto. auto.
Qed.
Lemma seq_point_before_complete (t:trace) (tid: tid):
sto_trace ((tid, complete_write_item (S (trace_complete_last t))) :: t) ->
In (tid, seq_point) t.
Proof.
intros.
inversion H.
apply seq_list_seq in H4.
apply trace_seqlist_seqpoint_rev in H4.
auto. auto.
Qed.
Lemma seq_list_action tid action t:
sto_trace ((tid, action) :: t) ->
action = seq_point \/ action = commit_txn \/ action = complete_write_item (S (trace_complete_last t))
<-> In tid (seq_list ((tid, action) :: t)).
Proof.
split.
intros. destruct H0.
apply seq_list_seqpoint; auto.
subst. inversion H; subst.
simpl.
1-8: destruct H0; inversion H0.
simpl. apply seq_list_seq in H5. auto. auto.
destruct H3. destruct H1. simpl.
apply seq_list_seq in H1; auto.
apply seq_list_complete in H1; auto.
intros.
apply trace_seqlist_seqpoint_rev in H0.
apply in_inv in H0.
destruct H0.
inversion H0. left. auto.
right.
apply in_split in H0. destruct H0. destruct H0. rewrite H0 in *.
apply seq_point_after in H. auto.
Qed.
Lemma seq_list_action_neg tid action t:
sto_trace ((tid, action) :: t) ->
action <> seq_point /\ action <> commit_txn /\ action <> complete_write_item (S (trace_complete_last t))
<-> ~ In tid (seq_list ((tid, action) :: t)).
Proof.
intros. split.
intros.
intuition.
apply seq_list_action in H .
destruct H. apply H3 in H1.
destruct H1; auto. destruct H1; auto.
intros.
intuition.
apply seq_list_action in H .
destruct H.
assert (action = seq_point \/ action = commit_txn \/ action = complete_write_item (S (trace_complete_last t))). { left. auto. }
apply H in H3. auto.
apply seq_list_action in H .
destruct H.
assert (action = seq_point \/ action = commit_txn \/ action = complete_write_item (S (trace_complete_last t))). { right. left. auto. }
apply H in H3. auto.
apply seq_list_action in H .
destruct H.
assert (action = seq_point \/ action = commit_txn \/ action = complete_write_item (S (trace_complete_last t))). { right. right. auto. }
apply H in H3. auto.
Qed.
Lemma seqlist_filter t t' tid:
sto_trace t
-> ~ In (tid, seq_point) t
-> seq_list (trace_filter_tid tid t ++ t') = seq_list t'.
Proof.
intros.
induction H; simpl; auto.
all: destruct (Nat.eq_dec tid0 tid); subst.
all: simpl in H0; apply not_or_and in H0; destruct H0; try apply IHsto_trace in H2.
all: try rewrite <- beq_nat_refl; simpl; auto.
all: try rewrite <- Nat.eqb_neq in n; try rewrite n; auto.
intuition.
Qed.
Lemma seq_list_split_no_seqpoint t1 t2:
seq_list (t1 ++ t2) = seq_list t1 ++ seq_list t2.
Proof.
intros.
induction t1.
simpl. auto.
simpl in *. destruct a. destruct a.
all: simpl in *; auto.
rewrite IHt1. auto.
Qed.
Lemma seq_list_split_with_seqpoint t1 tid t2:
seq_list (t1 ++ (tid, seq_point) :: t2) = seq_list t1 ++ tid :: (seq_list t2).
Proof.
intros.
rewrite seq_list_split_no_seqpoint.
simpl in *. auto.
Qed.
(*
The create_serialized_trace function creates a serialized trace.
Just like an STO-trace, the order is reversed: that is, the first (tid * action) pair in the
serial trace constructed by this function is the last operation performed in that trace.
*)
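(* Illustration (informal, assuming seq_list t = [2; 1]):
   create_serialized_trace t (seq_list t) = trace_filter_tid 2 t ++ trace_filter_tid 1 t,
   so the operations of the transaction whose seq_point comes first in seq_list
   appear first in the serialized trace. *)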
Lemma serial_action_remove tid action t:
~ In tid (seq_list t) ->
create_serialized_trace ((tid, action) :: t) (seq_list t)= create_serialized_trace t (seq_list t).
Proof.
intros.
induction (seq_list t).
simpl. auto.
simpl.
simpl in H.
assert (a <> tid /\ ~ In tid l). { intuition. }
destruct H0.
apply not_eq_sym in H0. rewrite <- Nat.eqb_neq in H0. rewrite H0.
apply IHl in H1. rewrite H1. auto.
Qed.
Lemma serial_action_helper tid action t:
sto_trace ((tid, action) :: t) ->
action <> seq_point /\ action <> commit_txn /\ action <> complete_write_item (S (trace_complete_last t))
-> create_serialized_trace ((tid, action) :: t) (seq_list ((tid, action) :: t)) =
create_serialized_trace t (seq_list t).
Proof.
intros.
assert (sto_trace ((tid, action) :: t)). { auto. }
assert (sto_trace ((tid, action) :: t)). { auto. }
inversion H; subst.
apply seq_list_action_neg in H. destruct H. apply H in H0.
apply seq_list_not_seqpoint2 in H1; [ | auto ].
apply serial_action_remove with (action0 := start_txn) in H1. auto.
apply seq_list_action_neg in H. destruct H. apply H in H0.
apply seq_list_not_seqpoint2 in H1; [ | auto ].
apply serial_action_remove with (action0 := read_item (trace_complete_last t)) in H1; auto.
apply seq_list_action_neg in H. destruct H. apply H in H0.
apply seq_list_not_seqpoint2 in H1; [ | auto ].
apply serial_action_remove with (action0 := write_item val) in H1; auto.
apply seq_list_action_neg in H. destruct H. apply H in H0.
apply seq_list_not_seqpoint2 in H1; [ | auto ].
apply serial_action_remove with (action0 := try_commit_txn) in H1; auto.
apply seq_list_action_neg in H. destruct H. apply H in H0.
apply seq_list_not_seqpoint2 in H1; [ | auto ].
apply serial_action_remove with (action0 := lock_write_item) in H1; auto.
apply seq_list_action_neg in H. destruct H. apply H in H0.
apply seq_list_not_seqpoint2 in H1; [ | auto ].
apply serial_action_remove with (action0 := validate_read_item
(check_version (read_versions_tid tid t) (trace_complete_last t))) in H1; auto.
apply seq_list_action_neg in H. destruct H. apply H in H0.
apply seq_list_not_seqpoint2 in H1; [ | auto ].
apply serial_action_remove with (action0 := abort_txn (trace_has_locks tid t)) in H1; auto.
destruct H0. congruence.
destruct H0. destruct H3. congruence.
destruct H0. destruct H3. congruence.
Qed.
Lemma serial_action_helper2 tid t:
sto_trace ((tid, seq_point)::t)
-> create_serialized_trace ((tid, seq_point)::t) (seq_list ((tid, seq_point)::t)) = trace_filter_tid tid ((tid, seq_point)::t) ++ create_serialized_trace t (seq_list t).
Proof.
intros. simpl.
rewrite <- beq_nat_refl.
assert (sto_trace ((tid, seq_point) :: t)). { auto. }
apply seq_list_no_two_seqpoint in H.
assert (~ In (tid, seq_point) t -> ~ In tid (seq_list t)). { intuition. apply trace_seqlist_seqpoint_rev in H2. auto. }
apply H1 in H.
apply serial_action_remove with (action0:= seq_point) in H.
rewrite H. auto. auto.
Qed.
Lemma serial_action_seq_list tid action t:
sto_trace ((tid, action) :: t) ->
~ In tid (seq_list ((tid, action) :: t))
-> create_serialized_trace ((tid, action) :: t) (seq_list ((tid, action) :: t)) = create_serialized_trace t (seq_list t).
Proof.
intros.
assert (sto_trace ((tid, action):: t)). { auto. }
assert (sto_trace ((tid, action):: t)). { auto. }
assert (~ In tid (seq_list t)). { apply seq_list_not_seqpoint2 with (action0:= action); auto. }
apply seq_list_action_neg in H. destruct H. apply H4 in H0.
destruct action; simpl; try apply serial_action_helper; auto.
destruct H0. congruence.
Qed.
(*
This lemma proves that the seq_point of each transaction in an STO-trace determines its serialized order in the serial trace.
*)
Lemma serial_action_split tid t t1 t2:
sto_trace t
-> t = t1 ++ (tid, seq_point) :: t2
-> create_serialized_trace t (seq_list t) =
create_serialized_trace t (seq_list t1) ++ trace_filter_tid tid ((t1 ++ (tid, seq_point) :: t2)) ++ create_serialized_trace t (seq_list t2).
Proof.
intros. rewrite H0 in *. rewrite seq_list_split_with_seqpoint.
clear H H0.
induction (seq_list t1).
simpl in *. auto.
simpl. rewrite IHl.
apply app_assoc.
Qed.
Lemma serial_action_before_commit tid t1 t2 t:
sto_trace ((tid, commit_txn) :: t)
-> t = t1 ++ (tid, seq_point) :: t2
-> create_serialized_trace ((tid, commit_txn) :: t) (seq_list ((tid, commit_txn) :: t)) =
create_serialized_trace t (seq_list t1) ++ (tid, commit_txn) :: trace_filter_tid tid t ++ create_serialized_trace t (seq_list t2).
Proof.
intros.
remember ((tid, commit_txn) :: t) as tbig.
assert (create_serialized_trace tbig (seq_list tbig) =
create_serialized_trace tbig (seq_list ((tid, commit_txn) :: t1))
++ trace_filter_tid tid tbig
++ create_serialized_trace tbig (seq_list t2)). {
repeat rewrite Heqtbig.
repeat rewrite H0.
rewrite app_comm_cons.
apply serial_action_split.
rewrite <- app_comm_cons.
rewrite <- H0.
rewrite <- Heqtbig.
auto.
auto.
}
rewrite Heqtbig in H1; simpl in H1.
rewrite <- beq_nat_refl in H1.
assert (~ In tid (seq_list t1)). {
rewrite Heqtbig in H. rewrite H0 in H.
apply sto_trace_app in H.
apply seq_list_no_two_seqpoint2 in H.
intuition. apply trace_seqlist_seqpoint_rev in H2. auto.
}
apply serial_action_remove with (action0:= commit_txn) in H2.
Admitted.
Lemma serial_action_before_complete tid t1 t2 t:
sto_trace ((tid, complete_write_item (S (trace_complete_last t))) :: t)
-> t = t1 ++ (tid, seq_point) :: t2
-> create_serialized_trace ((tid, complete_write_item (S (trace_complete_last t))) :: t) (seq_list ((tid, complete_write_item (S (trace_complete_last t))) :: t)) =
create_serialized_trace t (seq_list t1) ++ (tid, complete_write_item (S (trace_complete_last t))) :: trace_filter_tid tid t ++ create_serialized_trace t (seq_list t2).
Proof.
intros.
assert (~ In tid (seq_list t2)).
Admitted.
Lemma seq_list_equal trace:
sto_trace trace ->
seq_list (create_serialized_trace trace (seq_list trace)) = seq_list trace.
Proof.
intros. simpl.
induction H; simpl.
auto.
apply start_txn_step with (tid0 := tid0) in H0. apply serial_action_helper in H0. rewrite <- H0 in IHsto_trace. auto. split; intuition; inversion H1. auto.
apply read_item_step with (tid := tid0) (val:= val) (oldver:= oldver) in H0. apply serial_action_helper in H0. rewrite <- H0 in IHsto_trace. auto. split; intuition; inversion H2; inversion H. auto. auto.
apply write_item_step with (tid := tid0) (ver:= ver) (oldval:= oldval) (val := val) in H0. apply serial_action_helper in H0. rewrite <- H0 in IHsto_trace. auto. split; intuition; inversion H1; inversion H. auto.
apply try_commit_txn_step with (tid := tid0) (ver:= ver) (val:= val) in H0. apply serial_action_helper in H0. rewrite <- H0 in IHsto_trace. auto. split; intuition; inversion H1; inversion H. auto.
apply lock_write_item_step with (tid := tid0) in H0. apply serial_action_helper in H0. rewrite <- H0 in IHsto_trace. auto. split; intuition; inversion H3. auto. auto. auto.
apply validate_read_item_step with (tid := tid0) in H0. apply serial_action_helper in H0. rewrite <- H0 in IHsto_trace. auto. split; intuition; inversion H2. auto. auto.
apply abort_txn_step with (tid := tid0) in H0. apply serial_action_helper in H0. rewrite <- H0 in IHsto_trace. auto. split; intuition; inversion H1. auto.
rewrite <- beq_nat_refl. simpl.
apply seq_point_step with (tid:= tid0) in H1.
assert (sto_trace ((tid0, seq_point) :: t)). { auto. }
apply seq_list_no_two_seqpoint in H1.
assert (~ In (tid0, seq_point) t -> ~ In tid0 (seq_list t)). { intuition; apply trace_seqlist_seqpoint_rev in H4; apply H3 in H4; auto. }
apply H3 in H1.
apply serial_action_remove with (action0 := seq_point) in H1.
rewrite <- H1 in IHsto_trace.
assert (seq_list (trace_filter_tid tid0 t ++ create_serialized_trace ((tid0, seq_point) :: t) (seq_list t)) = seq_list (create_serialized_trace ((tid0, seq_point) :: t) (seq_list t))). { apply seqlist_filter. assert (sto_trace ((tid0, seq_point) :: t)). { auto. } apply sto_trace_app in H2. auto. apply seq_list_no_two_seqpoint in H2. auto. }
rewrite H4.
remember (seq_list (create_serialized_trace ((tid0, seq_point) :: t) (seq_list t))) as too_long. rewrite <- IHsto_trace. rewrite Heqtoo_long. auto.
auto. auto. auto.
apply complete_write_item_step with (tid := tid0) in H0; [ | auto | auto].
assert (sto_trace t). { apply sto_trace_app in H0. auto. }
assert (sto_trace ((tid0, complete_write_item (S (trace_complete_last t))) :: t)). { auto. }
apply seq_point_before_complete in H0.
apply in_split in H0. destruct H0. destruct H0.
assert (t = x ++ (tid0, seq_point) :: x0). { auto. }
assert (t = x ++ (tid0, seq_point) :: x0). { auto. }
apply serial_action_before_complete in H0; [ | auto].
simpl in H0.
unfold tid in H0. rewrite H0.
rewrite seq_list_split_no_seqpoint. simpl.
apply serial_action_split in H4; [ | auto].
rewrite <- IHsto_trace. rewrite H4. rewrite H5.
rewrite <- seq_list_split_no_seqpoint. auto.
apply commit_txn_step with (tid := tid0) (ver:= ver) in H0; [ | auto].
assert (sto_trace t). { apply sto_trace_app in H0. auto. }
assert (sto_trace ((tid0, commit_txn) :: t)). { auto. }
apply seq_point_before_commit in H0.
apply in_split in H0. destruct H0. destruct H0.
assert (t = x ++ (tid0, seq_point) :: x0). { auto. }
assert (t = x ++ (tid0, seq_point) :: x0). { auto. }
apply serial_action_before_commit in H0; [ | auto].
simpl in H0.
unfold tid in H0. rewrite H0.
rewrite seq_list_split_no_seqpoint. simpl.
apply serial_action_split in H4; [ | auto].
rewrite <- IHsto_trace. rewrite H4. rewrite H3.
rewrite <- seq_list_split_no_seqpoint. auto.
Qed.
(*
Lemma serial_action_add tid t:
sto_trace t
-> sto_trace (create_serialized_trace t (seq_list t))
-> ~ In tid (seq_list t)
-> sto_trace (trace_filter_tid tid t ++ create_serialized_trace t (seq_list t)).
Proof.
intros.
assert (~ In (tid, seq_point) t).
{ intuition. apply trace_seqlist_seqpoint in H2. apply H1 in H2. auto. }
assert (~ In (tid, seq_point) (trace_filter_tid tid t)).
{ unfold trace_filter_tid.
intuition.
apply filter_In in H3.
destruct H3. apply H2 in H3. auto. }
induction (trace_filter_tid tid t).
simpl. auto.
destruct a.
simpl in H3. apply not_or_and in H3. destruct H3.
apply IHt0 in H4.
Admitted.
*)
Lemma sto_trace_single tid t:
sto_trace ((tid, seq_point) :: t)
-> sto_trace (create_serialized_trace t (seq_list t))
-> sto_trace ((trace_filter_tid tid ((tid, seq_point) :: t)) ++ (create_serialized_trace t (seq_list t))).
Proof.
intros; simpl.
rewrite <- beq_nat_refl.
inversion H.
induction H.
Admitted.
Lemma is_sto_trace trace:
sto_trace trace ->
sto_trace (create_serialized_trace trace (seq_list trace)).
Proof.
intros.
induction H; simpl.
auto.
apply start_txn_step in H; [ | auto].
apply serial_action_helper in H. simpl in H. rewrite <- H in IHsto_trace. auto. split; intuition; inversion H1.
apply read_item_step in H; [ | auto | auto].
apply serial_action_helper in H. simpl in H. rewrite <- H in IHsto_trace. auto. split; intuition; inversion H2.
apply write_item_step with (val := val) in H; [ | auto].
apply serial_action_helper in H. simpl in H. rewrite <- H in IHsto_trace. auto. split; intuition; inversion H1.
apply try_commit_txn_step in H; [ | auto].
apply serial_action_helper in H. simpl in H. rewrite <- H in IHsto_trace. auto. split; intuition; inversion H1.
apply lock_write_item_step in H; [ | auto | auto | auto].
apply serial_action_helper in H. simpl in H. rewrite <- H in IHsto_trace. auto. split; intuition; inversion H3.
apply validate_read_item_step in H; [ | auto| auto].
apply serial_action_helper in H. simpl in H. rewrite <- H in IHsto_trace. auto. split; intuition; inversion H2; inversion H0.
apply abort_txn_step in H; [ | auto].
apply serial_action_helper in H. simpl in H. rewrite <- H in IHsto_trace. auto. split; intuition; inversion H1.
rewrite <- beq_nat_refl.
apply seq_point_step in H; [ | auto | auto].
apply sto_trace_single with (tid0 := tid0) in IHsto_trace.
unfold trace_filter_tid in IHsto_trace. simpl in IHsto_trace.
rewrite <- beq_nat_refl in IHsto_trace. unfold trace_filter_tid.
assert (sto_trace ((tid0, seq_point) :: t)). { auto. }
apply seq_list_no_two_seqpoint in H.
assert (~ In (tid0, seq_point) t -> ~ In tid0 (seq_list t)).
{ intuition. apply trace_seqlist_seqpoint_rev in H4. apply H3 in H4. auto. }
apply H3 in H.
apply serial_action_remove with (action0 := seq_point) in H; [ | auto].
rewrite <- H in IHsto_trace. auto. auto.
apply complete_write_item_step in H; [ | auto | auto].
admit.
apply commit_txn_step in H; [ | auto].
admit.
Admitted.
Lemma check_app a tr:
check_is_serial_trace (a :: tr) -> check_is_serial_trace tr.
Proof.
intros.
destruct a. destruct tr.
auto. destruct p. destruct tr.
simpl. auto. simpl in H.
destruct (Nat.eq_dec t0 t).
subst. rewrite <- beq_nat_refl in H. simpl in *.
destruct H. auto.
rewrite <- Nat.eqb_neq in n. rewrite n in H. simpl in *.
destruct H. auto.
Qed.
Lemma check_split_right tr1 tr2:
check_is_serial_trace (tr1 ++ tr2)
-> check_is_serial_trace tr2.
Proof.
intros.
induction tr1.
rewrite app_nil_l in H. auto.
simpl in H. apply check_app in H. apply IHtr1 in H. auto.
Qed.
(*
Eval compute in check_is_serial_trace [(2, commit_txn 1); (2, seq_point); (2, complete_write_item 1); (2, validate_read_item True); (2, lock_write_item); (2, try_commit_txn); (2, write_item 4); (2, read_item 0); (2, start_txn); (3, commit_txn 1); (3, seq_point); (3, validate_read_item True); (3, try_commit_txn); (3, read_item 1); (3, start_txn)].
Eval compute in check_is_serial_trace [(3, commit_txn 1); (3, seq_point); (3, validate_read_item True); (3, try_commit_txn); (3, read_item 1); (3, start_txn); (1, abort_txn); (1, validate_read_item False); (1, try_commit_txn); (2, commit_txn 1); (2, seq_point); (2, complete_write_item 1); (2, validate_read_item True); (2, lock_write_item); (2, try_commit_txn); (2, write_item 4); (1, read_item 0); (2, read_item 0); (2, start_txn); (1, start_txn)].
*)
Eval compute in check_is_serial_trace example_txn.
(***************************************************)
Lemma is_serial trace:
sto_trace trace ->
check_is_serial_trace (create_serialized_trace trace (seq_list trace)).
Proof.
intros.
induction H.
simpl. auto.
simpl. apply start_txn_step with (tid0 := tid0) in H0. rewrite serial_action_helper; auto. split; intuition; inversion H1. auto.
simpl. apply read_item_step with (tid := tid0) (val:= val) (oldver:= oldver) in H0. rewrite serial_action_helper; auto. split; intuition; inversion H2; inversion H. auto. auto.
simpl. apply write_item_step with (tid := tid0) (oldval:= oldval) (ver:= ver) (val:= val) in H0. rewrite serial_action_helper; auto. split; intuition; inversion H1; inversion H. auto.
simpl. apply try_commit_txn_step with (tid := tid0) (ver:= ver) (val:= val) in H0. rewrite serial_action_helper; auto. split; intuition; inversion H1; inversion H. auto.
simpl. apply lock_write_item_step with (tid := tid0) in H0. rewrite serial_action_helper; auto. split; intuition; inversion H3; inversion H. auto. auto. auto.
simpl. apply validate_read_item_step with (tid := tid0) in H0. rewrite serial_action_helper; auto. split; intuition; inversion H2; inversion H. auto. auto.
simpl. apply abort_txn_step with (tid := tid0) in H0. rewrite serial_action_helper; auto. split; intuition; inversion H1. auto.
simpl. rewrite <- beq_nat_refl.
apply seq_point_step with (tid := tid0) in H1; [ | auto| auto].
assert (sto_trace ((tid0, seq_point) :: t)). { auto. }
apply seq_list_no_two_seqpoint in H1.
assert (~ In (tid0, seq_point) t -> ~ In tid0 (seq_list t)).
{ intuition; apply trace_seqlist_seqpoint_rev in H4; apply H3 in H4; auto. }
apply H3 in H1.
apply serial_action_remove with (action0 := seq_point) in H1; [ | auto].
admit.
simpl. apply complete_write_item_step with (tid := tid0) in H0; [ | auto | auto].
admit.
simpl. apply commit_txn_step with (tid := tid0) (ver:= ver) in H0; [ | auto].
admit.
Admitted.
(***************************************************)
Lemma write_sync_list_unchanged_noseq :
forall t tid a, sto_trace ((tid, a)::t) ->
a = start_txn ->
get_write_value_out t = get_write_value_out ((tid, a)::t).
Proof.
intros.
unfold get_write_value_out.
subst. simpl.
induction (seq_list t).
simpl. auto.
simpl.
destruct (Nat.eq_dec tid0 a).
- subst. rewrite <- beq_nat_refl. rewrite IHl. intuition.
- rewrite <- Nat.eqb_neq in n. rewrite n. rewrite IHl. auto.
Qed.
Lemma write_sync_list_unchanged_noseq2 :
forall t tid a, sto_trace ((tid, a)::t) ->
a <> seq_point ->
get_write_value_out t = get_write_value_out ((tid, a)::t).
Proof.
intros.
unfold get_write_value_out.
subst. destruct a. simpl.
induction (seq_list t).
simpl. auto.
simpl.
destruct (Nat.eq_dec tid0 a).
- subst. rewrite <- beq_nat_refl. rewrite IHl. intuition.
- rewrite <- Nat.eqb_neq in n. rewrite n. rewrite IHl. auto.
Admitted.
(*
An STO-trace and its serial trace should have the same writes.
**************************************************
Should we prove that the create_serialized_trace function actually produces the correct
serial trace of the STO-trace?
**************************************************
*)
(***************************************************)
Lemma write_consistency trace:
sto_trace trace
-> sto_trace (create_serialized_trace trace (seq_list trace))
-> check_is_serial_trace (create_serialized_trace trace (seq_list trace))
-> write_synchronization trace (create_serialized_trace trace (seq_list trace)).
Proof.
intros.
induction H.
- unfold write_synchronization; unfold get_write_value_out; simpl; auto.
- unfold write_synchronization. unfold get_write_value_out.
apply start_txn_step in H; [ | auto].
apply serial_action_helper in H; [ | split].
unfold tid in H. rewrite H.
Admitted.
(***************************************************)
(*
Now we proceed to the read part of the proof.
*)
(*
An STO-trace and its serial trace should have the same reads.
*)
(***************************************************)
Lemma read_consistency trace:
sto_trace trace
-> read_synchronization trace (create_serialized_trace trace (seq_list trace)).
Admitted.
(***************************************************)
(*
The capstone theorem: prove the serializability of an STO-trace
*)
Theorem txn_equal t:
sto_trace t
-> exists t', sto_trace t'
-> check_is_serial_trace t'
-> Exec_Equivalence t t'.
Proof.
exists (create_serialized_trace t (seq_list t)).
intros.
unfold Exec_Equivalence. split.
apply write_consistency; auto.
apply read_consistency; auto.
Qed.
|
Formal statement is: lemmas prime_imp_coprime_int = prime_imp_coprime[where ?'a = int] Informal statement is: If $p$ is a prime number and $p$ does not divide $n$, then $p$ and $n$ are coprime.
|
lemma continuous_map_o_Pair: assumes h: "continuous_map (prod_topology X Y) Z h" and t: "t \<in> topspace X" shows "continuous_map Y Z (h \<circ> Pair t)"
|
[STATEMENT]
lemma mkuexpr_uminus [mkuexpr]: "mk\<^sub>e (\<lambda> s. - f s) = - mk\<^sub>e f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mk\<^sub>e (\<lambda>s. - f s) = - mk\<^sub>e f
[PROOF STEP]
by (simp add: uminus_uexpr_def, transfer, simp)
|
r=359.97
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7fw2v/media/images/d7fw2v-026/svc:tesseract/full/full/359.97/default.jpg Accept:application/hocr+xml
|
//
// Copyright (c) 2019 Vinnie Falco ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/json
//
#ifndef BOOST_JSON_IMPL_STREAM_PARSER_IPP
#define BOOST_JSON_IMPL_STREAM_PARSER_IPP
#include <boost/json/stream_parser.hpp>
#include <boost/json/basic_parser_impl.hpp>
#include <boost/json/error.hpp>
#include <cstring>
#include <stdexcept>
#include <utility>
BOOST_JSON_NS_BEGIN
stream_parser::
stream_parser(
storage_ptr sp,
parse_options const& opt,
unsigned char* buffer,
std::size_t size) noexcept
: p_(
opt,
std::move(sp),
buffer,
size)
{
reset();
}
stream_parser::
stream_parser(
storage_ptr sp,
parse_options const& opt) noexcept
: p_(
opt,
std::move(sp),
nullptr,
0)
{
reset();
}
void
stream_parser::
reset(storage_ptr sp) noexcept
{
p_.reset();
p_.handler().st.reset(sp);
}
std::size_t
stream_parser::
write_some(
char const* data,
std::size_t size,
error_code& ec)
{
return p_.write_some(
true, data, size, ec);
}
std::size_t
stream_parser::
write_some(
char const* data,
std::size_t size)
{
error_code ec;
auto const n = write_some(
data, size, ec);
if(ec)
detail::throw_system_error(ec,
BOOST_CURRENT_LOCATION);
return n;
}
std::size_t
stream_parser::
write(
char const* data,
std::size_t size,
error_code& ec)
{
auto const n = write_some(
data, size, ec);
if(! ec && n < size)
{
ec = error::extra_data;
p_.fail(ec);
}
return n;
}
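// Note: unlike write_some above, write reports unconsumed input as an
// error (error::extra_data), so callers that expect trailing data in the
// same buffer should prefer write_some.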
std::size_t
stream_parser::
write(
char const* data,
std::size_t size)
{
error_code ec;
auto const n = write(
data, size, ec);
if(ec)
detail::throw_system_error(ec,
BOOST_CURRENT_LOCATION);
return n;
}
void
stream_parser::
finish(error_code& ec)
{
p_.write_some(false, nullptr, 0, ec);
}
void
stream_parser::
finish()
{
error_code ec;
finish(ec);
if(ec)
detail::throw_system_error(ec,
BOOST_CURRENT_LOCATION);
}
value
stream_parser::
release()
{
if(! p_.done())
{
// prevent undefined behavior
finish();
}
return p_.handler().st.release();
}
BOOST_JSON_NS_END
#endif
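// Usage sketch (illustrative only, not part of the library): feeding a
// document to the parser in chunks, then releasing the parsed value.
//
//   boost::json::stream_parser p;
//   boost::json::error_code ec;
//   p.write( "[1, 2,", ec );   // a partial document is accepted
//   if( ! ec )
//       p.write( " 3]", ec );  // parsing resumes where it left off
//   if( ! ec )
//       p.finish( ec );        // signal that no more input is coming
//   if( ! ec )
//       boost::json::value jv = p.release();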
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import data.semiquot
import data.rat.floor
/-!
# Implementation of floating-point numbers (experimental).
-/
def int.shift2 (a b : ℕ) : ℤ → ℕ × ℕ
| (int.of_nat e) := (a.shiftl e, b)
| -[1+ e] := (a, b.shiftl e.succ)
namespace fp
@[derive inhabited]
inductive rmode
| NE -- round to nearest even
class float_cfg :=
(prec emax : ℕ)
(prec_pos : 0 < prec)
(prec_max : prec ≤ emax)
variable [C : float_cfg]
include C
def prec := C.prec
def emax := C.emax
def emin : ℤ := 1 - C.emax
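-- Illustration (an assumption, not from the original file): a binary16-like
-- configuration `⟨11, 15, dec_trivial, dec_trivial⟩ : float_cfg` gives
-- `prec = 11`, `emax = 15`, and hence `emin = 1 - 15 = -14`.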
def valid_finite (e : ℤ) (m : ℕ) : Prop :=
emin ≤ e + prec - 1 ∧ e + prec - 1 ≤ emax ∧ e = max (e + m.size - prec) emin
instance dec_valid_finite (e m) : decidable (valid_finite e m) :=
by unfold valid_finite; apply_instance
inductive float
| inf : bool → float
| nan : float
| finite : bool → Π e m, valid_finite e m → float
def float.is_finite : float → bool
| (float.finite s e m f) := tt
| _ := ff
def to_rat : Π (f : float), f.is_finite → ℚ
| (float.finite s e m f) _ :=
let (n, d) := int.shift2 m 1 e,
r := rat.mk_nat n d in
if s then -r else r
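-- Here `int.shift2 m 1 e` is `(m.shiftl e, 1)` for `e ≥ 0` and
-- `(m, 1.shiftl (-e))` for `e < 0`, so `to_rat` evaluates to `± m * 2 ^ e`
-- as a rational.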
theorem float.zero.valid : valid_finite emin 0 :=
⟨begin
rw add_sub_assoc,
apply le_add_of_nonneg_right,
apply sub_nonneg_of_le,
apply int.coe_nat_le_coe_nat_of_le,
exact C.prec_pos
end,
suffices prec ≤ 2 * emax,
begin
rw ← int.coe_nat_le at this,
rw ← sub_nonneg at *,
simp only [emin, emax] at *,
ring_nf,
assumption
end, le_trans C.prec_max (nat.le_mul_of_pos_left dec_trivial),
by rw max_eq_right; simp [sub_eq_add_neg]⟩
def float.zero (s : bool) : float :=
float.finite s emin 0 float.zero.valid
instance : inhabited float := ⟨float.zero tt⟩
protected def float.sign' : float → semiquot bool
| (float.inf s) := pure s
| float.nan := ⊤
| (float.finite s e m f) := pure s
protected def float.sign : float → bool
| (float.inf s) := s
| float.nan := ff
| (float.finite s e m f) := s
protected def float.is_zero : float → bool
| (float.finite s e 0 f) := tt
| _ := ff
protected def float.neg : float → float
| (float.inf s) := float.inf (bnot s)
| float.nan := float.nan
| (float.finite s e m f) := float.finite (bnot s) e m f
def div_nat_lt_two_pow (n d : ℕ) : ℤ → bool
| (int.of_nat e) := n < d.shiftl e
| -[1+ e] := n.shiftl e.succ < d
-- TODO(Mario): Prove these and drop 'meta'
meta def of_pos_rat_dn (n : ℕ+) (d : ℕ+) : float × bool :=
begin
let e₁ : ℤ := n.1.size - d.1.size - prec,
cases h₁ : int.shift2 d.1 n.1 (e₁ + prec) with d₁ n₁,
let e₂ := if n₁ < d₁ then e₁ - 1 else e₁,
let e₃ := max e₂ emin,
cases h₂ : int.shift2 d.1 n.1 (e₃ + prec) with d₂ n₂,
let r := rat.mk_nat n₂ d₂,
let m := r.floor,
refine (float.finite ff e₃ (int.to_nat m) _, r.denom = 1),
{ exact undefined }
end
meta def next_up_pos (e m) (v : valid_finite e m) : float :=
let m' := m.succ in
if ss : m'.size = m.size then
float.finite ff e m' (by unfold valid_finite at *; rw ss; exact v)
else if h : e = emax then
float.inf ff
else
float.finite ff e.succ (nat.div2 m') undefined
meta def next_dn_pos (e m) (v : valid_finite e m) : float :=
match m with
| 0 := next_up_pos _ _ float.zero.valid
| nat.succ m' :=
if ss : m'.size = m.size then
float.finite ff e m' (by unfold valid_finite at *; rw ss; exact v)
else if h : e = emin then
float.finite ff emin m' undefined
else
float.finite ff e.pred (bit1 m') undefined
end
meta def next_up : float → float
| (float.finite ff e m f) := next_up_pos e m f
| (float.finite tt e m f) := float.neg $ next_dn_pos e m f
| f := f
meta def next_dn : float → float
| (float.finite ff e m f) := next_dn_pos e m f
| (float.finite tt e m f) := float.neg $ next_up_pos e m f
| f := f
meta def of_rat_up : ℚ → float
| ⟨0, _, _, _⟩ := float.zero ff
| ⟨nat.succ n, d, h, _⟩ :=
let (f, exact) := of_pos_rat_dn n.succ_pnat ⟨d, h⟩ in
if exact then f else next_up f
| ⟨-[1+n], d, h, _⟩ := float.neg (of_pos_rat_dn n.succ_pnat ⟨d, h⟩).1
meta def of_rat_dn (r : ℚ) : float :=
float.neg $ of_rat_up (-r)
meta def of_rat : rmode → ℚ → float
| rmode.NE r :=
let low := of_rat_dn r, high := of_rat_up r in
if hf : high.is_finite then
if r = to_rat _ hf then high else
if lf : low.is_finite then
if r - to_rat _ lf > to_rat _ hf - r then high else
if r - to_rat _ lf < to_rat _ hf - r then low else
match low, lf with float.finite s e m f, _ :=
if 2 ∣ m then low else high
end
else float.inf tt
else float.inf ff
namespace float
instance : has_neg float := ⟨float.neg⟩
meta def add (mode : rmode) : float → float → float
| nan _ := nan
| _ nan := nan
| (inf tt) (inf ff) := nan
| (inf ff) (inf tt) := nan
| (inf s₁) _ := inf s₁
| _ (inf s₂) := inf s₂
| (finite s₁ e₁ m₁ v₁) (finite s₂ e₂ m₂ v₂) :=
let f₁ := finite s₁ e₁ m₁ v₁, f₂ := finite s₂ e₂ m₂ v₂ in
of_rat mode (to_rat f₁ rfl + to_rat f₂ rfl)
meta instance : has_add float := ⟨float.add rmode.NE⟩
meta def sub (mode : rmode) (f1 f2 : float) : float :=
add mode f1 (-f2)
meta instance : has_sub float := ⟨float.sub rmode.NE⟩
meta def mul (mode : rmode) : float → float → float
| nan _ := nan
| _ nan := nan
| (inf s₁) f₂ := if f₂.is_zero then nan else inf (bxor s₁ f₂.sign)
| f₁ (inf s₂) := if f₁.is_zero then nan else inf (bxor f₁.sign s₂)
| (finite s₁ e₁ m₁ v₁) (finite s₂ e₂ m₂ v₂) :=
let f₁ := finite s₁ e₁ m₁ v₁, f₂ := finite s₂ e₂ m₂ v₂ in
of_rat mode (to_rat f₁ rfl * to_rat f₂ rfl)
meta def div (mode : rmode) : float → float → float
| nan _ := nan
| _ nan := nan
| (inf s₁) (inf s₂) := nan
| (inf s₁) f₂ := inf (bxor s₁ f₂.sign)
| f₁ (inf s₂) := zero (bxor f₁.sign s₂)
| (finite s₁ e₁ m₁ v₁) (finite s₂ e₂ m₂ v₂) :=
let f₁ := finite s₁ e₁ m₁ v₁, f₂ := finite s₂ e₂ m₂ v₂ in
if f₂.is_zero then inf (bxor s₁ s₂) else
of_rat mode (to_rat f₁ rfl / to_rat f₂ rfl)
end float
end fp
|
lemma lipschitz_on_normD: "norm (f x - f y) \<le> L * norm (x - y)" if "lipschitz_on L X f" "x \<in> X" "y \<in> X"
|
lemma sets_null_measure[simp, measurable_cong]: "sets (null_measure M) = sets M"
|
[STATEMENT]
lemma lt__le:
assumes "A B Lt C D"
shows "A B Le C D"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A B Le C D
[PROOF STEP]
using Lt_def assms
[PROOF STATE]
proof (prove)
using this:
?A ?B Lt ?C ?D \<equiv> ?A ?B Le ?C ?D \<and> \<not> Cong ?A ?B ?C ?D
A B Lt C D
goal (1 subgoal):
1. A B Le C D
[PROOF STEP]
by blast
|
"""
Disjoint set data structure
"""
import collections
class DisjointSet:
""" Disjoint set data structure for incremental connectivity queries.
.. versionadded:: 1.6.0
Attributes
----------
n_subsets : int
The number of subsets.
Methods
-------
add
merge
connected
subset
subsets
__getitem__
Notes
-----
This class implements the disjoint set [1]_, also known as the *union-find*
or *merge-find* data structure. The *find* operation (implemented in
`__getitem__`) implements the *path halving* variant. The *merge* method
implements the *merge by size* variant.
References
----------
.. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure
Examples
--------
>>> from scipy.cluster.hierarchy import DisjointSet
Initialize a disjoint set:
>>> disjoint_set = DisjointSet([1, 2, 3, 'a', 'b'])
Merge some subsets:
>>> disjoint_set.merge(1, 2)
True
>>> disjoint_set.merge(3, 'a')
True
>>> disjoint_set.merge('a', 'b')
True
>>> disjoint_set.merge('b', 'b')
False
Find root elements:
>>> disjoint_set[2]
1
>>> disjoint_set['b']
3
Test connectivity:
>>> disjoint_set.connected(1, 2)
True
>>> disjoint_set.connected(1, 'b')
False
List elements in disjoint set:
>>> list(disjoint_set)
[1, 2, 3, 'a', 'b']
Get the subset containing 'a':
>>> disjoint_set.subset('a')
{'a', 3, 'b'}
Get all subsets in the disjoint set:
>>> disjoint_set.subsets()
[{1, 2}, {'a', 3, 'b'}]
"""
def __init__(self, elements=None):
self.n_subsets = 0
self._sizes = {}
self._parents = {}
# _nbrs is a circular linked list which links connected elements.
self._nbrs = {}
# _indices tracks the element insertion order - OrderedDict is used to
# ensure correct ordering in `__iter__`.
self._indices = collections.OrderedDict()
if elements is not None:
for x in elements:
self.add(x)
def __iter__(self):
"""Returns an iterator of the elements in the disjoint set.
Elements are ordered by insertion order.
"""
return iter(self._indices)
def __len__(self):
return len(self._indices)
def __contains__(self, x):
return x in self._indices
def __getitem__(self, x):
"""Find the root element of `x`.
Parameters
----------
x : hashable object
Input element.
Returns
-------
root : hashable object
Root element of `x`.
"""
if x not in self._indices:
raise KeyError(x)
# find by "path halving"
parents = self._parents
while self._indices[x] != self._indices[parents[x]]:
parents[x] = parents[parents[x]]
x = parents[x]
return x
def add(self, x):
"""Add element `x` to disjoint set
"""
if x in self._indices:
return
self._sizes[x] = 1
self._parents[x] = x
self._nbrs[x] = x
self._indices[x] = len(self._indices)
self.n_subsets += 1
def merge(self, x, y):
"""Merge the subsets of `x` and `y`.
The smaller subset (the child) is merged into the larger subset (the
parent). If the subsets are of equal size, the root element which was
first inserted into the disjoint set is selected as the parent.
Parameters
----------
x, y : hashable object
Elements to merge.
Returns
-------
merged : bool
True if `x` and `y` were in disjoint sets, False otherwise.
"""
xr = self[x]
yr = self[y]
if self._indices[xr] == self._indices[yr]:
return False
sizes = self._sizes
if (sizes[xr], self._indices[yr]) < (sizes[yr], self._indices[xr]):
xr, yr = yr, xr
self._parents[yr] = xr
self._sizes[xr] += self._sizes[yr]
self._nbrs[xr], self._nbrs[yr] = self._nbrs[yr], self._nbrs[xr]
self.n_subsets -= 1
return True
def connected(self, x, y):
"""Test whether `x` and `y` are in the same subset.
Parameters
----------
x, y : hashable object
Elements to test.
Returns
-------
result : bool
True if `x` and `y` are in the same set, False otherwise.
"""
return self._indices[self[x]] == self._indices[self[y]]
def subset(self, x):
"""Get the subset containing `x`.
Parameters
----------
x : hashable object
Input element.
Returns
-------
result : set
Subset containing `x`.
"""
if x not in self._indices:
raise KeyError(x)
result = [x]
nxt = self._nbrs[x]
while self._indices[nxt] != self._indices[x]:
result.append(nxt)
nxt = self._nbrs[nxt]
return set(result)
def subsets(self):
"""Get all the subsets in the disjoint set.
Returns
-------
        result : list of sets
Subsets in the disjoint set.
"""
result = []
visited = set()
for x in self:
if x not in visited:
xset = self.subset(x)
visited.update(xset)
result.append(xset)
return result
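if __name__ == "__main__":
    # Small usage sketch (illustrative, mirroring the docstring examples).
    ds = DisjointSet(['a', 'b', 'c', 'd'])
    ds.merge('a', 'b')
    ds.merge('c', 'd')
    assert ds.connected('a', 'b')
    assert not ds.connected('a', 'c')
    assert ds.n_subsets == 2
    assert ds.subsets() == [{'a', 'b'}, {'c', 'd'}]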
|
import data.list.chain
import data.sigma.basic
variables {ι : Type*} {M : ι → Type*} {G : ι → Type*} {N : Type*}
variables [Π i, monoid (M i)] [Π i, group (G i)] [monoid N]
open list function
namespace coprod.pre
def reduced (l : list (Σ i, M i)) : Prop :=
l.chain' (λ a b, a.1 ≠ b.1) ∧ ∀ a : Σ i, M i, a ∈ l → a.2 ≠ 1
@[simp] lemma reduced_nil : reduced ([] : list (Σ i, M i)) :=
⟨list.chain'_nil, λ _, false.elim⟩
lemma reduced_singleton {i : Σ i, M i} (hi : i.2 ≠ 1) : reduced [i] :=
⟨by simp, begin
cases i with i a,
rintros ⟨j, b⟩,
simp only [and_imp, ne.def, mem_singleton],
rintro rfl h₂,
simp * at *
end⟩
lemma reduced_of_reduced_cons {i : Σ i, M i} {l : list (Σ i, M i)}
(h : reduced (i :: l)) : reduced l :=
⟨(list.chain'_cons'.1 h.1).2, λ b hb, h.2 _ (mem_cons_of_mem _ hb)⟩
lemma reduced_cons_of_reduced_cons {i : ι} {a b : M i} {l : list (Σ i, M i)}
(h : reduced (⟨i, a⟩ :: l)) (hb : b ≠ 1) : reduced (⟨i, b⟩ :: l) :=
⟨chain'_cons'.2 (chain'_cons'.1 h.1),
begin
rintros ⟨k, c⟩ hk,
cases (mem_cons_iff _ _ _).1 hk with hk hk,
{ simp only at hk,
rcases hk with ⟨rfl, h⟩,
simp * at * },
{ exact h.2 _ (mem_cons_of_mem _ hk) }
end⟩
lemma reduced_cons_cons {i j : ι} {a : M i} {b : M j}
{l : list (Σ i, M i)} (hij : i ≠ j) (ha : a ≠ 1)
(hbl : reduced (⟨j, b⟩ :: l)) : reduced (⟨i, a⟩ :: ⟨j, b⟩ :: l) :=
⟨chain'_cons.2 ⟨hij, hbl.1⟩,
begin
rintros ⟨k, c⟩ hk,
cases (mem_cons_iff _ _ _).1 hk with hk hk,
{ simp only at hk,
rcases hk with ⟨rfl, h⟩,
simp * at * },
{ exact hbl.2 _ hk }
end⟩
lemma reduced_reverse {l : list (Σ i, M i)} (h : reduced l) : reduced l.reverse :=
⟨chain'_reverse.2 $ by {convert h.1, simp [function.funext_iff, eq_comm] },
by simpa using h.2⟩
@[simp] lemma reduced_reverse_iff {l : list (Σ i, M i)} : reduced l.reverse ↔ reduced l :=
⟨λ h, by convert reduced_reverse h; simp, reduced_reverse⟩
lemma reduced_of_reduced_append_right : ∀ {l₁ l₂ : list (Σ i, M i)} (h : reduced (l₁ ++ l₂)),
reduced l₂
| [] l₂ h := h
| (i::l₁) l₂ h := begin
rw cons_append at h,
exact reduced_of_reduced_append_right (reduced_of_reduced_cons h)
end
lemma reduced_of_reduced_append_left {l₁ l₂ : list (Σ i, M i)} (h : reduced (l₁ ++ l₂)) :
reduced l₁ :=
begin
rw [← reduced_reverse_iff],
rw [← reduced_reverse_iff, reverse_append] at h,
exact reduced_of_reduced_append_right h
end
variables {ι} [decidable_eq ι] {M} [Π i, decidable_eq (M i)]
def rcons : (Σ i, M i) → list (Σ i, M i) → list (Σ i, M i)
| i [] := [i]
| i (j::l) :=
if hij : i.1 = j.1
then let c := i.2 * cast (congr_arg M hij).symm j.2 in
if c = 1
then l
else ⟨i.1, c⟩ :: l
else i::j::l
def reduce : list (Σ i, M i) → list (Σ i, M i)
| [] := []
| (i :: l) := if i.2 = 1 then reduce l else rcons i (reduce l)
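-- Illustration: `rcons` pushes a letter onto an already-reduced word, merging
-- with the head when the indices agree and dropping both letters when the
-- product cancels, so `rcons ⟨i, a⟩ (⟨i, b⟩ :: l) = l` whenever `a * b = 1`.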
@[simp] lemma reduce_nil : reduce ([] : list (Σ i, M i)) = [] := rfl
lemma reduce_cons (i : Σ i, M i) (l : list (Σ i, M i)) :
reduce (i::l) = if i.2 = 1 then reduce l else rcons i (reduce l) := rfl
lemma reduced_rcons : ∀ {i : Σ i, M i} {l : list (Σ i, M i)},
i.2 ≠ 1 → reduced l → reduced (rcons i l)
| ⟨i, a⟩ [] hi h := ⟨list.chain'_singleton _,
begin
rintros ⟨j, b⟩ hj,
simp only [rcons, list.mem_singleton] at hj,
rcases hj with ⟨rfl, h⟩,
simp * at *
end⟩
| ⟨i, a⟩ (⟨j, b⟩ :: l) hi h := begin
simp [rcons],
split_ifs,
{ exact reduced_of_reduced_cons h },
{ dsimp only at h_1,
subst h_1,
exact reduced_cons_of_reduced_cons h h_2 },
{ exact reduced_cons_cons h_1 hi h }
end
lemma reduced_reduce : ∀ l : list (Σ i, M i), reduced (reduce l)
| [] := reduced_nil
| (a::l) := begin
rw reduce,
split_ifs,
{ exact reduced_reduce l },
{ exact reduced_rcons h (reduced_reduce l) }
end
lemma rcons_eq_cons : ∀ {i : Σ i, M i} {l : list (Σ i, M i)},
reduced (i :: l) → rcons i l = i :: l
| i [] h := rfl
| i (j::l) h := dif_neg (chain'_cons.1 h.1).1
lemma rcons_reduce_eq_reduce_cons : ∀ {i : Σ i, M i} {l : list (Σ i, M i)},
i.2 ≠ 1 → rcons i (reduce l) = reduce (i :: l)
| a [] ha := by simp [rcons, reduce, ha]
| a (b::l) ha := begin
rw [reduce],
split_ifs,
{ rw [reduce, if_neg ha, reduce, if_pos h] },
{ rw [reduce, if_neg ha, reduce, if_neg h] }
end
lemma reduce_eq_self_of_reduced : ∀ {l : list (Σ i, M i)}, reduced l → reduce l = l
| [] h := rfl
| (a::l) h := by rw [← rcons_reduce_eq_reduce_cons (h.2 a (mem_cons_self _ _)),
reduce_eq_self_of_reduced (reduced_of_reduced_cons h), rcons_eq_cons h]
lemma rcons_eq_reduce_cons {i : Σ i, M i} {l : list (Σ i, M i)}
(ha : i.2 ≠ 1) (hl : reduced l) : rcons i l = reduce (i :: l) :=
by rw [← rcons_reduce_eq_reduce_cons ha, reduce_eq_self_of_reduced hl]
@[simp] lemma reduce_reduce (l : list (Σ i, M i)) : reduce (reduce l) = reduce l :=
reduce_eq_self_of_reduced (reduced_reduce l)
@[simp] lemma reduce_cons_reduce_eq_reduce_cons (i : Σ i, M i) (l : list (Σ i, M i)) :
reduce (i :: reduce l) = reduce (i :: l) :=
if ha : i.2 = 1 then by rw [reduce, if_pos ha, reduce, if_pos ha, reduce_reduce]
else by rw [← rcons_reduce_eq_reduce_cons ha, ← rcons_reduce_eq_reduce_cons ha,
reduce_reduce]
lemma length_rcons_le : ∀ (i : Σ i, M i) (l : list (Σ i, M i)),
(rcons i l).length ≤ (i::l : list _).length
| i [] := le_refl _
| ⟨i, a⟩ (⟨j, b⟩::l) := begin
simp [rcons],
split_ifs,
{ repeat { constructor } },
{ simp },
{ simp }
end
lemma length_reduce_le : ∀ (l : list (Σ i, M i)),
(reduce l).length ≤ l.length
| [] := le_refl _
| [a] := by { simp [reduce], split_ifs; simp [rcons] }
| (a::b::l) := begin
simp only [reduce, rcons],
split_ifs,
{ exact le_trans (length_reduce_le _)
(le_trans (nat.le_succ _) (nat.le_succ _)) },
{ exact le_trans (length_rcons_le _ _) (nat.succ_le_succ
(le_trans (length_reduce_le _) (nat.le_succ _))) },
{ exact le_trans (length_rcons_le _ _) (nat.succ_le_succ
(le_trans (length_reduce_le _) (nat.le_succ _))) },
{ exact le_trans (length_rcons_le _ _) (nat.succ_le_succ
(le_trans (length_rcons_le _ _) (nat.succ_le_succ
(length_reduce_le _)))) }
end
lemma length_rcons_lt_or_eq_rcons : ∀ (i : Σ i, M i) (l : list (Σ i, M i)),
(rcons i l).length < (i :: l : list _).length ∨ rcons i l = (i::l)
| i [] := or.inr rfl
| i (j::l) := begin
simp only [rcons],
split_ifs,
{ exact or.inl (nat.lt_succ_of_le (nat.le_succ _)) },
{ exact or.inl (nat.lt_succ_self _) },
{ simp }
end
lemma length_reduce_lt_or_eq_reduce : ∀ (l : list (Σ i, M i)),
(reduce l).length < l.length ∨ reduce l = l
| [] := or.inr rfl
| (i::l) := begin
simp only [reduce],
split_ifs,
{ exact or.inl (nat.lt_succ_of_le (length_reduce_le _)) },
{ cases length_rcons_lt_or_eq_rcons i (reduce l) with h h,
{ exact or.inl (lt_of_lt_of_le h (nat.succ_le_succ (length_reduce_le _))) },
{ rw h,
cases length_reduce_lt_or_eq_reduce l with h h,
{ exact or.inl (nat.succ_lt_succ h) },
{ rw h, right, refl } } }
end
lemma rcons_append : ∀ {i j : Σ i, M i} {l₁ l₂ : list (Σ i, M i)},
rcons i ((j::l₁) ++ l₂) = rcons i (j::l₁) ++ l₂
| i j [] l₂ := begin
simp [rcons], split_ifs; simp
end
| a b (c::l₁) l₂ := begin
rw [cons_append, rcons],
dsimp,
split_ifs,
{ simp [rcons, *] },
{ simp [rcons, *] },
{ simp [rcons, *] }
end
lemma rcons_rcons_of_mul_eq_one {i : ι} {a b : M i} : ∀ {l : list (Σ i, M i)},
a * b = 1 → reduced l → rcons ⟨i, a⟩ (rcons ⟨i, b⟩ l) = l
| [] hab hl := by simp [rcons, cast, hab]
| (⟨j, c⟩::l) hab hl := begin
simp only [rcons],
split_ifs,
{ dsimp only at h,
subst h,
rw [← rcons_eq_cons hl, left_inv_eq_right_inv hab h_1, cast_eq] },
{ dsimp only at h,
subst h,
simp only [rcons, dif_pos rfl],
rw [cast_eq, cast_eq, if_neg, ← mul_assoc, hab, one_mul],
{ rw [← mul_assoc, hab, one_mul],
exact hl.2 ⟨i, c⟩ (mem_cons_self _ _) } },
{ rw [rcons, dif_pos rfl, cast_eq], dsimp, rw [if_pos hab] }
end
lemma rcons_rcons_of_mul_ne_one {i : ι} {a b : M i} : ∀ {l : list (Σ i, M i)},
a * b ≠ 1 → a ≠ 1 → reduced l → rcons ⟨i, a⟩ (rcons ⟨i, b⟩ l) = rcons ⟨i, a * b⟩ l
| [] hab ha hl := by simp [rcons, hab]
| [⟨j, c⟩] hab ha hl := begin
simp only [rcons],
split_ifs,
{ rw [mul_assoc, h_1, mul_one] at h_2,
exact (ha h_2).elim },
{ simp [rcons, mul_assoc, h_1] },
{ simp only [rcons, ← mul_assoc, *, dif_pos rfl, if_pos rfl, cast_eq] },
{ dsimp only at h,
subst h,
simp only [rcons, dif_pos rfl, ← mul_assoc, cast_eq, *] at *,
simp, },
{ simp [rcons, if_neg hab, if_pos rfl] }
end
| (⟨j, c⟩::⟨k, d⟩::l) hab ha hl := begin
have hjk : j ≠ k, from (chain'_cons.1 hl.1).1,
dsimp only [rcons],
split_ifs,
{ rw [mul_assoc, h_1, mul_one] at h_2,
exact (ha h_2).elim },
{ dsimp [rcons],
subst h,
simp [*, rcons, mul_assoc] at * },
{ simp [*, rcons, ← mul_assoc] at * },
{ simp [*, rcons, ← mul_assoc] },
{ simp [*, rcons] }
end
lemma reduce_rcons : ∀ {i : Σ i, M i} (l : list (Σ i, M i)), i.2 ≠ 1 →
reduce (rcons i l) = rcons i (reduce l)
| i [] hi := by simp [rcons, reduce, hi]
| ⟨i, a⟩ [⟨j, b⟩] ha := begin
replace ha : a ≠ 1 := ha,
dsimp only [reduce, rcons],
by_cases hij : i = j,
{ subst hij,
split_ifs;
simp [*, reduce, rcons] at * },
{ simp [hij, reduce, rcons, ha] }
end
| ⟨i, a⟩ (⟨j, b⟩::l) ha := begin
dsimp only [rcons],
split_ifs,
{ subst h,
rw [cast_eq] at h_1,
rw [reduce, if_neg, rcons_rcons_of_mul_eq_one h_1 (reduced_reduce _)],
{ refine λ hb : b = 1, _,
rw [hb, mul_one] at h_1,
exact ha h_1 } },
{ subst h,
rw [reduce, if_neg h_1, reduce],
split_ifs,
{ erw [cast_eq, show b = 1, from h, mul_one] },
{ rw cast_eq at h_1,
rw [rcons_rcons_of_mul_ne_one h_1 ha (reduced_reduce l), cast_eq], } },
{ rw [rcons_eq_reduce_cons ha (reduced_reduce _), reduce_cons_reduce_eq_reduce_cons] }
end
lemma reduce_cons_cons_of_mul_eq_one {l : list (Σ i, M i)} {i : ι} {a b : M i}
(ha : a ≠ 1) (hb : b ≠ 1) (hab : a * b = 1) : reduce (⟨i, a⟩ :: ⟨i, b⟩ :: l) = reduce l :=
by rw [reduce, if_neg ha, reduce, if_neg hb, rcons_rcons_of_mul_eq_one hab (reduced_reduce _)]
lemma reduce_cons_cons_of_mul_ne_one {l : list (Σ i, M i)} {i : ι} {a b : M i}
(ha : a ≠ 1) (hab : a * b ≠ 1) : reduce (⟨i, a⟩ :: ⟨i, b⟩ :: l) = reduce (⟨i, a * b⟩ :: l) :=
begin
rw [reduce, if_neg ha, reduce],
split_ifs,
{ rw [rcons_eq_reduce_cons (show (⟨i, a⟩ : Σ i, M i).snd ≠ 1, from ha) (reduced_reduce _),
show b = 1, from h, mul_one, reduce_cons_reduce_eq_reduce_cons] },
{ rw [rcons_rcons_of_mul_ne_one hab ha (reduced_reduce _),
rcons_eq_reduce_cons (show (⟨i, a * b⟩ : Σ i, M i).snd ≠ 1, from hab : _) (reduced_reduce _),
reduce_cons_reduce_eq_reduce_cons] }
end
@[simp] lemma reduce_reduce_append_eq_reduce_append : ∀ (l₁ l₂ : list (Σ i, M i)),
reduce (reduce l₁ ++ l₂) = reduce (l₁ ++ l₂)
| [] l₂ := rfl
| (a::l₁) l₂ := begin
simp only [reduce, cons_append],
split_ifs with ha ha,
{ exact reduce_reduce_append_eq_reduce_append _ _ },
{ rw [← reduce_reduce_append_eq_reduce_append l₁ l₂],
induction h : reduce l₁,
{ simp [rcons, rcons_eq_reduce_cons ha (reduced_reduce _)] },
{ rw [← rcons_append, reduce_rcons _ ha] } }
end
@[simp] lemma reduce_append_reduce_eq_reduce_append : ∀ (l₁ l₂ : list (Σ i, M i)),
reduce (l₁ ++ reduce l₂) = reduce (l₁ ++ l₂)
| [] l₂ := by simp
| (a::l₁) l₂ := by rw [cons_append, ← reduce_cons_reduce_eq_reduce_cons,
reduce_append_reduce_eq_reduce_append,
reduce_cons_reduce_eq_reduce_cons, cons_append]
lemma reduced_iff_reduce_eq_self {l : list (Σ i, M i)} :
reduced l ↔ reduce l = l :=
⟨reduce_eq_self_of_reduced, λ h, h ▸ reduced_reduce l⟩
lemma reduced_append_overlap {l₁ l₂ l₃ : list (Σ i, M i)}
(h₁ : reduced (l₁ ++ l₂)) (h₂ : reduced (l₂ ++ l₃)) (hn : l₂ ≠ []):
reduced (l₁ ++ l₂ ++ l₃) :=
⟨chain'.append_overlap h₁.1 h₂.1 hn,
λ i hi, (mem_append.1 hi).elim (h₁.2 _) (λ hi, h₂.2 _ (mem_append_right _ hi))⟩
/-- `mul_aux` returns `reduce (l₁.reverse ++ l₂)` -/
@[simp] def mul_aux : Π (l₁ l₂ : list (Σ i, M i)), list (Σ i, M i)
| [] l₂ := l₂
| (i::l₁) [] := reverse (i :: l₁)
| (i::l₁) (j::l₂) :=
if hij : i.1 = j.1
then let c := i.2 * cast (congr_arg M hij).symm j.2 in
if c = 1
then mul_aux l₁ l₂
else l₁.reverse_core (⟨i.1, c⟩::l₂)
else l₁.reverse_core (i::j::l₂)
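-- Illustration (assuming `a * b = 1`, `i ≠ j`, and `c ≠ 1`):
-- `mul_aux [⟨i, a⟩] [⟨i, b⟩, ⟨j, c⟩]` takes the first branch, cancels the
-- pair, and returns `[⟨j, c⟩]`, in agreement with
-- `reduce ([⟨i, a⟩].reverse ++ [⟨i, b⟩, ⟨j, c⟩])`.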
local attribute [simp] reverse_core_eq
@[simp] def mul_aux' : Π (l₁ l₂ : list (Σ i, M i)), list (Σ i, M i)
| [] l₂ := l₂
| (i::l₁) [] := reverse (i :: l₁)
| (i::l₁) (j::l₂) :=
if hij : i.1 = j.1
then let c := i.2 * cast (congr_arg M hij).symm j.2 in
if c = 1
then mul_aux' l₁ l₂
else mul_aux' l₁ (⟨i.1, c⟩::l₂)
else mul_aux' l₁ (i::j::l₂)
lemma mul_aux'_eq_append : Π {l₁ l₂ : list (Σ i, M i)},
reduced (l₁.reverse ++ l₂) → mul_aux' l₁ l₂ = l₁.reverse ++ l₂
| [] l₂ h := rfl
| (i::l₁) [] h := by simp
| (i::l₁) (j::l₂) h := begin
rw [mul_aux'],
have hij : i.fst ≠ j.fst,
{ rw [reduced, chain'_split, ← reverse_cons, chain'_reverse, chain'_cons] at h,
simp [flip] at h,
tauto },
rw [dif_neg hij, mul_aux'_eq_append];
simp * at *
end
lemma mul_aux_eq_mul_aux' : Π {l₁ l₂ : list (Σ i, M i)}
(h₁ : reduced l₁) (h₂ : reduced l₂),
mul_aux l₁ l₂ = mul_aux' l₁ l₂
| [] l₂ _ _ := rfl
| (i::l₁) [] _ _ := rfl
| [i] (j::l₂) _ _ :=
begin
unfold mul_aux mul_aux',
split_ifs; simp
end
| (i::j::l₁) (k::l₂) h₁ h₂:=
begin
unfold mul_aux mul_aux',
have hij : j.fst ≠ i.fst,
by simp [reduced] at h₁; tauto,
simp only [dif_neg hij],
split_ifs,
{ rw mul_aux_eq_mul_aux' (reduced_of_reduced_cons h₁)
(reduced_of_reduced_cons h₂) },
{ cases i with i a, cases k with k b,
dsimp only at h, subst h,
rw [reverse_core_eq, ← mul_aux_eq_mul_aux' (reduced_of_reduced_cons h₁)
(reduced_cons_of_reduced_cons h₂ h_1), mul_aux, dif_neg hij],
simp },
{ have : reduced (l₁.reverse ++ j :: i :: k :: l₂),
{ suffices : reduced ((j :: l₁).reverse ++ [i, k] ++ l₂),
{ simpa },
cases i with i a, cases j with j b, cases k with k c,
have hc1 : c ≠ 1, from h₂.2 ⟨k, c⟩ (mem_cons_self _ _),
have ha1 : a ≠ 1, from h₁.2 ⟨i, a⟩ (mem_cons_self _ _),
refine reduced_append_overlap _ _ (cons_ne_nil _ _),
{ suffices : reduced (⟨k, c⟩ :: ⟨i, a⟩ :: ⟨j, b⟩ :: l₁),
{ rw [← reduced_reverse_iff],
simpa },
exact reduced_cons_cons (ne.symm h) hc1 h₁ },
{ suffices : reduced (⟨i, a⟩ :: ⟨k, c⟩ :: l₂),
{ simpa },
exact reduced_cons_cons h ha1 h₂ } },
rw [mul_aux'_eq_append this],
simp }
end
@[simp] lemma mul_aux'_nil (l : list (Σ i, M i)) : mul_aux' l [] = l.reverse :=
by cases l; simp
lemma mul_aux'_single : Π (l₁ l₂ : list (Σ i, M i)) (i : Σ i, M i),
mul_aux' l₁ (mul_aux' l₂.reverse [i]) = mul_aux' (mul_aux' l₁ l₂).reverse [i]
| [] l₂ i := by simp
| (j::l₁) [] i := by simp
| (⟨j, b⟩::l₁) (⟨k, c⟩::l₂) ⟨i, a⟩ :=
list.reverse_rec_on l₂
begin
end
(begin
rintros l₂ ⟨m, d⟩ ih,
simp at *, dsimp at *,
split_ifs at *,
end)
lemma mul_aux'_cons : ∀ (l₁ l₂ : list (Σ i, M i)) (i : Σ i, M i),
mul_aux' (rcons i l₁) l₂ = mul_aux' l₁ (rcons i l₂)
| [] l₂ i := by simp [rcons]; admit
| (j::l₁) [] i := begin
simp [rcons, eq_comm],
split_ifs,
{ refl },
end
lemma mul_aux'_single : Π (l₁ l₂ : list (Σ i, M i)) (i : Σ i, M i),
mul_aux' l₁ (mul_aux' [i] l₂) = mul_aux' (mul_aux' l₁ [i]).reverse l₂
| [] l₂ i := by simp
| (j::l₁) [] i := by simp
| (⟨j, b⟩::l₁) (⟨k, c⟩::l₂) ⟨i, a⟩ := begin
rw [mul_aux', mul_aux'],
dsimp,
split_ifs,
{ simp, }
end
lemma mul_aux'_single : Π (l₁ l₂ : list (Σ i, M i)) (i : Σ i, M i),
mul_aux' l₁ (mul_aux' l₂ [i]) = mul_aux' (mul_aux' l₁ l₂.reverse).reverse [i]
| [] l₂ i := by simp
| (j::l₁) [] i := by simp
| (⟨j, b⟩::l₁) (⟨k, c⟩::l₂) ⟨i, a⟩ :=
list.reverse_rec_on l₂
@[simp] lemma mul_aux_nil (l : list (Σ i, M i)) : mul_aux l [] = l.reverse :=
by cases l; refl
@[simp] lemma nil_mul_aux (l : list (Σ i, M i)) : mul_aux [] l = l := rfl
lemma mul_aux_single_reverse : ∀ (l : list (Σ i, M i)) (i : Σ i, M i),
mul_aux l [i] = mul_aux [i] l
| [] i := by simp
| (i::l) j := list.reverse_rec_on l
(by simp [mul_aux, reverse_core_eq]; split_ifs; simp)
_
lemma mul_aux_single : Π (l₁ l₂ : list (Σ i, M i)) (i : Σ i, M i),
mul_aux l₁ (mul_aux l₂.reverse [i]) = mul_aux (mul_aux l₁ l₂).reverse [i]
| [] l₂ i := by simp [mul_aux]
| (j::l₁) [] i := by simp [mul_aux]
| (j::l₁) (k::l₂) i := begin
simp only [mul_aux],
split_ifs,
{ rw ← mul_aux_single, sorry },
{ simp [reverse_core_eq], }
end
lemma mul_aux_eq_reduce_append : ∀ {l₁ l₂: list (Σ i, M i)},
reduced l₁ → reduced l₂ → mul_aux l₁ l₂ = reduce (l₁.reverse ++ l₂)
| [] l₂ := λ h₁ h₂,
by clear_aux_decl; simp [mul_aux, reduce_eq_self_of_reduced, *]
| (i::hd) [] := λ h₁ h₂,
by rw [mul_aux, append_nil, reduce_eq_self_of_reduced (reduced_reverse h₁)]
| (⟨i,a⟩::l₁) (⟨j,b⟩::l₂) := λ h₁ h₂,
begin
simp only [mul_aux],
dsimp only,
have ha : a ≠ 1, from h₁.2 ⟨i, a⟩ (by simp),
have hb : b ≠ 1, from h₂.2 ⟨j, b⟩ (list.mem_cons_self _ _),
rcases decidable.em (i = j) with ⟨rfl, hij⟩,
{ rw [dif_pos rfl, cast_eq],
split_ifs,
{ have hrl₁ : reduced l₁,
{ exact reduced_of_reduced_cons h₁ },
have hrl₂ : reduced l₂, from reduced_of_reduced_cons h₂,
rw [mul_aux_eq_reduce_append hrl₁ hrl₂, reverse_cons, append_assoc,
cons_append, nil_append, ← reduce_append_reduce_eq_reduce_append _ (_ :: _),
reduce_cons_cons_of_mul_eq_one ha hb h_1,
reduce_append_reduce_eq_reduce_append] },
{ have hrl₁ :reduced (l₁.reverse ++ [⟨i, a * b⟩]),
{ rw [← reduced_reverse_iff, reverse_append, reverse_reverse,
reverse_singleton, singleton_append],
exact reduced_cons_of_reduced_cons h₁ h_1 },
have hrl₂ :reduced ([⟨i, a * b⟩] ++ l₂),
{ rw [singleton_append],
exact reduced_cons_of_reduced_cons h₂ h_1 },
simp only [reverse_cons, singleton_append, append_assoc, reverse_core_eq],
rw [← reduce_append_reduce_eq_reduce_append,
reduce_cons_cons_of_mul_ne_one ha h_1,
reduce_append_reduce_eq_reduce_append, ← singleton_append, ← append_assoc],
exact (reduce_eq_self_of_reduced
(reduced_append_overlap hrl₁ hrl₂ (by simp))).symm } },
{ suffices : reduce (l₁.reverse ++ [⟨i, a⟩, ⟨j, b⟩] ++ l₂) =
l₁.reverse ++ [⟨i, a⟩, ⟨j, b⟩] ++ l₂,
{ simpa [eq_comm, dif_neg h, reverse_core_eq] },
have hrl₁ : reduced (l₁.reverse ++ [⟨i, a⟩, ⟨j, b⟩]),
{ rw [← reduced_reverse_iff],
simp only [reverse_append, reverse_cons, cons_append, reverse_nil, nil_append,
reverse_reverse],
refine reduced_cons_cons (ne.symm h) hb h₁ },
have hrl₂ : reduced ([⟨i, a⟩, ⟨j, b⟩] ++ l₂),
{ simp only [cons_append, nil_append],
refine reduced_cons_cons h ha h₂ },
exact reduce_eq_self_of_reduced (reduced_append_overlap hrl₁ hrl₂ (by simp)) }
end
protected def mul (l₁ l₂ : list (Σ i, M i)) : list (Σ i, M i) :=
mul_aux l₁.reverse l₂
lemma mul_eq_reduce_append {l₁ l₂ : list (Σ i, M i)} (h₁ : reduced l₁) (h₂ : reduced l₂) :
coprod.pre.mul l₁ l₂ = reduce (l₁ ++ l₂) :=
by rw [coprod.pre.mul, mul_aux_eq_reduce_append (reduced_reverse h₁) h₂, reverse_reverse]
lemma reduced_mul {l₁ l₂ : list (Σ i, M i)} (h₁ : reduced l₁) (h₂ : reduced l₂) :
reduced (coprod.pre.mul l₁ l₂) :=
(mul_eq_reduce_append h₁ h₂).symm ▸ reduced_reduce _
protected lemma mul_assoc {l₁ l₂ l₃ : list (Σ i, M i)} (h₁ : reduced l₁) (h₂ : reduced l₂)
(h₃ : reduced l₃) : pre.mul (pre.mul l₁ l₂) l₃ = pre.mul l₁ (pre.mul l₂ l₃) :=
begin
rw [mul_eq_reduce_append (reduced_mul h₁ h₂) h₃, mul_eq_reduce_append h₁ h₂,
mul_eq_reduce_append h₂ h₃, mul_eq_reduce_append h₁ (reduced_reduce _)],
simp [append_assoc]
end
protected lemma one_mul (l : list (Σ i, M i)) : pre.mul [] l = l := rfl
protected lemma mul_one {l : list (Σ i, M i)} (h : reduced l) : pre.mul l [] = l :=
by rw [mul_eq_reduce_append h reduced_nil, append_nil, reduce_eq_self_of_reduced h]
section lift
variable (f : Π i, M i →* N)
def lift (l : list (Σ i, M i)) : N :=
l.foldl (λ n i, n * f i.1 i.2) 1
lemma lift_eq_map_prod (l : list (Σ i, M i)) :
lift f l = (l.map (λ i : Σ i, M i, f i.1 i.2)).prod :=
begin
rw [lift, ← one_mul (l.map _).prod],
generalize h : (1 : N) = n, clear h,
induction l with i l ih generalizing n,
{ simp },
{ rw [foldl_cons, ih, map_cons, prod_cons, mul_assoc] }
end
lemma map_prod_mul_aux : ∀ (l₁ l₂ : list (Σ i, M i)),
((mul_aux l₁ l₂).map (λ i : Σ i, M i, f i.1 i.2)).prod =
(l₁.reverse.map (λ i : Σ i, M i, f i.1 i.2)).prod *
(l₂.map (λ i : Σ i, M i, f i.1 i.2)).prod
| [] l₂ := by simp [mul_aux]
| (i::l₁) [] := by simp [mul_aux]
| (⟨i,a⟩::l₁) (⟨j,b⟩::l₂) := begin
rw [mul_aux],
split_ifs,
{ dsimp only at h,
subst h,
rw [cast_eq] at h_1,
simp only [prod_nil, mul_one, reverse_cons, map, prod_cons, prod_append,
map_append, map_reverse, mul_assoc],
rw [← mul_assoc (f _ _), ← monoid_hom.map_mul, h_1],
simp [map_prod_mul_aux] },
{ dsimp only at h,
subst h,
simp [reverse_core_eq, mul_assoc] },
{ simp [reverse_core_eq, mul_assoc] }
end
lemma lift_mul (l₁ l₂ : list (Σ i, M i)) : lift f (pre.mul l₁ l₂) = lift f l₁ * lift f l₂ :=
by simp [pre.mul, lift_eq_map_prod, map_prod_mul_aux]
end lift
section of
variables (i : ι) (a b : M i)
def of (i : ι) (a : M i) : list (Σ i, M i) :=
if a = 1 then [] else [⟨i, a⟩]
lemma reduced_of (i : ι) (a : M i) : reduced (of i a) :=
begin
rw of,
split_ifs,
{ simp },
{ exact reduced_singleton h }
end
lemma of_one : of i (1 : M i) = [] := if_pos rfl
lemma of_mul : of i (a * b) = pre.mul (of i a) (of i b) :=
begin
simp only [of, pre.mul, mul_aux],
split_ifs; simp [mul_aux, *, reverse_core_eq] at *
end
lemma lift_of (f : Π i, M i →* N) : lift f (of i a) = f i a :=
begin
simp [lift, of],
split_ifs;
simp *
end
end of
section embedding
variables {κ : Type*} {O : κ → Type*} [Π i, monoid (O i)]
variables (f : ι → κ) (hf : injective f)
(g : Π i, M i →* O (f i)) (hg : ∀ i a, g i a = 1 → a = 1)
protected def embedding (l : list (Σ i, M i)) : list Σ i, O i :=
l.map (λ i, ⟨f i.1, g i.1 i.2⟩)
include hf hg
variables [decidable_eq κ] [Π i, decidable_eq (O i)]
lemma embedding_mul_aux : ∀ (l₁ l₂ : list (Σ i, M i)),
pre.embedding f g (mul_aux l₁ l₂) = mul_aux (pre.embedding f g l₁) (pre.embedding f g l₂)
| [] l₂ := rfl
| (i::l₁) [] := by simp [pre.embedding, mul_aux]
| (⟨i,a⟩::l₁) (⟨j, b⟩::l₂) :=
begin
rw [mul_aux],
split_ifs,
{ dsimp only at h,
subst h,
have : g i a * g i b = 1,
{ erw [← monoid_hom.map_mul, h_1, monoid_hom.map_one] },
rw [embedding_mul_aux],
simp [pre.embedding, mul_aux, this] },
{ dsimp only at h,
subst h,
have : g i a * g i b ≠ 1,
{ rw [← monoid_hom.map_mul],
exact mt (hg i (a * b)) h_1 },
simp [pre.embedding, mul_aux, this, reverse_core_eq] },
{ dsimp only at h,
simp [pre.embedding, mul_aux, reverse_core_eq, hf.eq_iff, h] }
end
lemma embedding_mul {l₁ l₂ : list (Σ i, M i)} :
pre.embedding f g (pre.mul l₁ l₂) = pre.mul (pre.embedding f g l₁) (pre.embedding f g l₂) :=
begin
simp [pre.mul, embedding_mul_aux _ hf _ hg],
simp [pre.embedding]
end
lemma reduced_embedding {l : list (Σ i, M i)} (hl : reduced l) :
reduced (pre.embedding f g l) :=
⟨by simp [pre.embedding, list.chain'_map, hf.eq_iff, hl.1],
begin
simp only [pre.embedding, mem_map, and_imp, sigma.forall],
rintros i a ⟨⟨j, b⟩, hjb, rfl, h⟩,
rw [heq_iff_eq] at h,
subst a,
exact mt (hg j b) (hl.2 _ hjb)
end⟩
end embedding
section inv
variable [Π i, decidable_eq (G i)]
protected def inv (l : list (Σ i, G i)) : list (Σ i, G i) :=
list.reverse (l.map (λ i : Σ i, G i, ⟨i.1, i.2⁻¹⟩))
lemma reduced_inv (l : list (Σ i, G i)) (hl : reduced l) :
reduced (pre.inv l) :=
⟨list.chain'_reverse.2 ((list.chain'_map _).2 $
by { convert hl.1, simp [function.funext_iff, eq_comm, flip] }),
begin
rintros ⟨i, a⟩ hi,
rw [pre.inv, mem_reverse, mem_map] at hi,
rcases hi with ⟨⟨j, b⟩, hjl, h⟩,
simp only at h,
cases h with hij hba,
subst hij,
convert inv_ne_one.2 (hl.2 ⟨j, b⟩ hjl),
simp * at *
end⟩
protected lemma mul_left_inv_aux : ∀ l : list (Σ i, G i),
mul_aux (l.map (λ i : Σ i, G i, ⟨i.1, i.2⁻¹⟩)) l = []
| [] := rfl
| (i::l) := by simp [mul_aux, mul_left_inv_aux l]
protected lemma mul_left_inv (l : list (Σ i, G i)) :
pre.mul (pre.inv l) l = [] :=
by rw [pre.mul, pre.inv, reverse_reverse, pre.mul_left_inv_aux]
end inv
end coprod.pre
|
#include <bsplines/BSplinePose.hpp>
#include <sm/assert_macros.hpp>
// boost::tie
#include <boost/tuple/tuple.hpp>
#include <sm/kinematics/transformations.hpp>
using namespace sm::kinematics;
using namespace bsplines;
BSplinePose::BSplinePose(int splineOrder, const RotationalKinematics::Ptr& rotationalKinematics)
: BSpline(splineOrder), rotation_(rotationalKinematics) {}
BSplinePose::~BSplinePose() {}
Eigen::Matrix4d BSplinePose::transformation(double tk) const { return curveValueToTransformation(eval(tk)); }
Eigen::Matrix4d BSplinePose::transformationAndJacobian(double tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
Eigen::MatrixXd JS;
Eigen::VectorXd p;
p = evalDAndJacobian(tk, 0, &JS, coefficientIndices);
Eigen::MatrixXd JT;
Eigen::Matrix4d T = curveValueToTransformationAndJacobian(p, &JT);
if (J) {
*J = JT * JS;
}
return T;
}
Eigen::Matrix3d BSplinePose::orientationAndJacobian(double tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
Eigen::Matrix3d C;
Eigen::MatrixXd JS;
Eigen::VectorXd p;
p = evalDAndJacobian(tk, 0, &JS, coefficientIndices);
Eigen::Matrix3d S;
C = rotation_->parametersToRotationMatrix(p.tail<3>(), &S);
Eigen::MatrixXd JO = Eigen::MatrixXd::Zero(3, 6);
JO.block(0, 3, 3, 3) = S;
if (J) {
*J = JO * JS;
}
return C;
}
Eigen::Matrix3d BSplinePose::inverseOrientationAndJacobian(double tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
Eigen::Matrix3d C;
Eigen::MatrixXd JS;
Eigen::VectorXd p;
p = evalDAndJacobian(tk, 0, &JS, coefficientIndices);
Eigen::Matrix3d S;
C = rotation_->parametersToRotationMatrix(p.tail<3>(), &S).transpose();
Eigen::MatrixXd JO = Eigen::MatrixXd::Zero(3, 6);
JO.block(0, 3, 3, 3) = S;
if (J) {
*J = -C * JO * JS;
}
return C;
}
Eigen::Matrix4d BSplinePose::inverseTransformationAndJacobian(double tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
// std::cout << __FUNCTION__ << "()\n";
// ASRL_THROW(std::runtime_error,"Not Implemented");
Eigen::MatrixXd JS;
Eigen::VectorXd p;
p = evalDAndJacobian(tk, 0, &JS, coefficientIndices);
Eigen::MatrixXd JT;
Eigen::Matrix4d T = curveValueToTransformationAndJacobian(p, &JT);
// Invert the transformation.
T.topLeftCorner<3, 3>().transposeInPlace();
T.topRightCorner<3, 1>() = (-T.topLeftCorner<3, 3>() * T.topRightCorner<3, 1>()).eval();
if (J) {
// The "box times" is the linearized transformation way of inverting the jacobian.
*J = -sm::kinematics::boxTimes(T) * JT * JS;
}
if (coefficientIndices) {
*coefficientIndices = localCoefficientVectorIndices(tk);
}
return T;
}
Eigen::Matrix4d BSplinePose::inverseTransformation(double tk) const {
Eigen::Matrix4d T = curveValueToTransformation(eval(tk));
T.topLeftCorner<3, 3>().transposeInPlace();
T.topRightCorner<3, 1>() = (-T.topLeftCorner<3, 3>() * T.topRightCorner<3, 1>()).eval();
return T;
}
Eigen::Vector4d BSplinePose::transformVectorAndJacobian(double tk, const Eigen::Vector4d& v_tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
Eigen::MatrixXd JT;
Eigen::Matrix4d T_n_vk = transformationAndJacobian(tk, &JT, coefficientIndices);
Eigen::Vector4d v_n = T_n_vk * v_tk;
if (J) {
*J = sm::kinematics::boxMinus(v_n) * JT;
}
return v_n;
}
// Position at a given time: p = x[:3]
Eigen::Vector3d BSplinePose::position(double tk) const { return eval(tk).head<3>(); }
// Orientation at a given time: phi = x[3:], R = Exp(phi)
Eigen::Matrix3d BSplinePose::orientation(double tk) const {
return rotation_->parametersToRotationMatrix(eval(tk).tail<3>());
}
Eigen::Matrix3d BSplinePose::inverseOrientation(double tk) const {
return rotation_->parametersToRotationMatrix(eval(tk).tail<3>()).transpose();
}
// v_W = dp/dt
Eigen::Vector3d BSplinePose::linearVelocity(double tk) const { return evalD(tk, 1).head<3>(); }
// v_B = R_BW * v_W = R_WB^T * dp/dt
Eigen::Vector3d BSplinePose::linearVelocityBodyFrame(double tk) const {
Eigen::VectorXd r = evalD(tk, 0);
Eigen::Matrix3d C_wb = rotation_->parametersToRotationMatrix(r.tail<3>());
return C_wb.transpose() * evalD(tk, 1).head<3>();
}
// a_W = dp^2/dt^2
Eigen::Vector3d BSplinePose::linearAcceleration(double tk) const { return evalD(tk, 2).head<3>(); }
// a_B = R_BW * a_W = R_WB^T * dp^2/dt^2
Eigen::Vector3d BSplinePose::linearAccelerationBodyFrame(double tk) const {
Eigen::VectorXd r = evalD(tk, 0);
Eigen::Matrix3d C_wb = rotation_->parametersToRotationMatrix(r.tail<3>());
return C_wb.transpose() * evalD(tk, 2).head<3>();
}
Eigen::Vector3d BSplinePose::linearAccelerationAndJacobian(double tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
Eigen::Vector3d a = evalDAndJacobian(tk, 2, J, coefficientIndices).head<3>();
if (J) {
J->conservativeResize(3, J->cols());
}
return a;
}
// \omega_w_{b,w} (angular velocity of the body frame as seen from the world frame, expressed in the world frame)
Eigen::Vector3d BSplinePose::angularVelocity(double tk) const {
Eigen::Vector3d omega;
Eigen::VectorXd r = evalD(tk, 0);
Eigen::VectorXd v = evalD(tk, 1);
// \omega = S(\bar \theta) \dot \theta
omega = -rotation_->parametersToSMatrix(r.tail<3>()) * v.tail<3>();
return omega;
}
// \omega_b_{w,b} (angular velocity of the world frame as seen from the body frame, expressed in the body frame)
Eigen::Vector3d BSplinePose::angularVelocityBodyFrame(double tk) const {
Eigen::Vector3d omega;
Eigen::VectorXd r = evalD(tk, 0);
Eigen::VectorXd v = evalD(tk, 1);
Eigen::Matrix3d S;
Eigen::Matrix3d C_w_b = rotation_->parametersToRotationMatrix(r.tail<3>(), &S);
// \omega = S(\bar \theta) \dot \theta
omega = -C_w_b.transpose() * S * v.tail<3>();
return omega;
}
// \omega_b_{w,b} (angular velocity of the world frame as seen from the body frame, expressed in the body frame)
Eigen::Vector3d BSplinePose::angularVelocityBodyFrameAndJacobian(double tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
Eigen::Vector3d omega;
Eigen::Vector3d p;
Eigen::Vector3d pdot;
Eigen::MatrixXd Jp;
Eigen::MatrixXd Jpdot;
p = evalDAndJacobian(tk, 0, &Jp, NULL).tail<3>();
pdot = evalDAndJacobian(tk, 1, &Jpdot, coefficientIndices).tail<3>();
Eigen::MatrixXd Jr;
Eigen::Matrix3d C_w_b = inverseOrientationAndJacobian(tk, &Jr, NULL);
// Rearrange the spline jacobian matrices. Now Jpdot is the
// jacobian of p wrt the spline coefficients stacked on top
// of the jacobian of pdot wrt the spline coefficients.
Jpdot.block(0, 0, 3, Jpdot.cols()) = Jp.block(3, 0, 3, Jp.cols());
// std::cout << "Jpdot\n" << Jpdot << std::endl;
Eigen::Matrix<double, 3, 6> Jo;
omega = -C_w_b * rotation_->angularVelocityAndJacobian(p, pdot, &Jo);
Jo = (-C_w_b * Jo).eval();
// std::cout << "Jo:\n" << Jo << std::endl;
if (J) {
*J = Jo * Jpdot + sm::kinematics::crossMx(omega) * Jr;
}
return omega;
}
// \omega_w_{b,w} (angular velocity of the body frame as seen from the world frame, expressed in the world frame)
Eigen::Vector3d BSplinePose::angularVelocityAndJacobian(double tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
Eigen::Vector3d omega;
Eigen::Vector3d p;
Eigen::Vector3d pdot;
Eigen::MatrixXd Jp;
Eigen::MatrixXd Jpdot;
p = evalDAndJacobian(tk, 0, &Jp, nullptr).tail<3>();
pdot = evalDAndJacobian(tk, 1, &Jpdot, coefficientIndices).tail<3>();
// Rearrange the spline jacobian matrices. Now Jpdot is the
// jacobian of p wrt the spline coefficients stacked on top
// of the jacobian of pdot wrt the spline coefficients.
Jpdot.block(0, 0, 3, Jpdot.cols()) = Jp.block(3, 0, 3, Jp.cols());
// std::cout << "Jpdot\n" << Jpdot << std::endl;
Eigen::Matrix<double, 3, 6> Jo;
// FixMe by CC: seems like lost the minus "-"?
omega = rotation_->angularVelocityAndJacobian(p, pdot, &Jo);
// std::cout << "Jo:\n" << Jo << std::endl;
if (J) {
*J = Jo * Jpdot;
}
return omega;
}
// \omega_dot_b_{w,b} (angular acceleration of the world frame as seen from the body frame, expressed in the body frame)
Eigen::Vector3d BSplinePose::angularAccelerationBodyFrame(double tk) const {
Eigen::Vector3d omega;
Eigen::VectorXd r = evalD(tk, 0);
Eigen::VectorXd v = evalD(tk, 2);
Eigen::Matrix3d S;
Eigen::Matrix3d C_w_b = rotation_->parametersToRotationMatrix(r.tail<3>(), &S);
// \dot \omega = S(\bar \theta) \ddot \theta
omega = -C_w_b.transpose() * S * v.tail<3>();
return omega;
}
// \omega_dot_b_{w,b} (angular acceleration of the world frame as seen from the body frame, expressed in the body frame)
Eigen::Vector3d BSplinePose::angularAccelerationBodyFrameAndJacobian(double tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
Eigen::Vector3d omega;
Eigen::Vector3d p;
Eigen::Vector3d pdot;
Eigen::MatrixXd Jp;
Eigen::MatrixXd Jpdot;
p = evalDAndJacobian(tk, 0, &Jp, NULL).tail<3>();
pdot = evalDAndJacobian(tk, 2, &Jpdot, coefficientIndices).tail<3>();
Eigen::MatrixXd Jr;
Eigen::Matrix3d C_w_b = inverseOrientationAndJacobian(tk, &Jr, NULL);
// Rearrange the spline jacobian matrices. Now Jpdot is the
// jacobian of p wrt the spline coefficients stacked on top
// of the jacobian of pdot wrt the spline coefficients.
Jpdot.block(0, 0, 3, Jpdot.cols()) = Jp.block(3, 0, 3, Jp.cols());
Eigen::Matrix<double, 3, 6> Jo;
omega = -C_w_b * rotation_->angularVelocityAndJacobian(p, pdot, &Jo);
Jo = (-C_w_b * Jo).eval();
if (J) {
*J = Jo * Jpdot + sm::kinematics::crossMx(omega) * Jr;
}
return omega;
}
// \omega_dot_w_{b,w} (angular acceleration of the body frame as seen from the world frame, expressed in the world
// frame)
Eigen::Vector3d BSplinePose::angularAccelerationAndJacobian(double tk, Eigen::MatrixXd* J,
Eigen::VectorXi* coefficientIndices) const {
Eigen::Vector3d omega;
Eigen::Vector3d p;
Eigen::Vector3d pdot;
Eigen::MatrixXd Jp;
Eigen::MatrixXd Jpdot;
p = evalDAndJacobian(tk, 0, &Jp, NULL).tail<3>();
pdot = evalDAndJacobian(tk, 2, &Jpdot, coefficientIndices).tail<3>();
// Rearrange the spline jacobian matrices. Now Jpdot is the
// jacobian of p wrt the spline coefficients stacked on top
// of the jacobian of pdot wrt the spline coefficients.
Jpdot.block(0, 0, 3, Jpdot.cols()) = Jp.block(3, 0, 3, Jp.cols());
Eigen::Matrix<double, 3, 6> Jo;
// FixMe by CC: seems like lost the minus "-"?
omega = rotation_->angularVelocityAndJacobian(p, pdot, &Jo);
if (J) {
*J = Jo * Jpdot;
}
return omega;
}
void BSplinePose::initPoseSpline(double t0, double t1, const Eigen::Matrix4d& T_n_t0, const Eigen::Matrix4d& T_n_t1) {
Eigen::VectorXd v0 = transformationToCurveValue(T_n_t0);
Eigen::VectorXd v1 = transformationToCurveValue(T_n_t1);
initSpline(t0, t1, v0, v1);
}
void BSplinePose::initPoseSpline2(const Eigen::VectorXd& times, const Eigen::Matrix<double, 6, Eigen::Dynamic>& poses,
int numSegments, double lambda) {
initSpline2(times, poses, numSegments, lambda);
}
void BSplinePose::initPoseSpline3(const Eigen::VectorXd& times, const Eigen::Matrix<double, 6, Eigen::Dynamic>& poses,
int numSegments, double lambda) {
initSpline3(times, poses, numSegments, lambda);
}
void BSplinePose::initPoseSplineSparse(const Eigen::VectorXd& times,
const Eigen::Matrix<double, 6, Eigen::Dynamic>& poses, int numSegments,
double lambda) {
initSplineSparse(times, poses, numSegments, lambda);
}
void BSplinePose::initPoseSplineSparseKnots(const Eigen::VectorXd& times, const Eigen::MatrixXd& interpolationPoints,
const Eigen::VectorXd& knots, double lambda) {
initSplineSparseKnots(times, interpolationPoints, knots, lambda);
}
void BSplinePose::addPoseSegment(double tk, const Eigen::Matrix4d& T_n_tk) {
Eigen::VectorXd vk = transformationToCurveValue(T_n_tk);
addCurveSegment(tk, vk);
}
void BSplinePose::addPoseSegment2(double tk, const Eigen::Matrix4d& T_n_tk, double lambda) {
Eigen::VectorXd vk = transformationToCurveValue(T_n_tk);
addCurveSegment2(tk, vk, lambda);
}
Eigen::Matrix4d BSplinePose::curveValueToTransformation(const Eigen::VectorXd& c) const {
SM_ASSERT_EQ_DBG(Exception, c.size(), 6, "The curve value is an unexpected size!");
Eigen::Matrix4d T = Eigen::Matrix4d::Identity();
T.topLeftCorner<3, 3>() = rotation_->parametersToRotationMatrix(c.tail<3>());
T.topRightCorner<3, 1>() = c.head<3>();
return T;
}
Eigen::VectorXd BSplinePose::transformationToCurveValue(const Eigen::Matrix4d& T) const {
Eigen::VectorXd c(6);
c.head<3>() = T.topRightCorner<3, 1>();
c.tail<3>() = rotation_->rotationMatrixToParameters(T.topLeftCorner<3, 3>());
return c;
}
RotationalKinematics::Ptr BSplinePose::rotation() const { return rotation_; }
Eigen::Matrix4d BSplinePose::curveValueToTransformationAndJacobian(const Eigen::VectorXd& p, Eigen::MatrixXd* J) const {
SM_ASSERT_EQ_DBG(Exception, p.size(), 6, "The curve value is an unexpected size!");
Eigen::Matrix4d T = Eigen::Matrix4d::Identity();
Eigen::Matrix3d S;
T.topLeftCorner<3, 3>() = rotation_->parametersToRotationMatrix(p.tail<3>(), &S);
T.topRightCorner<3, 1>() = p.head<3>();
if (J) {
*J = Eigen::MatrixXd::Identity(6, 6);
J->topRightCorner<3, 3>() = -crossMx(p.head<3>()) * S;
J->bottomRightCorner<3, 3>() = S;
}
return T;
}
|
September’s Free Book of the Month is here. Get yours now.
The story of Joseph offers a vivid picture of redemption, reconciliation, and forgiveness. But it’s so much more than that.
The Joseph story is sometimes described by scholars as ‘the Joseph cycle’. This is a helpful term. In Genesis, there are similarly ‘cycles’ of stories about Abraham (Gen. 12–25) and Jacob (Gen. 26–36); and in other parts of the Bible, there is a ‘David Cycle’ for example (1 Sam. 16–1 Kgs. 2) and one about Elijah (1 Kgs. 17–2 Kgs. 2). A ‘cycle’ in this sense is a series of connected and continuous narratives about a central figure, in which the component parts nevertheless have their own separate coherence and integrity—like episodes in a TV drama series perhaps or like the individual parts in a ‘cycle’ of Medieval mystery plays.
If you’ve ever wanted to take a closer look at this classic Bible story, Living the Dream provides easily digestible chunks to help you get the most from it. Each chapter of the book typically aligns with a single chapter of the story of Joseph.
While you’re at it, add another book from the same collection for just $0.99!
This month only, when you get Living the Dream for free, you can also snag God, Pharaoh and Moses for less than a buck.
In God, Pharaoh and Moses, William Ford tackles questions like: Why does God send a series of plagues to Egypt? How do we understand the hardening of Pharaoh’s heart?
1. Get your free book.
2. Add another for $0.99.
3. Enter to win the entire 9-volume Old Testament Studies Collection.
|
\DIFaddbegin
\clearpage
\subsection{\DIFadd{The Leather merchant}}
\label{sec:appendix:moj:leather}
\DIFadd{Viking leather workers made shoes for those who could afford them. To own a pair of shoes was a status symbol and showed that you had wealth. They also made bags, belts and scabbards for swords.
}
\begin{display}{The leather stall}
\label{fig:appendix:moj:places:leather:stall}
\DIFadd{\includegraphics[width=0.65\columnwidth]{img/Jorvik/places/leather stall}
}\end{display}
\begin{display}{The leather stall with a background and a merchant}
\label{fig:appendix:moj:places:leather}
\DIFadd{\includegraphics[width=0.65\columnwidth]{img/Jorvik/places/leather}
}\end{display}
\clearpage
\begin{table}[ht!]
\centering
\begin{tabular}{ p{3cm} c }\toprule
\textbf{\DIFaddFL{Name:}} & \multirow{5}{*}{\includegraphics[height=30mm]{img/Jorvik/objects/leather/sheath}}\\
\DIFaddFL{Sheath }& \\
\textbf{\DIFaddFL{Price:}} & \\
\DIFaddFL{14.11 silver. }& \\
\textbf{\DIFaddFL{Description:}} & \\
\multicolumn{2}{p{12cm}}{When a sword was not in use, it was kept safe in its sheath, called a scabbard. Scabbards were sometimes lined with fleece or fabric to further protect the blade.}\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht!]
\centering
\begin{tabular}{ p{3cm} c }\toprule
\textbf{\DIFaddFL{Name:}} & \multirow{5}{*}{\includegraphics[height=30mm]{img/Jorvik/objects/leather/drinking bottle}}\\
\DIFaddFL{Drinking Bottle }& \\
\textbf{\DIFaddFL{Price:}} & \\
\DIFaddFL{11.03 silver. }& \\
\textbf{\DIFaddFL{Description:}} & \\
\multicolumn{2}{p{12cm}}{A bottle, or costrel, used for storing and serving drinks could be made out of leather, wood or clay.}\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht!]
\centering
\begin{tabular}{ p{3cm} c }\toprule
\textbf{\DIFaddFL{Name:}} & \multirow{5}{*}{\includegraphics[height=30mm]{img/Jorvik/objects/leather/vambraces}}\\
\DIFaddFL{Vambraces }& \\
\textbf{\DIFaddFL{Price:}} & \\
\DIFaddFL{22.05 silver. }& \\
\textbf{\DIFaddFL{Description:}} & \\
\multicolumn{2}{p{12cm}}{Vikings often wore chainmail to protect their bodies in battle, but they needed leather vambraces to protect their arms.}\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht!]
\centering
\begin{tabular}{ p{3cm} c }\toprule
\textbf{\DIFaddFL{Name:}} & \multirow{5}{*}{\includegraphics[height=30mm]{img/Jorvik/objects/leather/thong}}\\
\DIFaddFL{Thong }& \\
\textbf{\DIFaddFL{Price:}} & \\
\DIFaddFL{2.21 silver. }& \\
\textbf{\DIFaddFL{Description:}} & \\
\multicolumn{2}{p{12cm}}{Leather thongs could be used as a belt or to attach skates to shoes. They were very useful to have around the home.}\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht!]
\centering
\begin{tabular}{ p{3cm} c }\toprule
\textbf{\DIFaddFL{Name:}} & \multirow{5}{*}{\includegraphics[height=30mm]{img/Jorvik/objects/leather/shoes}}\\
\DIFaddFL{Shoes }& \\
\textbf{\DIFaddFL{Price:}} & \\
\DIFaddFL{26.46 silver. }& \\
\textbf{\DIFaddFL{Description:}} & \\
\multicolumn{2}{p{12cm}}{Norse shoes may not have lasted very long, so they either underwent many repairs or were replaced regularly.}\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht!]
\centering
\begin{tabular}{ p{3cm} c }\toprule
\textbf{\DIFaddFL{Name:}} & \multirow{5}{*}{\includegraphics[height=30mm]{img/Jorvik/objects/leather/belt}}\\
\DIFaddFL{Belt }& \\
\textbf{\DIFaddFL{Price:}} & \\
\DIFaddFL{8.82 silver. }& \\
\textbf{\DIFaddFL{Description:}} & \\
\multicolumn{2}{p{12cm}}{Belts were made to be about 2 cm wide, much narrower than modern belts. Every part of the belt might be decorated; the Coppergate dig has unearthed examples of both decorated and undecorated belts and buckles.}\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht!]
\centering
\begin{tabular}{ p{3cm} c }\toprule
\textbf{\DIFaddFL{Name:}} & \multirow{5}{*}{\includegraphics[height=30mm]{img/Jorvik/objects/leather/bag}}\\
\DIFaddFL{Bag }& \\
\textbf{\DIFaddFL{Price:}} & \\
\DIFaddFL{17.64 silver. }& \\
\textbf{\DIFaddFL{Description:}} & \\
\multicolumn{2}{p{12cm}}{Viking clothing had no pockets so they used leather bags to carry coins, keys, a clean cloth and other small, useful items. The pouches would hang from their belts.}\\
\bottomrule
\end{tabular}
\end{table} \DIFaddend
|
= = = Demographics = = =
|
import Data.Vect
total allLengths' : Vect len String -> Vect len Nat
allLengths' [] = []
allLengths' (word :: words) = length word :: allLengths' words
|
lemma poly_cancel_eq_conv: fixes x :: "'a::field" shows "x = 0 \<Longrightarrow> a \<noteq> 0 \<Longrightarrow> y = 0 \<longleftrightarrow> a * y - b * x = 0"
|
Require Import Coq.Lists.List. Import ListNotations.
Require Import Coq.Classes.Morphisms.
Require Import Coq.Classes.RelationClasses.
Require Import Coq.Logic.PropExtensionality.
Require Import Coq.Logic.FunctionalExtensionality.
Require Import coqutil.Tactics.destr.
Definition set(A: Type) := A -> Prop.
Definition elem_of{K: Type}(k: K)(ks: K -> Prop): Prop := ks k.
Notation "x '\in' s" := (elem_of x s) (at level 70, no associativity).
Section PropSet. Local Set Default Proof Using "All".
Context {E: Type}.
(* basic definitions (which require knowing that set E = E -> Prop) *)
Definition empty_set: set E := fun _ => False.
Definition singleton_set: E -> set E := eq.
Definition union: set E -> set E -> set E :=
fun s1 s2 x => x \in s1 \/ x \in s2.
Definition intersect: set E -> set E -> set E :=
fun s1 s2 x => x \in s1 /\ x \in s2.
Definition diff: set E -> set E -> set E :=
fun s1 s2 x => x \in s1 /\ ~ x \in s2.
Definition of_list(l: list E): set E := fun e => List.In e l.
(* derived definitions (based on basic definitions, without knowing that set E = E -> Prop) *)
Definition add(s: set E)(e: E) := union (singleton_set e) s.
Definition remove(s: set E)(e: E) := diff s (singleton_set e).
Definition subset(s1 s2: set E) := forall x, x \in s1 -> x \in s2.
Definition sameset(s1 s2: set E) := subset s1 s2 /\ subset s2 s1.
Definition disjoint(s1 s2: set E) := forall x, (~ x \in s1) \/ (~ x \in s2).
Definition of_option(o: option E) := match o with
| Some e => singleton_set e
| None => empty_set
end.
End PropSet.
#[global] Hint Unfold
elem_of
empty_set
singleton_set
union
intersect
diff
of_list
: unf_basic_set_defs.
#[global] Hint Unfold
add
remove
subset
sameset
disjoint
of_option
: unf_derived_set_defs.
Section PropSetLemmas. Local Set Default Proof Using "All".
Context {E: Type}.
Lemma of_list_cons: forall (e: E) (l: list E),
sameset (of_list (e :: l)) (add (of_list l) e).
Proof.
intros. repeat autounfold with unf_derived_set_defs. simpl. auto.
Qed.
Lemma of_list_app: forall (l1 l2: list E),
sameset (of_list (l1 ++ l2)) (union (of_list l1) (of_list l2)).
Proof.
induction l1; repeat autounfold with unf_basic_set_defs unf_derived_set_defs in *;
intros; simpl; [intuition idtac|].
setoid_rewrite in_app_iff in IHl1.
setoid_rewrite in_app_iff.
intuition idtac.
Qed.
Lemma disjoint_diff_l: forall (A B C: set E),
disjoint A C ->
disjoint (diff A B) C.
Proof.
intros. unfold set, disjoint, diff in *. firstorder idtac.
Qed.
Lemma disjoint_diff_r: forall (A B C: set E),
disjoint C A ->
disjoint C (diff A B).
Proof.
intros. unfold set, disjoint, diff in *. firstorder idtac.
Qed.
Lemma subset_empty_l (s : set E) :
subset empty_set s.
Proof. firstorder idtac. Qed.
Lemma subset_refl: forall (s: set E), subset s s.
Proof. intros s x. exact id. Qed.
Lemma union_empty_l (s : set E) :
sameset (union empty_set s) s.
Proof. firstorder idtac. Qed.
Lemma union_empty_r (s : set E) :
sameset (union s empty_set) s.
Proof. firstorder idtac. Qed.
Lemma disjoint_empty_l (s : set E) :
disjoint empty_set s.
Proof. firstorder idtac. Qed.
Lemma disjoint_empty_r (s : set E) :
disjoint s empty_set.
Proof. firstorder idtac. Qed.
Lemma union_comm (s1 s2 : set E) :
sameset (union s1 s2) (union s2 s1).
Proof. firstorder idtac. Qed.
Lemma union_assoc (s1 s2 s3 : set E) :
sameset (union s1 (union s2 s3)) (union (union s1 s2) s3).
Proof. firstorder idtac. Qed.
Lemma of_list_nil : sameset (@of_list E []) empty_set.
Proof. firstorder idtac. Qed.
Lemma of_list_singleton x: sameset (@of_list E [x]) (singleton_set x).
Proof. firstorder idtac. Qed.
Lemma singleton_set_eq_of_list: forall (x: E),
singleton_set x = of_list [x].
Proof.
unfold singleton_set, of_list, elem_of, In.
intros. extensionality y. apply propositional_extensionality.
intuition idtac.
Qed.
Lemma in_union_l: forall x (s1 s2: set E), x \in s1 -> x \in (union s1 s2).
Proof. unfold union, elem_of. auto. Qed.
Lemma in_union_r: forall x (s1 s2: set E), x \in s2 -> x \in (union s1 s2).
Proof. unfold union, elem_of. auto. Qed.
Lemma in_of_list: forall x (l: list E), List.In x l -> x \in (of_list l).
Proof. unfold of_list, elem_of. auto. Qed.
Lemma in_singleton_set: forall (x: E), x \in singleton_set x.
Proof. unfold elem_of, singleton_set. intros. reflexivity. Qed.
Lemma sameset_iff (s1 s2 : set E) :
sameset s1 s2 <-> (forall e, s1 e <-> s2 e).
Proof. firstorder idtac. Qed.
Lemma add_union_singleton (x : E) s :
add s x = union (singleton_set x) s.
Proof. firstorder idtac. Qed.
Lemma not_union_iff (s1 s2 : set E) x :
~ union s1 s2 x <-> ~ s1 x /\ ~ s2 x.
Proof. firstorder idtac. Qed.
Lemma disjoint_cons (s : set E) x l :
disjoint s (of_list (x :: l)) ->
disjoint s (of_list l) /\ disjoint s (singleton_set x).
Proof. firstorder idtac. Qed.
Lemma disjoint_sameset (s1 s2 s3 : set E) :
sameset s3 s1 ->
disjoint s1 s2 ->
disjoint s3 s2.
Proof. firstorder idtac. Qed.
Lemma disjoint_union_l_iff (s1 s2 s3 : set E) :
disjoint (union s1 s2) s3 <-> disjoint s1 s3 /\ disjoint s2 s3.
Proof. firstorder idtac. Qed.
Lemma disjoint_union_r_iff (s1 s2 s3 : set E) :
disjoint s1 (union s2 s3) <-> disjoint s1 s2 /\ disjoint s1 s3.
Proof. firstorder idtac. Qed.
Lemma subset_union_l (s1 s2 s3 : set E) :
subset s1 s3 ->
subset s2 s3 ->
subset (union s1 s2) s3.
Proof. firstorder idtac. Qed.
Lemma subset_union_rl (s1 s2 s3 : set E) :
subset s1 s2 ->
subset s1 (union s2 s3).
Proof. firstorder idtac. Qed.
Lemma subset_union_rr (s1 s2 s3 : set E) :
subset s1 s3 ->
subset s1 (union s2 s3).
Proof. firstorder idtac. Qed.
Lemma subset_disjoint_r (s1 s2 s3 : set E) :
subset s2 s3 ->
disjoint s1 s3 ->
disjoint s1 s2.
Proof. firstorder idtac. Qed.
Lemma subset_disjoint_l (s1 s2 s3 : set E) :
subset s1 s3 ->
disjoint s3 s2 ->
disjoint s1 s2.
Proof. firstorder idtac. Qed.
Global Instance Proper_union :
Proper (sameset ==> sameset ==> sameset) (@union E).
Proof. firstorder idtac. Defined.
Global Instance Proper_intersect :
Proper (sameset ==> sameset ==> sameset) (@intersect E).
Proof. firstorder idtac. Defined.
Global Instance Proper_diff :
Proper (sameset ==> sameset ==> sameset) (@diff E).
Proof. firstorder idtac. Defined.
Global Instance Proper_add :
Proper (sameset ==> eq ==> sameset) (@add E).
Proof.
repeat intro; apply Proper_union; auto.
subst. firstorder idtac.
Defined.
Global Instance Proper_remove :
Proper (sameset ==> eq ==> sameset) (@remove E).
Proof.
repeat intro; apply Proper_diff; auto.
subst. firstorder idtac.
Defined.
Global Instance subset_trans : Transitive (@subset E) | 10.
Proof. firstorder idtac. Defined.
Global Instance subset_ref : Reflexive (@subset E) | 10.
Proof. firstorder idtac. Defined.
Global Instance Proper_subset
: Proper (sameset ==> sameset ==> iff) (@subset E).
Proof. firstorder idtac. Defined.
Global Instance sameset_sym : Symmetric (@sameset E) | 10.
Proof. firstorder idtac. Defined.
Global Instance sameset_trans : Transitive (@sameset E) | 10.
Proof. firstorder idtac. Defined.
Global Instance sameset_ref : Reflexive (@sameset E) | 10.
Proof. firstorder idtac. Defined.
Global Instance disjoint_sym : Symmetric (@disjoint E) | 10.
Proof. firstorder idtac. Defined.
Global Instance Proper_disjoint
: Proper (sameset ==> sameset ==> iff) (@disjoint E).
Proof. firstorder idtac. Defined.
Section with_eqb. Local Set Default Proof Using "All".
Context {eqb}
{eq_dec : forall x y : E, BoolSpec (x = y) (x <> y) (eqb x y)}.
Lemma disjoint_singleton_r_iff (x : E) (s : set E) :
~ s x <->
disjoint s (singleton_set x).
Proof.
intros. split; [|firstorder idtac].
intros. intro y.
destruct (eq_dec x y);
subst; try firstorder idtac.
Qed.
Lemma disjoint_singleton_singleton (x y : E) :
y <> x ->
disjoint (singleton_set x) (singleton_set y).
Proof.
intros.
apply disjoint_singleton_r_iff;
firstorder congruence.
Qed.
Lemma disjoint_not_in x (l : list E) :
~ In x l ->
disjoint (singleton_set x) (of_list l).
Proof.
intros. symmetry. apply disjoint_singleton_r_iff; eauto.
Qed.
Lemma NoDup_disjoint (l1 l2 : list E) :
NoDup (l1 ++ l2) ->
disjoint (of_list l1) (of_list l2).
Proof.
revert l2; induction l1; intros *;
rewrite ?app_nil_l, <-?app_comm_cons;
[ solve [firstorder idtac] | ].
inversion 1; intros; subst.
rewrite of_list_cons.
apply disjoint_union_l_iff; split; eauto.
apply disjoint_not_in; eauto.
rewrite in_app_iff in *. tauto.
Qed.
Lemma disjoint_NoDup (l1 l2 : list E) :
NoDup l1 ->
NoDup l2 ->
disjoint (of_list l1) (of_list l2) ->
NoDup (l1 ++ l2).
Proof.
revert l2; induction l1; intros *;
rewrite ?app_nil_l, <-?app_comm_cons;
[ solve [firstorder idtac] | ].
inversion 1; intros; subst.
match goal with H : disjoint (of_list (_ :: _)) _ |- _ =>
symmetry in H;
apply disjoint_cons in H; destruct H end.
match goal with H : disjoint _ (singleton_set _) |- _ =>
apply disjoint_singleton_r_iff in H;
cbv [of_list] in H
end.
constructor; [ rewrite in_app_iff; tauto | ].
apply IHl1; eauto using disjoint_sym.
Qed.
Lemma disjoint_of_list_disjoint_Forall: forall (l1 l2: list E) (P1 P2: E -> Prop),
Forall P1 l1 ->
Forall P2 l2 ->
(forall x, P1 x -> P2 x -> False) ->
disjoint (of_list l1) (of_list l2).
Proof.
unfold disjoint, of_list, elem_of. intros.
destr (List.find (eqb x) l1).
- eapply find_some in E0. destruct E0 as [E1 E2].
destr (eqb x e). 2: discriminate.
destr (List.find (eqb e) l2).
+ eapply find_some in E0. destruct E0 as [F1 F2].
destr (eqb e e0). 2: discriminate.
eapply Forall_forall in H. 2: eassumption.
eapply Forall_forall in H0. 2: eassumption.
exfalso. eauto.
+ right. intro C. eapply find_none in E0. 2: exact C.
destr (eqb e e); congruence.
- left. intro C. eapply find_none in E0. 2: exact C.
destr (eqb x x); congruence.
Qed.
End with_eqb.
End PropSetLemmas.
Require Import Coq.Program.Tactics.
Require Import coqutil.Tactics.Tactics.
Ltac set_solver_generic E :=
repeat (so fun hyporgoal => match hyporgoal with
| context [of_list (?l1 ++ ?l2)] => unique pose proof (of_list_app l1 l2)
| context [of_list (?h :: ?t)] => unique pose proof (of_list_cons h t)
end);
repeat autounfold with unf_basic_set_defs unf_derived_set_defs in *;
unfold elem_of in *;
destruct_products;
intros;
specialize_with E;
intuition (subst *; auto).
Goal forall T (l1 l2: list T) (e: T),
subset (of_list (l2 ++ l1)) (union (of_list (e :: l1)) (of_list l2)).
Proof. intros. set_solver_generic T. Qed.
|
Formal statement is: lemma components_nonoverlap: "\<lbrakk>c \<in> components s; c' \<in> components s\<rbrakk> \<Longrightarrow> (c \<inter> c' = {}) \<longleftrightarrow> (c \<noteq> c')" Informal statement is: If $c$ and $c'$ are components of $s$, then $c$ and $c'$ are disjoint if and only if $c \neq c'$.
|
\title{Digital Arithmetic Cells with myHDL}
\author{Steven K Armour}
\maketitle
```python
from myhdl import *
from myhdlpeek import Peeker
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sympy import *
init_printing()
import random
#https://github.com/jrjohansson/version_information
%load_ext version_information
%version_information myhdl, myhdlpeek, numpy, pandas, matplotlib, sympy, random
```
The version_information extension is already loaded. To reload it, use:
%reload_ext version_information
| Software | Version |
|---|---|
| Python | 3.6.4 64bit [GCC 7.2.0] |
| IPython | 6.2.1 |
| OS | Linux 4.13.0 45 generic x86_64 with debian stretch sid |
| myhdl | 0.10 |
| myhdlpeek | 0.0.6 |
| numpy | 1.13.3 |
| pandas | 0.21.1 |
| matplotlib | 2.1.1 |
| sympy | 1.1.1 |
| random | The 'random' distribution was not found and is required by the application |

Fri Jun 29 23:35:01 2018 MDT
```python
#helper functions to read in the .v and .vhd generated files into python
def VerilogTextReader(loc, printresult=True):
with open(f'{loc}.v', 'r') as vText:
VerilogText=vText.read()
if printresult:
print(f'***Verilog module from {loc}.v***\n\n', VerilogText)
return VerilogText
def VHDLTextReader(loc, printresult=True):
with open(f'{loc}.vhd', 'r') as vText:
VerilogText=vText.read()
if printresult:
print(f'***VHDL module from {loc}.vhd***\n\n', VerilogText)
return VerilogText
```
# Compression of Number System Values
```python
ConversionTable=pd.DataFrame()
ConversionTable['Decimal']=np.arange(0, 21)
ConversionTable['Binary']=[bin(i, 3) for i in np.arange(0, 21)]
ConversionTable['hex']=[hex(i) for i in np.arange(0, 21)]
ConversionTable['oct']=[oct(i) for i in np.arange(0, 21)]
ConversionTable
```
|    | Decimal | Binary | hex | oct |
|---:|--------:|-------:|----:|----:|
| 0  | 0  | 000   | 0x0  | 0o0  |
| 1  | 1  | 001   | 0x1  | 0o1  |
| 2  | 2  | 010   | 0x2  | 0o2  |
| 3  | 3  | 011   | 0x3  | 0o3  |
| 4  | 4  | 100   | 0x4  | 0o4  |
| 5  | 5  | 101   | 0x5  | 0o5  |
| 6  | 6  | 110   | 0x6  | 0o6  |
| 7  | 7  | 111   | 0x7  | 0o7  |
| 8  | 8  | 1000  | 0x8  | 0o10 |
| 9  | 9  | 1001  | 0x9  | 0o11 |
| 10 | 10 | 1010  | 0xa  | 0o12 |
| 11 | 11 | 1011  | 0xb  | 0o13 |
| 12 | 12 | 1100  | 0xc  | 0o14 |
| 13 | 13 | 1101  | 0xd  | 0o15 |
| 14 | 14 | 1110  | 0xe  | 0o16 |
| 15 | 15 | 1111  | 0xf  | 0o17 |
| 16 | 16 | 10000 | 0x10 | 0o20 |
| 17 | 17 | 10001 | 0x11 | 0o21 |
| 18 | 18 | 10010 | 0x12 | 0o22 |
| 19 | 19 | 10011 | 0x13 | 0o23 |
| 20 | 20 | 10100 | 0x14 | 0o24 |
```python
binarySum=lambda a, b, bits=2: np.binary_repr(a+b, bits)
```
```python
for i in [[0,0], [0,1], [1,0], [1,1]]:
print(f'{i[0]} + {i[1]} yields {binarySum(*i)}_2, {int(binarySum(*i), 2)}_10')
```
0 + 0 yields 00_2, 0_10
0 + 1 yields 01_2, 1_10
1 + 0 yields 01_2, 1_10
1 + 1 yields 10_2, 2_10
Notice that when we add $1_2+1_2$ we no longer get just a single bit; we also get a new bit in the next binary column, which we call the carry bit. This yields the following truth table for what is called the two-bit or half adder
# Half Adder
```python
TwoBitAdderTT=pd.DataFrame()
TwoBitAdderTT['x2']=[0,0,1,1]
TwoBitAdderTT['x1']=[0,1,0,1]
TwoBitAdderTT['Sum']=[0,1,1,0]
TwoBitAdderTT['Carry']=[0,0,0,1]
TwoBitAdderTT
```
|    | x2 | x1 | Sum | Carry |
|---:|---:|---:|----:|------:|
| 0  | 0  | 0  | 0   | 0     |
| 1  | 0  | 1  | 1   | 0     |
| 2  | 1  | 0  | 1   | 0     |
| 3  | 1  | 1  | 0   | 1     |
Looking at this truth table we can surmise the following
```python
x1, x2=symbols('x_1, x_2')
Sum, Carry=symbols(r'\text{Sum}, \text{Carry}')
HASumDef=x1^x2; HACarryDef=x1 & x2
HASumEq=Eq(Sum, HASumDef); HACarryEq=Eq(Carry, HACarryDef)
HASumEq, HACarryEq
```
$$\left ( \text{Sum} = x_{1} \veebar x_{2}, \quad \text{Carry} = x_{1} \wedge x_{2}\right )$$
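A minimal sanity check, substituting every input combination into the `HASumDef` and `HACarryDef` expressions defined above to confirm they reproduce the half-adder truth table:
```python
# Evaluate the half-adder Sum and Carry expressions for all four
# input combinations; sympy reduces each substitution to True/False.
for b2 in (False, True):
    for b1 in (False, True):
        s = HASumDef.subs({x1: b1, x2: b2})
        c = HACarryDef.subs({x1: b1, x2: b2})
        print(f'x2={int(b2)} x1={int(b1)} -> Sum={int(bool(s))} Carry={int(bool(c))}')
```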
We can thus generate the following myHDL
```python
@block
def HalfAdder(x1, x2, Sum, Carry):
"""
Half adder in myHDL
I/O:
x1 (bool): x1 input
x2 (bool): x2 input
Sum (bool): the sum Half Adder output
Carry (bool): the carry Half Adder output
"""
@always_comb
def logic():
Sum.next=x1^x2
Carry.next=x1 & x2
return logic
```
```python
Peeker.clear()
x1=Signal(bool(0)); Peeker(x1, 'x1')
x2=Signal(bool(0)); Peeker(x2, 'x2')
Sum=Signal(bool(0)); Peeker(Sum, 'Sum')
Carry=Signal(bool(0)); Peeker(Carry, 'Carry')
DUT=HalfAdder(x1, x2, Sum, Carry)
def HalfAdder_TB():
"""
Half Adder Testbench for use in python only
"""
@instance
def Stimules():
for _, row in TwoBitAdderTT.iterrows():
x1.next=row['x1']
x2.next=row['x2']
yield delay(1)
raise StopSimulation()
return instances()
sim = Simulation(DUT, HalfAdder_TB(), *Peeker.instances()).run()
```
```python
Peeker.to_wavedrom('x2 x1 | Sum Carry', title='Half Adder Wave Form', tock=True)
```
```python
HARes=Peeker.to_dataframe()
HARes=HARes[['x2', 'x1','Sum', 'Carry']]
HARes
```
|    | x2 | x1 | Sum | Carry |
|---:|---:|---:|----:|------:|
| 0  | 0  | 0  | 0   | 0     |
| 1  | 0  | 1  | 1   | 0     |
| 2  | 1  | 0  | 1   | 0     |
| 3  | 1  | 1  | 0   | 1     |
We can then confirm that these results match the expected truth table via
```python
TwoBitAdderTT==HARes
```
|    | x2   | x1   | Sum  | Carry |
|---:|------|------|------|-------|
| 0  | True | True | True | True  |
| 1  | True | True | True | True  |
| 2  | True | True | True | True  |
| 3  | True | True | True | True  |
```python
DUT.convert()
VerilogTextReader('HalfAdder');
```
***Verilog module from HalfAdder.v***
// File: HalfAdder.v
// Generated by MyHDL 0.10
// Date: Fri Jun 29 23:13:48 2018
`timescale 1ns/10ps
module HalfAdder (
x1,
x2,
Sum,
Carry
);
// Half adder in myHDL
// I/O:
// x1 (bool): x1 input
// x2 (bool): x2 input
// Sum (bool): the sum Half Adder output
// Carry (bool): the carry Half Adder output
input x1;
input x2;
output Sum;
wire Sum;
output Carry;
wire Carry;
assign Sum = (x1 ^ x2);
assign Carry = (x1 & x2);
endmodule
**HalfAdder RTL**
**HalfAdder Synthesis**
```python
@block
def HalfAdder_TBV():
"""
Half Adder Testbench for use in Verilog
"""
x1 = Signal(bool(0))
x2 = Signal(bool(0))
Sum = Signal(bool(0))
Carry = Signal(bool(0))
DUT = HalfAdder(x1, x2, Sum, Carry)
testx1=Signal(intbv(int("".join([str(i) for i in TwoBitAdderTT['x1'].tolist()]), 2))[4:])
testx2=Signal(intbv(int("".join([str(i) for i in TwoBitAdderTT['x2'].tolist()]), 2))[4:])
@instance
def Stimulus():
for i in range(len(testx1)):
x1.next = testx1[i]
x2.next = testx2[i]
yield delay(1)
raise StopSimulation()
@always_comb
def print_data():
print(x1, x2, Sum, Carry)
return instances()
# create instance of TB
TB = HalfAdder_TBV()
# convert to verilog with initial values
TB.convert(hdl="verilog", initial_values=True)
VerilogTextReader('HalfAdder_TBV');
```
<class 'myhdl._Signal._Signal'> <class '_ast.Name'>
<class 'myhdl._Signal._Signal'> <class '_ast.Name'>
<class 'myhdl._Signal._Signal'> <class '_ast.Name'>
<class 'myhdl._Signal._Signal'> <class '_ast.Name'>
***Verilog module from HalfAdder_TBV.v***
// File: HalfAdder_TBV.v
// Generated by MyHDL 0.10
// Date: Fri Jun 29 23:16:26 2018
`timescale 1ns/10ps
module HalfAdder_TBV (
);
// Half Adder Testbench for use in Verilog
wire Sum;
reg x1 = 0;
reg x2 = 0;
wire Carry;
wire [3:0] testx1;
wire [3:0] testx2;
assign testx1 = 4'd5;
assign testx2 = 4'd3;
assign Sum = (x1 ^ x2);
assign Carry = (x1 & x2);
initial begin: HALFADDER_TBV_STIMULUS
integer i;
for (i=0; i<4; i=i+1) begin
x1 <= testx1[i];
x2 <= testx2[i];
# 1;
end
$finish;
end
always @(Carry, x2, Sum, x1) begin: HALFADDER_TBV_PRINT_DATA
$write("%h", x1);
$write(" ");
$write("%h", x2);
$write(" ");
$write("%h", Sum);
$write(" ");
$write("%h", Carry);
$write("\n");
end
endmodule
/home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: testx1
category=ToVerilogWarning
/home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: testx2
category=ToVerilogWarning
Where the above warnings can be ignored since we are using `testx1` and `testx2` as stores for the binary set of inputs to apply to `x1` and `x2`
# The full adder
If we assume that a carry is an input we can then extend the truth table for the two-bit adder to a three-bit adder yielding
```python
ThreeBitAdderTT=pd.DataFrame()
ThreeBitAdderTT['c1']=[0,0,0,0,1,1,1,1]
ThreeBitAdderTT['x2']=[0,0,1,1,0,0,1,1]
ThreeBitAdderTT['x1']=[0,1,0,1,0,1,0,1]
ThreeBitAdderTT['Sum']=[0,1,1,0,1,0,0,1]
ThreeBitAdderTT['Carry']=[0,0,0,1,0,1,1,1]
ThreeBitAdderTT
```
|    | c1 | x2 | x1 | Sum | Carry |
|---:|---:|---:|---:|----:|------:|
| 0  | 0  | 0  | 0  | 0   | 0     |
| 1  | 0  | 0  | 1  | 1   | 0     |
| 2  | 0  | 1  | 0  | 1   | 0     |
| 3  | 0  | 1  | 1  | 0   | 1     |
| 4  | 1  | 0  | 0  | 1   | 0     |
| 5  | 1  | 0  | 1  | 0   | 1     |
| 6  | 1  | 1  | 0  | 0   | 1     |
| 7  | 1  | 1  | 1  | 1   | 1     |
! need a way to make K-Maps in python
Upon constructing and reducing the Karnaugh maps from the three-bit adder truth table we get the following equations
```python
c1, x1, x2=symbols('c_1,x_1, x_2')
Sum, Carry=symbols(r'\text{Sum}, \text{Carry}')
FASumDef=x1^x2^c1; FACarryDef=x1&x2 | x1&c1 | x2&c1
FASumEq=Eq(Sum, FASumDef); FACarryEq=Eq(Carry, FACarryDef)
FASumEq, FACarryEq
```
$$\left ( \text{Sum} = c_{1} \veebar x_{1} \veebar x_{2}, \quad \text{Carry} = \left(c_{1} \wedge x_{1}\right) \vee \left(c_{1} \wedge x_{2}\right) \vee \left(x_{1} \wedge x_{2}\right)\right )$$
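The same style of sanity check as for the half adder, now over all eight input rows, confirms that these expressions reproduce the three-bit truth table:
```python
# Evaluate the full-adder Sum and Carry expressions for all eight
# input combinations of (c1, x2, x1).
for bc in (False, True):
    for b2 in (False, True):
        for b1 in (False, True):
            s = FASumDef.subs({x1: b1, x2: b2, c1: bc})
            c = FACarryDef.subs({x1: b1, x2: b2, c1: bc})
            print(f'c1={int(bc)} x2={int(b2)} x1={int(b1)} -> '
                  f'Sum={int(bool(s))} Carry={int(bool(c))}')
```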
yielding the following next myHDL module
```python
@block
def FullAdder(x1, x2, c1, Sum, Carry):
"""
Full adder in myHDL
I/O:
x1 (bool): x1 input
x2 (bool): x2 input
c1 (bool): carry input
Sum (bool): the sum Full Adder output
Carry (bool): the carry Full Adder output
Note!:
There is something wrong on the HDL side at the moment
"""
@always_comb
def logic():
Sum.next=x1 ^ x2 ^c1
Carry.next=(x1 & x2) | (x1 & c1) | (x2 & c1)
return logic
```
```python
Peeker.clear()
x1=Signal(bool(0)); Peeker(x1, 'x1')
x2=Signal(bool(0)); Peeker(x2, 'x2')
c1=Signal(bool(0)); Peeker(c1, 'c1')
Sum=Signal(bool(0)); Peeker(Sum, 'Sum')
Carry=Signal(bool(0)); Peeker(Carry, 'Carry')
DUT=FullAdder(x1, x2, c1, Sum, Carry)
def FullAdder_TB():
@instance
def Stimules():
for _, row in ThreeBitAdderTT.iterrows():
x1.next=row['x1']
x2.next=row['x2']
c1.next=row['c1']
yield delay(1)
raise StopSimulation()
return instances()
sim = Simulation(DUT, FullAdder_TB(), *Peeker.instances()).run()
```
```python
Peeker.to_wavedrom('c1 x2 x1 | Sum Carry', title='Full Adder Wave Form', tock=True)
```
```python
FARes=Peeker.to_dataframe()
FARes=FARes[['c1', 'x2', 'x1', 'Sum', 'Carry']]
FARes
```
```python
ThreeBitAdderTT==FARes
```
```python
DUT.convert()
VerilogTextReader('FullAdder');
```
***Verilog module from FullAdder.v***
// File: FullAdder.v
// Generated by MyHDL 0.10
// Date: Fri Jun 29 23:15:35 2018
`timescale 1ns/10ps
module FullAdder (
x1,
x2,
c1,
Sum,
Carry
);
// Full adder in myHDL
// I/O:
// x1 (bool): x1 input
// x2 (bool): x2 input
// c1 (bool): carry input
// Sum (bool): the sum Full Adder output
// Carry (bool): the carry Full Adder output
input x1;
input x2;
input c1;
output Sum;
wire Sum;
output Carry;
wire Carry;
assign Sum = ((x1 ^ x2) ^ c1);
assign Carry = (((x1 & x2) | (x1 & c1)) | (x2 & c1));
endmodule
**FullAdder RTL**
**FullAdder Synthesis**
```python
@block
def FullAdder_TBV():
"""
Full Adder Testbench for use in Verilog
"""
x1=Signal(bool(0))
x2=Signal(bool(0))
c1=Signal(bool(0))
Sum=Signal(bool(0))
Carry=Signal(bool(0))
DUT = FullAdder(x1, x2, c1, Sum, Carry)
testx1=Signal(intbv(int("".join([str(i) for i in ThreeBitAdderTT['x1'].tolist()]), 2))[4:])
testx2=Signal(intbv(int("".join([str(i) for i in ThreeBitAdderTT['x2'].tolist()]), 2))[4:])
testc1=Signal(intbv(int("".join([str(i) for i in ThreeBitAdderTT['c1'].tolist()]), 2))[4:])
@instance
def Stimulus():
for i in range(len(testx1)):
x1.next=testx1[i]
x2.next=testx2[i]
c1.next=testc1[i]
yield delay(1)
raise StopSimulation()
@always_comb
def print_data():
print(x1, x2, c1, Sum, Carry)
return instances()
# create instance of TB
TB = FullAdder_TBV()
# convert to verilog with initial values
TB.convert(hdl="verilog", initial_values=True)
VerilogTextReader('FullAdder_TBV');
```
<class 'myhdl._Signal._Signal'> <class '_ast.Name'>
<class 'myhdl._Signal._Signal'> <class '_ast.Name'>
<class 'myhdl._Signal._Signal'> <class '_ast.Name'>
<class 'myhdl._Signal._Signal'> <class '_ast.Name'>
<class 'myhdl._Signal._Signal'> <class '_ast.Name'>
***Verilog module from FullAdder_TBV.v***
// File: FullAdder_TBV.v
// Generated by MyHDL 0.10
// Date: Fri Jun 29 23:24:09 2018
`timescale 1ns/10ps
module FullAdder_TBV (
);
// Full Adder Testbench for use in Verilog
wire Sum;
reg x1 = 0;
reg x2 = 0;
wire Carry;
reg c1 = 0;
wire [3:0] testx1;
wire [3:0] testx2;
wire [3:0] testc1;
assign testx1 = 4'd5;
assign testx2 = 4'd3;
assign testc1 = 4'd15;
assign Sum = ((x1 ^ x2) ^ c1);
assign Carry = (((x1 & x2) | (x1 & c1)) | (x2 & c1));
initial begin: FULLADDER_TBV_STIMULUS
integer i;
for (i=0; i<4; i=i+1) begin
x1 <= testx1[i];
x2 <= testx2[i];
c1 <= testc1[i];
# 1;
end
$finish;
end
always @(Carry, x1, c1, x2, Sum) begin: FULLADDER_TBV_PRINT_DATA
$write("%h", x1);
$write(" ");
$write("%h", x2);
$write(" ");
$write("%h", c1);
$write(" ");
$write("%h", Sum);
$write(" ");
$write("%h", Carry);
$write("\n");
end
endmodule
/home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: testx1
category=ToVerilogWarning
/home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: testx2
category=ToVerilogWarning
/home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: testc1
category=ToVerilogWarning
Where the above warnings can be ignored since we are using `testx1`, `testx2`, and `testc1` as stores for the binary set of inputs to apply to `x1`, `x2`, and `c1`
```python
```
|
#=====================================================
#Web of Science was used to locate the citations
#of individual papers that used or cited the
#following software packages:
#ADMB, Stan, TMB, JAGS, Classic BUGS, WinBUGS,
#OpenBUGS
#Since multiple methods were used to find citing
#papers, duplicates must be removed before returning
#the cites by year.
#=====================================================
XX <- read.csv("Citation analysis.csv", stringsAsFactors=F)
software <- c("Classic BUGS", "WinBUGS", "OpenBUGS",
"JAGS", "ADMB", "TMB", "Stan")
nsoftware <- length(software)
years <- seq(min(XX$PY), 2015, 1)
nyears <- length(years)
results <- matrix(nrow=nyears, ncol=nsoftware,
dimnames=list(years,software))
#get data in the right format
for (i in 1:nsoftware) {
#go through the data for each software package in turn
Xdata <- XX[XX$Software.package==software[i],]
#find the duplicated doi values
doi.dups <- duplicated(Xdata$DI, incomparables=c(""))
#find the duplicated journal+vol+pagestart
journal.dups <- duplicated(paste(Xdata$SO, Xdata$VL, Xdata$BP),
incomparables=c(""))
#find the duplicated WOS codes
WOS.dups <- duplicated(Xdata$UT, incomparables=c(""))
#extract the years (PY) from the data where there
#are no duplicates
#results[,i] <- hist(Xdata[!(doi.dups | journal.dups | WOS.dups),"PY"],
# breaks=seq(years[1]-0.5,years[nyears]+0.5,1), main="",
# xlab="Year", ylab="Citations", col="gray50")$counts
results[,i] <- tabulate(factor(Xdata[!(doi.dups | journal.dups | WOS.dups),"PY"], levels = years)) #alt way of getting results
## mtext(side=3,line=-1,software[i], cex=1.3)
}
write.csv(file="CountsByYear.csv",results)
## #create plot of citations
## pdf("Draft.pdf",width=12,height=12)
## mat <- matrix(1:nsoftware, nrow=nsoftware, ncol=1)
## par(mar=c(0,0,0,0), oma=c(3,3,1,1))
## ylims <- 110+apply(results, MARGIN=2, FUN=max)
## layout(mat=mat, widths=1, heights=ylims)
## for (i in 1:nsoftware) {
## if (i==nsoftware) {
## names.arg <- years
## } else {
## names.arg <- NA
## }
## barplot(results[,i], col="gray", ann=F, axes=F, names.arg=names.arg,
## xaxs="i", yaxs="i", ylim=c(0,ylims[i]))
## box()
## axis(side=2, las=1, at=seq(0,600,100))
## mtext(side=3, line =-1.7, outer=F, software[i], cex=1)
## }
## mtext(side=1, line=3, outer=T, "Year", cex=1.3)
## mtext(side=2, line=3, outer=T, "Number of citations", cex=1.3)
## dev.off()
|
{-# OPTIONS --cubical --safe #-}
module Cubical.Data.Empty.Properties where
open import Cubical.Core.Everything
open import Cubical.Foundations.Prelude
open import Cubical.Data.Empty.Base
isProp⊥ : isProp ⊥
isProp⊥ x = ⊥-elim x
|
```python
%matplotlib inline
import numpy as np
import pandas as pd
import scipy.stats
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import rcParams
rcParams['figure.dpi'] = 120
from IPython.display import HTML
from IPython.display import YouTubeVideo
from functools import partial
YouTubeVideo_formato = partial(YouTubeVideo, modestbranding=1, disablekb=0,
width=640, height=360, autoplay=0, rel=0, showinfo=0)
```
# Inferential statistics
Inference seeks to
> Draw **conclusions** from **facts or observations** through a **method or premise**
In the particular case of **statistical inference** we can make the following associations
- Facts: Data
- Premise: Probabilistic model
- Conclusion: An unobserved quantity of interest
And what we seek is to
> Quantify the uncertainty of the conclusion given the data and the model
Statistical inference can be divided into the following three levels
1. Fit a model to our data
1. Verify that the model is reliable
1. Answer a question using the model
In this lesson we will study the most widely used tools associated with each of these levels
1. **Maximum likelihood estimator**
1. **Goodness of fit** and **confidence intervals**
1. **Hypothesis testing**
## Model fitting: Maximum likelihood estimation
At this level of inference we seek to **fit** a theoretical model to our data. In this lesson we will focus on **parametric models**. A parametric model is one in which **a probability distribution is made explicit**.
Recall that a distribution has **parameters**. For example, the (univariate) Gaussian distribution is described by its mean $\mu$ and its variance $\sigma^2$. Fitting a Gaussian distribution then amounts to finding the values of $\mu$ and $\sigma$ that make the model resemble the empirical distribution of the data as closely as possible.
Below we review the steps needed to fit a distribution to our data
### Which distribution should we fit?
Before fitting we must make an assumption about the distribution of our model. In general we can fit any distribution, but a poor assumption could invalidate our inference
We can use the tools of **descriptive statistics** to study our data and make this decision in an informed way
In the following example, a histogram of the data reveals that a Gaussian model is not a good choice
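A sketch of such an example, using a synthetic right-skewed sample (the gamma shape and scale values below are arbitrary choices):
```python
# Generate a right-skewed sample and overlay a (deliberately
# inappropriate) Gaussian fit on its histogram.
np.random.seed(0)
sample = np.random.gamma(shape=2.0, scale=1.5, size=1000)  # heavy right tail

fig, ax = plt.subplots(figsize=(5, 3), tight_layout=True)
ax.hist(sample, bins=30, density=True, label='data')
xs = np.linspace(sample.min(), sample.max(), 200)
ax.plot(xs, scipy.stats.norm(sample.mean(), sample.std()).pdf(xs),
        label='Gaussian fit')
ax.legend();
```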
Why? The empirical distribution is clearly asymmetric: its right tail is heavier than its left tail. The Gaussian distribution is symmetric and therefore not appropriate in this case. Which distribution might be more appropriate?
### How do we fit the model? Maximum likelihood estimation
Below we describe a procedure for fitting parametric models called *maximum likelihood estimation* (MLE)
Let $\{x_1, x_2, \ldots, x_N\}$ be a set of data
**Assumption 1** The data follow the model $f(x;\theta)$, where $f(\cdot)$ is a distribution and $\theta$ are its parameters
$$
f(x_1, x_2, \ldots, x_N |\theta)
$$
**Assumption 2** The observations are independent and identically distributed (iid)
- If two variables are independent then $P(x, y) = P(x)P(y)$
- If they are additionally identically distributed then they share **the same distribution and parameters**
Using this we can write
$$
\begin{align}
f(x_1, x_2, \ldots, x_N |\theta) &= f(x_1|\theta) f(x_2|\theta) \ldots f(x_N|\theta) \nonumber \\
& = \prod_{i=1}^N f(x_i|\theta) \nonumber \\
& = \mathcal{L}(\theta)
\end{align}
$$
where $\mathcal{L}(\theta)$ is known as the likelihood, or inverse probability, of $\theta$
If we regard the data as fixed we can search for the value of $\theta$ with maximum likelihood
$$
\begin{align}
\hat \theta &= \text{arg} \max_\theta \mathcal{L}(\theta) \nonumber \\
&= \text{arg} \max_\theta \log \mathcal{L}(\theta) \nonumber \\
&= \text{arg} \max_\theta \sum_{i=1}^N \log f(x_i|\theta)
\end{align}
$$
The second step is valid because the maximum of $g(x)$ and that of $\log(g(x))$ coincide, since the logarithm is monotonically increasing. Applying the logarithm is also very convenient because it turns the product into a sum.
Now all that remains is to find the maximum. We can do this
- Analytically, differentiating with respect to $\theta$ and setting the result equal to zero
- Using iterative optimization techniques such as gradient descent
**Example:** The faulty scale
Your professor wants to measure his weight but suspects that his scale is faulty. To check, he measures his weight $N$ times, obtaining a set of observations $\{x_i\}$. Is it possible to obtain an estimator of the true weight $\hat x$ from these observations?
We model the observations as
$$
x_i = \hat x + \varepsilon_i
$$
where $\varepsilon_i$ is the noise or error of the instrument, and we assume that $\varepsilon_i \sim \mathcal{N}(0, \sigma_\varepsilon^2)$, i.e. the noise is **independent** and **Gaussian** with zero mean and **known variance** $\sigma_\varepsilon^2$
Then the distribution of $x_i$ is
$$
f(x_i|\hat x) = \mathcal{N}(\hat x, \sigma_\varepsilon^2)
$$
To find $\hat x$, we first write the logarithm of the **likelihood**
$$
\begin{align}
\log \mathcal{L}(\hat x) &= \sum_{i=1}^N \log f(x_i|\hat x) \nonumber \\
&= \sum_{i=1}^N \log \frac{1}{\sqrt{2\pi\sigma_\varepsilon^2}} \exp \left ( - \frac{1}{2\sigma_\varepsilon^2} (x_i - \hat x)^2 \right) \nonumber \\
&= -\frac{N}{2}\log(2\pi\sigma_\varepsilon^2) - \frac{1}{2\sigma_\varepsilon^2} \sum_{i=1}^N (x_i - \hat x)^2 \nonumber
\end{align}
$$
Then we must solve
$$
\begin{align}
\hat \theta &= \text{arg} \max_\theta \log \mathcal{L}(\theta) \nonumber \\
&= \text{arg} \max_\theta - \frac{1}{2\sigma_\varepsilon^2} \sum_{i=1}^N (x_i - \hat x)^2
\end{align}
$$
where we can ignore the first term of the likelihood since it does not depend on $\theta$. To find the maximum we differentiate the expression above and set it equal to zero
$$
-\frac{1}{2\sigma_\varepsilon^2} \sum_{i=1}^N 2(x_i - \hat x ) = 0.
$$
Finally, solving for $\hat x$, we arrive at
$$
\hat x = \frac{1}{N} \sum_{i=1}^N x_i,
$$
which is known as the maximum likelihood estimator **for the mean of a Gaussian**
Recall that we can verify it is a maximum by using the second derivative
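A quick numerical sketch of this result (the weight and noise values below are arbitrary): the MLE derived above is just the sample mean of the simulated measurements.
```python
# Simulate N noisy measurements of a known weight and check that
# the maximum likelihood estimator (the sample mean) recovers it.
np.random.seed(0)
true_weight, sigma_eps, N = 70.0, 2.0, 1000
meas = true_weight + sigma_eps*np.random.randn(N)
x_hat = np.sum(meas)/N  # the MLE derived above, identical to np.mean(meas)
print(x_hat, np.mean(meas))
```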
### MLE estimation with `scipy`
As we saw in the previous lesson, the [`scipy.stats`](https://docs.scipy.org/doc/scipy/reference/stats.html) module provides a large number of theoretical distributions, organized as
- continuous univariate
- discrete univariate
- multivariate
The distributions share many of their methods; below we review the most important ones. As an example, consider the Gaussian (Normal) distribution
```python
from scipy.stats import norm
dist = norm() # This creates a Gaussian with mean 0 and standard deviation (std) 1
dist = norm(loc=2, scale=2) # This creates a Gaussian with mean 2 and std 2
```
**Drawing a random sample with `rvs`**
After creating a distribution object we can draw a random sample using its `rvs` method
```python
dist = norm(loc=2, scale=2)
dist.rvs(size=10, # Number of random draws to generate
         random_state=None # Random seed
)
```
This returns an array of 10 numbers drawn at random from `dist`
**Evaluating the probability density function**
The density function of the Gaussian is
$$
f(x; \mu, \sigma^2) = \frac{1}{\sqrt{2\pi \sigma^2}} \exp \left( -\frac{1}{2\sigma^2} (x-\mu)^2 \right)
$$
The density of a continuous distribution object can be obtained with the `pdf` method, which is a function of `x`
```python
import numpy as np
import matplotlib.pyplot as plt

dist = norm(loc=2, scale=2)
x = np.linspace(-6, 10, num=100) # An ndarray representing x in the equation above
p = dist.pdf(x)
plt.plot(x, p) # Then we can plot the pdf
```
Equivalently, if we want the cumulative distribution function we use the `cdf` method
For discrete distribution objects we must use the `pmf` method instead
**Fitting the parameters with MLE**
The fit is performed with the `fit` method
```python
params = norm.fit(data # An ndarray with the data
)
```
In the case of the Gaussian, the vector `params` has two components, `loc` and `scale`. The number of parameters depends on the distribution being fitted. Note also that the fit uses `norm` (the distribution family) and not `norm()` (a frozen instance)
Once we have the fitted parameters we can use them with
```python
dist = norm(loc=params[0], scale=params[1])
```
For distributions with more than two parameters we can use
```python
dist = norm(*params[:-2], loc=params[-2], scale=params[-1])
```
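As a minimal sketch (with a hypothetical simulated sample), the same pattern applied to a three-parameter distribution such as the gamma looks as follows.
```python
import numpy as np
import scipy.stats

# Hypothetical skewed sample
rng = np.random.default_rng(5)
data = rng.gamma(shape=2., scale=1.5, size=500)

params = scipy.stats.gamma.fit(data)  # (shape, loc, scale)
dist = scipy.stats.gamma(*params[:-2], loc=params[-2], scale=params[-1])
print(params)
```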
### Exercise
Look at the following distribution and reflect: What features of it stand out? Which distribution would be appropriate to fit in this case?
```python
df = pd.read_csv('../data/cancer.csv', index_col=0)
df = df[["diagnosis", "radius1", "texture1"]]
x = df["radius1"].values
fig, ax = plt.subplots(figsize=(5, 3), tight_layout=True)
ax.hist(x, bins=20, density=True)
ax.set_xlabel('Nucleus radius');
```
- Select a distribution from `scipy.stats` and fit it to the data
- Plot the theoretical pdf over the histogram
```python
```
## Model verification: Goodness-of-fit tests
Once we have fitted a model, it is good practice to check how reliable the fit is. The most common tools for measuring how well our theoretical distribution fits are
- the [Akaike information criterion](https://en.wikipedia.org/wiki/Akaike_information_criterion)
- [quantile-quantile plots](https://es.wikipedia.org/wiki/Gr%C3%A1fico_Q-Q) (QQ plots)
- the non-parametric Kolmogorov-Smirnov (KS) test
Below we review the KS goodness-of-fit test
**The Kolmogorov-Smirnov test**
This is a non-parametric test that compares a sample of standardized data (the empirical distribution) against a theoretical cumulative distribution function (CDF). The test tries to refute the following hypothesis
> **Null hypothesis:** The distributions are identical
To apply the test we must first **standardize** the data. Standardizing refers to the transformation
$$
z = \frac{x - \mu_x}{\sigma_x}
$$
that is, the standardized data have zero mean and unit standard deviation
This is easily done with NumPy using
```python
z = (x - np.mean(x))/np.std(x)
```
### KS test with `scipy`
We can perform the KS test with the function [`scipy.stats.kstest`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html) where
```python
scipy.stats.kstest(rvs, # A sample of standardized observations
                   cdf, # A theoretical cumulative distribution, e.g. scipy.stats.norm.cdf
...
)
```
This function returns the value of the KS statistic and its associated *p-value*. The closer the KS statistic is to zero, the better the fit.
Later we will review hypothesis tests in detail. For now, recall that if the *p-value* is smaller than a significance level $\alpha=0.05$, then we reject the null hypothesis with confidence $1-\alpha = 0.95$, i.e. $95\%$. A minimal example follows.
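A minimal sketch with hypothetical simulated data: standardize a sample and test it against the standard normal CDF.
```python
import numpy as np
import scipy.stats

# Hypothetical sample: 200 draws from a Gaussian
rng = np.random.default_rng(0)
x = rng.normal(loc=2., scale=2., size=200)

# Standardize, then compare against the standard normal CDF
z = (x - np.mean(x))/np.std(x)
ks_stat, p_value = scipy.stats.kstest(z, scipy.stats.norm.cdf)
print(ks_stat, p_value)  # large p-value: no evidence against normality
```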
### Exercise
Consider the previous data sample
- Select a set of theoretical distributions
- Find the one with the best fit using `kstest`
```python
```
## Answering questions with our model: Hypothesis testing
Suppose a new treatment is applied to a sample of the population
- Is the treatment effective?
- Is there a difference between those who took the treatment and those who did not?
A hypothesis test is a statistical procedure for checking whether the result of an experiment is significant for the population
For this we formulate two scenarios, each with an associated hypothesis
- Null hypothesis ($H_0$): For example
    - "The experiment produced no difference"
    - "The experiment had no effect"
    - "The observations are the product of chance"
- Alternative hypothesis ($H_A$): Usually the complement of $H_0$
> A hypothesis test is designed to measure how strong the evidence is **against** the null hypothesis
### General algorithm of a hypothesis test
The following is the general algorithm of a parametric hypothesis test
1. Define $H_0$ and $H_A$
1. Define a statistic $T$
1. Assume a distribution for $T$ given that $H_0$ is true
1. Select a significance level $\alpha$
1. Compute $T$ for our data, $T_{data}$
1. Compute the **p-value**
    - If the test is one-tailed:
        - Upper: $p = P(T>T_{data})$
        - Lower: $p = P(T<T_{data})$
    - If the test is two-tailed: $p = P(T>|T_{data}|) + P(T<-|T_{data}|)$
Finally:
`If` $p < \alpha$
> We reject the null hypothesis with confidence (1-$\alpha$)
`Otherwise`
> There is not enough evidence to reject the null hypothesis
The value of $\alpha$ lets us control the **[Type I error](https://es.wikipedia.org/wiki/Errores_de_tipo_I_y_de_tipo_II)**, that is, the error we make if we reject $H_0$ when it was actually true (a false positive)
Typically $\alpha=0.05$ or $\alpha=0.01$ is used; the sketch below illustrates the full procedure
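A minimal sketch of the procedure with hypothetical numbers, assuming a standard normal null distribution for $T$.
```python
import scipy.stats

alpha = 0.05
T_data = 2.3  # hypothetical statistic computed from our data
null_dist = scipy.stats.norm()  # assumed distribution of T under H0

# Two-tailed p-value: probability of a value more extreme than |T_data|
p = null_dist.cdf(-abs(T_data)) + null_dist.sf(abs(T_data))
if p < alpha:
    print(f"p={p:.4f}: we reject H0 with confidence {1-alpha}")
else:
    print(f"p={p:.4f}: not enough evidence to reject H0")
```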
**Common interpretation errors**
It is often assumed that the p-value is the probability that $H_0$ is true given our observations
$$
p = P(H_0 | T> T_{data})
$$
This is a **serious error**. Formally, the **p-value** is the probability of observing a value of $T$ more extreme than the one observed, that is
$$
p = P(T> T_{data} | H_0)
$$
Another common error is believing that failing to reject $H_0$ is the same as accepting $H_0$
Not having enough evidence to reject is not the same as accepting
### A first hypothesis test: The one-sample t-test
Consider a set of $N$ iid observations $X = \{x_1, x_2, \ldots, x_N\}$ with sample mean $\bar x = \frac{1}{N} \sum_{i=1}^N x_i$
The one-sample t-test is a hypothesis test that checks whether $\bar x$ is significantly different from the **population mean** $\mu$, in the case where **we do not know the population variance** $\sigma^2$
The hypotheses are
- $H_0:$ $\bar x = \mu$
- $H_A:$ $\bar x \neq \mu$ (two-tailed)
The test statistic is
$$
t = \frac{\bar x - \mu}{\hat \sigma /\sqrt{N-1}}
$$
where $\hat \sigma = \sqrt{ \frac{1}{N} \sum_{i=1}^N (x_i - \bar x)^2}$ is the (biased) sample standard deviation
If we assume that $\bar x$ is distributed $\mathcal{N}(\mu, \frac{\sigma^2}{N})$, then
$t$ follows a [Student's t distribution](https://en.wikipedia.org/wiki/Student%27s_t-distribution) with $N-1$ degrees of freedom
- For iid samples and large $N$ the assumption holds by the central limit theorem
- If $N$ is small we must check the normality of the data (a minimal `scipy` sketch follows this list)
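A minimal sketch with hypothetical simulated data, using `scipy.stats.ttest_1samp`.
```python
import numpy as np
import scipy.stats

# Hypothetical sample: does its mean differ from mu = 70?
rng = np.random.default_rng(2)
x = rng.normal(loc=71., scale=2., size=30)

t_stat, p_value = scipy.stats.ttest_1samp(x, popmean=70.)
print(t_stat, p_value)  # reject H0 at alpha=0.05 if p_value < 0.05
```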
### Using the t-test to check that a regression is significant
In a linear regression model where we have $N$ examples
$$
y_i = x_i \theta_1 + \theta_0, ~ i=1, 2, \ldots, N
$$
We can test whether the correlation between $x$ and $y$ is significant with a test on $\theta_1$
For example, we can pose the following hypotheses
- $H_0:$ The slope is null, $\theta_1 = 0$
- $H_A:$ The slope is not null, $\theta_1 \neq 0$ (two-tailed)
We will assume that $\theta_1$ is normal but that we do not know its variance. Under this assumption the following test statistic can be formulated
$$
t = \frac{(\theta_1-\theta^*) }{\text{SE}_{\theta_1}/\sqrt{N-2}} = \frac{ r\sqrt{N-2}}{\sqrt{1-r^2}},
$$
where $r$ is the Pearson correlation coefficient (details below) and the last expression is obtained by substituting $\theta^*=0$ and $\text{SE}_{\theta_1} = \sqrt{ \frac{\frac{1}{N} \sum_i (y_i - \hat y_i)^2}{\text{Var}(x)}}$.
The statistic follows a Student's t distribution with $N-2$ degrees of freedom (the model has two parameters); a minimal sketch follows
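A minimal sketch with hypothetical simulated data: build the $t$ statistic from $r$ and compute its two-tailed p-value.
```python
import numpy as np
import scipy.stats

# Hypothetical data with a linear trend plus noise
rng = np.random.default_rng(3)
x = np.linspace(0., 10., num=40)
y = 1.5*x + 2. + rng.normal(scale=3., size=x.shape)

N = len(x)
r, _ = scipy.stats.pearsonr(x, y)
t_data = r*np.sqrt(N - 2)/np.sqrt(1. - r**2)
p = 2.*scipy.stats.t(df=N - 2).sf(abs(t_data))  # two-tailed p-value
print(t_data, p)
```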
## Guided exercise: Linear regression
In previous lessons we studied the linear regression model, which lets us study whether continuous variables are correlated. We also saw how to fit the model parameters using the least squares method. In this guided exercise we will see how to verify whether the fitted regression model is correct
After working through this exercise you will have learned
- The probabilistic interpretation of linear regression and the relationship between ordinary least squares and maximum likelihood estimation
- The $r$ statistic for measuring the strength of the correlation between two variables
- A hypothesis test to verify that the correlation found is statistically significant
We will use the following ice cream consumption dataset. Reference: [A handbook of small datasets](https://www.routledge.com/A-Handbook-of-Small-Data-Sets/Hand-Daly-McConway-Lunn-Ostrowski/p/book/9780367449667), a study carried out in the 1950s
```python
df = pd.read_csv('../data/helados.csv', header=0, index_col=0)
df.columns = ['consumo', 'ingreso', 'precio', 'temperatura']
display(df.head())
```
The dataset contains the average temperature of the day (degrees Fahrenheit), the average price of the ice cream purchased (dollars), the average weekly family income of the people who bought ice cream (dollars) and the consumption ([pints](https://en.wikipedia.org/wiki/Pint) per capita).
Below is a scatter plot of consumption as a function of the other variables. Do you think there is a correlation in this case?
```python
fig, ax = plt.subplots(1, 3, figsize=(8, 3), tight_layout=True, sharey=True)
for i, col in enumerate(df.columns[1:]):
    ax[i].scatter(df[col], df["consumo"], s=10)
    ax[i].set_xlabel(col)
ax[0].set_ylabel(df.columns[0]);
```
### Probabilistic interpretation and MLE of linear regression
Let $y$ be the consumption and $x$ the temperature.
We will assume iid Gaussian errors
$$
y_i = \hat y_i + \epsilon_i, \epsilon_i \sim \mathcal{N}(0, \sigma^2),
$$
and a **two-parameter** linear model (a straight line)
$$
\hat y_i = \theta_0 + \theta_1 x_i
$$
Under these assumptions the maximum likelihood estimator is
$$
\begin{align}
\hat \theta &= \text{arg}\max_\theta \log \mathcal{L}(\theta) \nonumber \\
&=\text{arg}\max_\theta - \frac{1}{2\sigma^2} \sum_{i=1}^N (y_i - \theta_0 - \theta_1 x_i)^2 \nonumber
\end{align}
$$
That is, the maximum likelihood estimator is equivalent to the ordinary least squares estimator $\hat \theta= (X^T X)^{-1} X^T y$ that we saw earlier
**Important:** When we use the least squares solution we are implicitly assuming that the observations are iid and that the likelihood is Gaussian
Differentiating with respect to the parameters and setting equal to zero we have
$$
\begin{align}
\sum_i y_i - N\theta_0 - \theta_1 \sum_i x_i &= 0 \nonumber \\
\sum_i y_i x_i - \theta_0 \sum_i x_i - \theta_1 \sum_i x_i^2 &= 0 \nonumber
\end{align}
$$
Finally we can solve to obtain
$$
\begin{align}
\theta_0 &= \bar y - \theta_1 \bar x \nonumber \\
\theta_1 &= \frac{\sum_i x_i y_i - N \bar x \bar y}{\sum_i x_i^2 - N \bar x^2} \nonumber \\
&= \frac{ \sum_i (y_i - \bar y)(x_i - \bar x)}{\sum_i (x_i - \bar x)^2} = \frac{\text{COV}(x, y)}{\text{Var}(x)}
\end{align}
$$
where we recognize the expressions for the covariance between $x$ and $y$ and the variance of $x$ (a quick numerical check follows)
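A quick numerical check of these closed-form expressions, with hypothetical simulated data.
```python
import numpy as np

# Hypothetical data: y = 2.5 x + 1 plus Gaussian noise
rng = np.random.default_rng(4)
x = rng.uniform(0., 10., size=100)
y = 2.5*x + 1. + rng.normal(scale=2., size=x.shape)

theta1 = np.cov(x, y, bias=True)[0, 1]/np.var(x)  # COV(x, y)/Var(x)
theta0 = np.mean(y) - theta1*np.mean(x)
print(theta0, theta1)  # should be close to (1, 2.5)
```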
### Pearson correlation coefficient
The strength of the correlation is usually measured using
$$
r^2 = 1 - \frac{\sum_i ( y_i - \hat y_i)^2}{\sum_i ( y_i - \bar y)^2} = 1 - \frac{\frac{1}{N} \sum_i (y_i - \hat y_i)^2}{\text{Var}(y)} = \frac{\text{COV}^2(x, y)}{\text{Var}(x) \text{Var}(y)}
$$
where $r = \frac{\text{COV}(x, y)}{\sqrt{\text{Var}(x) \text{Var}(y)}} \in [-1, 1]$ is known as the [Pearson correlation coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient)
where
- if $r=1$ there is a perfect linear correlation
- if $r=-1$ there is a perfect linear anticorrelation
- if $r=0$ there is no linear correlation between the variables
In general an $r$ above $0.5$ in absolute value is considered an important correlation
**Computing $r$ and the linear regression parameters**
We can use the DataFrame method
```python
df.corr()
```
which returns the matrix of linear correlations
If we also want the values of the parameters we can use the scipy function
```python
scipy.stats.linregress(x, # One-dimensional independent variable
                       y  # One-dimensional dependent variable
)
```
This function returns a tuple with
- The value of the slope: $\theta_1$
- The value of the intercept: $\theta_0$
- The correlation coefficient $r$
- The p-value
- The standard error of the fit
```python
fig, ax = plt.subplots(1, 3, figsize=(8, 3), tight_layout=True, sharey=True)
ax[0].set_ylabel(df.columns[0]);
for i, col in enumerate(df.columns[1:]):
    res = scipy.stats.linregress(df[col], df["consumo"])
    x_plot = np.linspace(np.amin(df[col]), np.amax(df[col]), num=100)
    ax[i].scatter(df[col], df["consumo"], label='data', s=10)
    ax[i].plot(x_plot, res.slope*x_plot + res.intercept, lw=2, c='r', label='model');
    ax[i].set_xlabel(col)
    ax[i].set_title(f"$r$: {res.rvalue:0.5f}")
    ax[i].legend()
```
That is, visually there appears to be
- a high positive correlation between consumption and temperature
- a moderate negative correlation between consumption and price
- a correlation close to zero between consumption and income
### Hypothesis tests and conclusions
The `linregress` function implements the t-test on $\theta_1$ that we saw earlier. Let us use these results to verify whether the correlations are statistically significant
```python
alpha = 0.05
for i, col in enumerate(df.columns[1:]):
    res = scipy.stats.linregress(df[col], df["consumo"])
    print(f"{col}: \t p-value:{res.pvalue:0.4f} \t Less than {alpha}?: {res.pvalue < alpha}")
```
As a complement, let us visualize
- the distributions under the null hypothesis: blue line
- the limits given by $\alpha$: black dashed lines
- the observed value of the statistic for each variable: red line
```python
fig, ax = plt.subplots(1, 3, figsize=(8, 2), tight_layout=True, sharey=True)
ax[0].set_ylabel(df.columns[0]);
N = df.shape[0]
t = np.linspace(-7, 7, num=1000)
dist = scipy.stats.t(loc=0, scale=1, df=N-2) # N-2 degrees of freedom (two fitted parameters)
for i, col in enumerate(df.columns[1:]):
    res = scipy.stats.linregress(df[col], df["consumo"])
    t_data = res.rvalue*np.sqrt(N-2)/np.sqrt(1.-res.rvalue**2)
    ax[i].plot(t, dist.pdf(t))
    ax[i].plot([dist.ppf(alpha/2)]*2, [0, np.amax(dist.pdf(t))], 'k--')
    ax[i].plot([dist.ppf(1-alpha/2)]*2, [0, np.amax(dist.pdf(t))], 'k--')
    ax[i].plot([t_data]*2, [0, np.amax(dist.pdf(t))], 'r-')
    ax[i].set_xlabel(col)
```
**Conclusion**
Based on the p-values and considering $\alpha=0.05$
What can we say about the correlations with ice cream consumption?
> We reject the null hypothesis that there is no correlation between temperature and consumption, with 95% confidence
For the income and price variables there is not enough evidence to reject $H_0$
### Final reflection
In the previous exercise we used a t-test for a linear regression between two variables. What test can I use if I want to do multivariate linear regression?
> You can use [ANOVA](https://pythonfordatascience.org/anova-python/)
What happens if...
- my data have a relationship that is not linear?
- $\theta_1$ is not Gaussian/normal?
- the noise is not Gaussian?
- the noise is Gaussian but its variance changes over time?
> In these cases the assumptions of the model or of the test do not hold, so the result is not reliable
If my assumptions are not met by any parametric test, the option is to use non-parametric tests
## Non-parametric testing: *Bootstrap*
We can estimate the uncertainty of an estimator non-parametrically using ***bootstrap* resampling**
This consists of taking our dataset of size $N$ and creating $T$ new datasets that "resemble it". The value of the estimator we are after is then computed on each of the $T$ datasets. This yields a distribution for the estimator, as illustrated in the accompanying diagram
To create the subsets we could assume independence and use **sampling with replacement**. This consists of drawing $N$ samples at random while allowing repetitions, as illustrated in the accompanying diagram
If independence cannot be assumed, one can instead use residual-based bootstrap or dependent bootstrap. You can find more details on the [*bootstrap*](https://www.stat.cmu.edu/~cshalizi/402/lectures/08-bootstrap/lecture-08.pdf) [here](http://homepage.divms.uiowa.edu/~rdecook/stat3200/notes/bootstrap_4pp.pdf) and [here](https://www.sagepub.com/sites/default/files/upm-binaries/21122_Chapter_21.pdf). Below we focus on the classic sampling with replacement and how to implement it in Python
### Implementation with NumPy and SciPy
The function `numpy.random.choice` lets us resample a dataset
For example, for linear regression we must resample the pairs/tuples $(x_i, y_i)$
Then we compute and store the model parameters for each resample. In this example we will create $1000$ bootstrap replicas of the dataset
```python
df = pd.read_csv('../data/helados.csv', header=0, index_col=0)
df.columns = ['consumo', 'ingreso', 'precio', 'temperatura']
x, y = df["temperatura"].values, df["consumo"].values
params = scipy.stats.linregress(x, y)
def muestreo_con_reemplazo(x, y):
    N = len(x)
    idx = np.random.choice(N, size=N, replace=True)
    return x[idx], y[idx]

def boostrap_linregress(x, y, T=100):
    # Parameters: t0, t1 and r
    params = np.zeros(shape=(T, 3))
    for t in range(T):
        res = scipy.stats.linregress(*muestreo_con_reemplazo(x, y))
        params[t, :] = [res.intercept, res.slope, res.rvalue]
    return params

boostrap_params = boostrap_linregress(x, y, T=1000)
```
### Empirical confidence intervals
Let us look at the empirical distribution of $r$ obtained using the bootstrap
In the figure below we have
- Blue histogram: Bootstrap distribution of $r$
- Red line: $r$ of the data
- Black dashed lines: Empirical 95% confidence interval
```python
r_bootstrap = boostrap_params[:, 2]
fig, ax = plt.subplots(figsize=(4, 3), tight_layout=True)
hist_val, hist_lim, _ = ax.hist(r_bootstrap, bins=20, density=True)
ax.plot([params.rvalue]*2, [0, np.max(hist_val)], 'r-', lw=2)
IC = np.percentile(r_bootstrap, [2.5, 97.5])
ax.plot([IC[0]]*2, [0, np.max(hist_val)], 'k--', lw=2)
ax.plot([IC[1]]*2, [0, np.max(hist_val)], 'k--', lw=2)
print(f"Intervalo de confianza al 95% de r: {IC}")
```
From the figure we can see that 95% of the empirical distribution lies above $r=0.5$
We can also see that the empirical distribution of $r$ is not symmetric, so applying a parametric t-test on $r$ would not have been correct
### Visualizing the model's uncertainty
Using the empirical distribution of the parameters $\theta_0$ and $\theta_1$ we can visualize the uncertainty of our linear regression model
In the figure below we have
- Blue dots: Data
- Red line: Linear regression model fitted to the data
- Light red band: $\pm 2$ standard deviations of the model based on the empirical distribution
```python
fig, ax = plt.subplots(figsize=(4, 3), tight_layout=True)
ax.set_ylabel('Consumption')
ax.set_xlabel('Temperature')
ax.scatter(x, y, zorder=100, s=10, label='data')

def model(theta0, theta1, x):
    return x*theta1 + theta0

ax.plot(x_plot, model(params.intercept, params.slope, x_plot),
        c='r', lw=2, label='best fit')
dist_lines = model(boostrap_params[:, 0], boostrap_params[:, 1], x_plot.reshape(-1, 1)).T
mean_lines, std_lines = np.mean(dist_lines, axis=0), np.std(dist_lines, axis=0)
ax.fill_between(x_plot,
                mean_lines - 2*std_lines,
                mean_lines + 2*std_lines,
                color='r', alpha=0.25, label='uncertainty')
plt.legend();
```
```python
```
|
(** * Projeto de LC1 - 2022-1 (30 pontos) *)
(**
Computational Logic 1 Project, 2022/1.
Team members:
1) Andrey Calaca Resende 180062433
2) Gustavo Lopes Dezan 202033463
3) Felipe Dantas Borges 202021749
4) Eduardo Ferreira Marques Cavalcante 202006368
Description: This project formalizes, in Coq, the equivalence between the
different notions of permutation called perm and equiv.
*)
Require Import PeanoNat List.
Open Scope nat_scope.
Require Import List Arith.
Require Import Permutation.
Open Scope nat_scope.
(** perm_hd = perm_skip, we believe *)
(** perm_eq = perm_eq *)
(* Notion of permutation used in this work; Permutation does not have perm_eq *)
Inductive perm : list nat -> list nat -> Prop :=
| perm_eq: forall l1, perm l1 l1
| perm_swap: forall x y l1, perm (x :: y :: l1) (y :: x :: l1)
| perm_hd: forall x l1 l2, perm l1 l2 -> perm (x :: l1) (x :: l2)
| perm_trans: forall l1 l2 l3, perm l1 l2 -> perm l2 l3 -> perm l1 l3.
(** (2 points) *)
(** Show that Permutation from the Coq standard library is equivalent to this work's perm *)
Lemma perm_equiv_Permutation: forall l1 l2, perm l1 l2 <-> Permutation l1 l2.
Proof.
split.
- intro H. induction H.
-- apply Permutation_refl.
-- apply Permutation.perm_swap.
-- apply Permutation.perm_skip.
--- apply IHperm.
-- apply Permutation.perm_trans with (l2).
--- apply IHperm1.
--- apply IHperm2.
- intro H. induction H.
-- apply perm_eq.
-- apply perm_hd.
--- apply IHPermutation.
-- apply perm_swap.
-- apply perm_trans with (l').
--- apply IHPermutation1.
--- apply IHPermutation2.
Qed.
(* Fixpoint defining the number of occurrences of an element in a list *)
Fixpoint num_oc (x: nat) (l: list nat): nat :=
match l with
| nil => 0
| h::tl => if (x =? h) then S (num_oc x tl) else num_oc x tl
end.
(* Definition of equiv *)
Definition equiv l l' := forall n:nat, num_oc n l = num_oc n l'.
Lemma perm_app_cons: forall l1 l2 a, perm (a :: l1 ++ l2) (l1 ++ a :: l2).
Proof.
induction l1.
- intros l2 a.
simpl.
apply perm_eq.
- intros l2 a2. (* why does Coq use 'a' for the natural number here? *)
simpl.
apply perm_trans with (a :: a2 :: l1 ++ l2).
+ apply perm_swap.
+ apply perm_hd.
apply IHl1.
Qed.
Lemma num_oc_S: forall x l1 l2, num_oc x (l1 ++ x :: l2) = S (num_oc x (l1 ++ l2)).
Proof.
induction l1.
- intro l2.
simpl.
rewrite Nat.eqb_refl; reflexivity.
- intro l2.
simpl.
destruct (x =? a); rewrite IHl1; reflexivity.
Qed.
Lemma num_occ_cons: forall l x n, num_oc x l = S n -> exists l1 l2, l = l1 ++ x :: l2 /\ num_oc x (l1 ++ l2) = n.
Proof.
induction l.
-intros.
simpl in H.
inversion H.
-intros.
simpl in H.
destruct (x =? a) eqn: H1.
+specialize (IHl x n).
apply Nat.eqb_eq in H1.
rewrite H1.
exists nil.
exists l.
simpl.
rewrite H1 in H.
apply eq_add_S in H.
split.
*reflexivity.
*assumption.
+apply IHl in H.
destruct H.
destruct H.
destruct H.
rewrite H.
exists (a :: x0).
exists x1.
split.
*reflexivity.
*simpl.
rewrite H1.
assumption.
Qed.
Lemma num_oc_neq: forall n a l1 l2, n =? a = false -> num_oc n (l1 ++ a :: l2) = num_oc n (l1 ++ l2).
Proof.
induction l1.
- intros l2 H.
simpl.
rewrite H.
reflexivity.
- intros l2 Hfalse.
simpl.
destruct (n =? a0) eqn:H.
+ apply (IHl1 l2) in Hfalse.
rewrite Hfalse; reflexivity.
+ apply (IHl1 l2) in Hfalse.
assumption.
Qed.
Lemma equiv_nil: forall l, equiv nil l -> l = nil.
Proof.
intro l.
case l.
- intro H.
reflexivity.
- intros n l' H. unfold equiv in H.
specialize (H n). simpl in H.
rewrite Nat.eqb_refl in H.
inversion H.
Qed.
Lemma equiv_to_perm: forall l l', equiv l l' -> perm l l'.
Proof.
induction l.
- intros.
apply equiv_nil in H.
rewrite H.
apply perm_eq.
- intros.
assert (H' := H).
unfold equiv in H'.
specialize (H' a).
simpl in H'.
rewrite Nat.eqb_refl in H'.
symmetry in H'.
apply num_occ_cons in H'.
destruct H'.
destruct H0.
destruct H0.
assert(H2:=IHl).
specialize (IHl (x++x0)).
rewrite H0.
apply (perm_trans (a :: l) (a :: x ++ x0) (x ++ a :: x0) ).
-- apply perm_hd.
apply IHl.
rewrite H0 in H.
intro.
unfold equiv in H.
specialize (H n).
inversion H.
destruct (n =? a) eqn: eq.
--- apply Nat.eqb_eq in eq.
rewrite eq in H4.
rewrite (num_oc_S a x x0) in H4 .
inversion H4.
rewrite eq.
auto.
--- replace (num_oc n (x ++ a :: x0)) with (num_oc n (x ++ x0)) in H4.
---- auto.
---- symmetry.
apply (num_oc_neq n a x x0 ).
auto.
-- apply perm_app_cons.
Qed.
(** (18 points) *)
(* Proof of equiv <-> perm *)
Theorem perm_equiv: forall l l', equiv l l' <-> perm l l'.
Proof.
intros l l'.
split.
- apply equiv_to_perm. (* equiv -> perm is hard to prove without separate lemmas *)
- intro H. induction H.
-- unfold equiv.
intro x.
reflexivity.
-- unfold equiv in *. intro n. simpl. destruct (n =? x) eqn: H.
+ destruct (n =? y) eqn: H'.
--- reflexivity.
--- reflexivity.
+ destruct (n =? y) eqn: H'.
--- reflexivity.
--- reflexivity.
-- unfold equiv in *.
intro n.
destruct (n=?x) eqn:H'.
--- simpl. rewrite H'. rewrite IHperm. reflexivity.
--- simpl. rewrite H'. rewrite IHperm. reflexivity.
-- unfold equiv in *.
intro n.
specialize (IHperm1 n).
rewrite IHperm1.
apply IHperm2.
Qed.
|
-- |
-- Module : BenchShow.Report
-- Copyright : (c) 2018 Composewell Technologies
--
-- License : BSD3
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : GHC
--
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TupleSections #-}
module BenchShow.Report
(
report
) where
import Control.Applicative (ZipList(..))
import Control.Monad (forM_)
import Data.Maybe (fromMaybe)
import Statistics.Types (Estimate(..))
import Text.PrettyPrint.ANSI.Leijen hiding ((<$>))
import Text.Printf (printf)
import BenchShow.Common
import BenchShow.Analysis
multiplesToPercentDiff :: Double -> Double
multiplesToPercentDiff x = (if x > 0 then x - 1 else x + 1) * 100
colorCode :: Word -> Double -> Doc -> Doc
colorCode thresh x =
if x > fromIntegral thresh
then dullred
else if x < (-1) * fromIntegral thresh
then dullgreen
else id
-- XXX in comparative reports render lower than baseline in green and higher
-- than baseline in red
genGroupReport :: RawReport -> Config -> IO ()
genGroupReport RawReport{..} cfg@Config{..} = do
let diffStr =
if length reportColumns > 1
then diffString presentation diffStrategy
else Nothing
case mkTitle of
Just _ -> putStrLn $ maybe "" (\f -> f reportIdentifier) mkTitle
Nothing -> putStrLn $ makeTitle reportIdentifier diffStr cfg
let benchcol = "Benchmark" : reportRowIds
groupcols =
let firstCol : tailCols = reportColumns
colorCol ReportColumn{..} =
let f x = case presentation of
Groups Diff ->
if x > 0 then dullred else dullgreen
Groups PercentDiff -> colorCode threshold x
Groups Multiples ->
let y = multiplesToPercentDiff x
in colorCode threshold y
_ -> id
in map f colValues
renderTailCols estimators col analyzed =
let regular = renderGroupCol $ showCol col Nothing analyzed
colored = zipWith ($) (id : id : colorCol col)
$ renderGroupCol
$ showCol col estimators analyzed
in case presentation of
Groups Diff -> colored
Groups PercentDiff -> colored
Groups Multiples -> colored
_ -> regular
in renderGroupCol (showFirstCol firstCol)
: case reportEstimators of
Just ests -> getZipList $
renderTailCols
<$> ZipList (map Just $ tail ests)
<*> ZipList tailCols
<*> ZipList (tail reportAnalyzed)
Nothing -> getZipList $
renderTailCols
<$> pure Nothing
<*> ZipList tailCols
<*> ZipList (tail reportAnalyzed)
rows = foldl (zipWith (<+>)) (renderCol benchcol) groupcols
putDoc $ vcat rows
putStrLn "\n"
where
renderCol [] = error "Bug: header row missing"
renderCol col@(h : rows) =
let maxlen = maximum (map length col)
in map (fill maxlen . text) (h : replicate maxlen '-' : rows)
renderGroupCol [] = error
"Bug: There has to be at least one column in raw report"
renderGroupCol col@(h : rows) =
let maxlen = maximum (map length col)
in map (\x -> indent (maxlen - length x) $ text x)
(h : replicate maxlen '-' : rows)
showEstimator est =
case est of
Mean -> "(mean)"
Median -> "(medi)"
Regression -> "(regr)"
showEstVal estvals est =
case est of
Mean ->
let sd = analyzedStdDev estvals
val = analyzedMean estvals
in
if val /= 0
then printf "(%.2f)" $ sd / abs val
else ""
Median ->
let x = ovFraction $ analyzedOutlierVar estvals
in printf "(%.2f)" x
Regression ->
case analyzedRegRSq estvals of
Just rsq -> printf "(%.2f)" (estPoint rsq)
Nothing -> ""
showFirstCol ReportColumn{..} =
let showVal = printf "%.2f"
withEstimator val estvals =
showVal val ++
if verbose
then showEstVal estvals estimator
else ""
withEstVal =
zipWith withEstimator colValues (head reportAnalyzed)
in colName : withEstVal
showCol ReportColumn{..} estimators analyzed = colName :
let showVal val =
let showDiff =
if val > 0
then printf "+%.2f" val
else printf "%.2f" val
in case presentation of
Groups Diff -> showDiff
Groups PercentDiff -> showDiff
Groups Multiples ->
if val > 0
then printf "%.2f" val
else printf "1/%.2f" (negate val)
_ -> printf "%.2f" val
showEstAnnot est =
case presentation of
Groups Diff -> showEstimator est
Groups PercentDiff -> showEstimator est
Groups Multiples -> showEstimator est
_ -> ""
in case estimators of
Just ests ->
let withAnnot val estvals est =
showVal val
++ if verbose
then showEstVal estvals est
++ showEstAnnot est
else ""
in getZipList $
withAnnot
<$> ZipList colValues
<*> ZipList analyzed
<*> ZipList ests
Nothing ->
let withEstVal val estvals est =
showVal val
++ if verbose then showEstVal estvals est else ""
in getZipList $
withEstVal
<$> ZipList colValues
<*> ZipList analyzed
<*> pure estimator
-- | Presents the benchmark results in a CSV input file as text reports
-- according to the provided configuration. The first parameter is the input
-- file name. The second parameter, when specified using 'Just', is the name
-- prefix for the output report file(s). One or more output files may be
-- generated with the given prefix depending on the 'Presentation' setting.
-- When the second parameter is 'Nothing' the reports are printed on the
-- console. The last parameter is the configuration to customize the report,
-- you can start with 'defaultConfig' as the base and override any of the
-- fields that you may want to change.
--
-- For example:
--
-- @
-- report "bench-results.csv" Nothing 'defaultConfig'
-- @
--
-- @since 0.2.0
report :: FilePath -> Maybe FilePath -> Config -> IO ()
report inputFile outputFile cfg@Config{..} = do
let dir = fromMaybe "." outputDir
(csvlines, fields) <- prepareToReport inputFile cfg
(runs, matrices) <- prepareGroupMatrices cfg inputFile csvlines fields
case presentation of
Groups style ->
forM_ fields $
reportComparingGroups style dir outputFile TextReport runs
cfg genGroupReport matrices
Fields -> do
forM_ matrices $
reportPerGroup dir outputFile TextReport cfg genGroupReport
Solo ->
let funcs = map
(\mx -> reportComparingGroups Absolute dir
(fmap (++ "-" ++ groupName mx) outputFile)
TextReport runs cfg genGroupReport [mx])
matrices
in sequence_ $ funcs <*> fields
|
/*** Some useful math macros ***/
#define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))
static double mnarg1,mnarg2;
#define FMAX(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) > (mnarg2) ?\
(mnarg1) : (mnarg2))
static double mnarg1,mnarg2;
#define FMIN(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) < (mnarg2) ?\
(mnarg1) : (mnarg2))
#define ERFC_NPTS (int) 75
#define ERFC_PARAM_DELTA (float) 0.1
static double log_erfc_table[ERFC_NPTS], erfc_params[ERFC_NPTS];
static gsl_interp_accel *erfc_acc;
static gsl_spline *erfc_spline;
#define NGaussLegendre 40 //defines the number of points in the Gauss-Legendre quadrature integration
#define NMass 300
#define NSFR_high 200
#define NSFR_low 250
#define NGL_SFR 100 // 100
#define NMTURN 50//100
#define LOG10_MTURN_MAX ((double)(10))
#define LOG10_MTURN_MIN ((double)(5.-9e-8))
#define NR_END 1
#define FREE_ARG char*
#define MM 7
#define NSTACK 50
#define EPS2 3.0e-11
#define Luv_over_SFR (double)(1./1.15/1e-28)
// Luv/SFR = 1 / 1.15 x 10^-28 [M_solar yr^-1/erg s^-1 Hz^-1]
// G. Sun and S. R. Furlanetto (2016) MNRAS, 417, 33
#define delta_lnMhalo (double)(5e-6)
#define Mhalo_min (double)(1e6)
#define Mhalo_max (double)(1e16)
float calibrated_NF_min;
double *deltaz, *deltaz_smoothed, *NeutralFractions, *z_Q, *Q_value, *nf_vals, *z_vals;
int N_NFsamples,N_extrapolated, N_analytic, N_calibrated, N_deltaz;
bool initialised_ComputeLF = false;
gsl_interp_accel *LF_spline_acc;
gsl_spline *LF_spline;
gsl_interp_accel *deriv_spline_acc;
gsl_spline *deriv_spline;
struct CosmoParams *cosmo_params_ps;
struct UserParams *user_params_ps;
struct FlagOptions *flag_options_ps;
//double sigma_norm, R, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR;
double sigma_norm, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR;
float MinMass, mass_bin_width, inv_mass_bin_width;
double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias);
float *Mass_InterpTable, *Sigma_InterpTable, *dSigmadm_InterpTable;
float *log10_overdense_spline_SFR, *log10_Nion_spline, *Overdense_spline_SFR, *Nion_spline;
float *prev_log10_overdense_spline_SFR, *prev_log10_Nion_spline, *prev_Overdense_spline_SFR, *prev_Nion_spline;
float *Mturns, *Mturns_MINI;
float *log10_Nion_spline_MINI, *Nion_spline_MINI;
float *prev_log10_Nion_spline_MINI, *prev_Nion_spline_MINI;
float *xi_SFR,*wi_SFR, *xi_SFR_Xray, *wi_SFR_Xray;
float *overdense_high_table, *overdense_low_table, *log10_overdense_low_table;
float **log10_SFRD_z_low_table, **SFRD_z_high_table;
float **log10_SFRD_z_low_table_MINI, **SFRD_z_high_table_MINI;
double *lnMhalo_param, *Muv_param, *Mhalo_param;
double *log10phi, *M_uv_z, *M_h_z;
double *lnMhalo_param_MINI, *Muv_param_MINI, *Mhalo_param_MINI;
double *log10phi_MINI, *M_uv_z_MINI, *M_h_z_MINI;
double *deriv, *lnM_temp, *deriv_temp;
double *z_val, *z_X_val, *Nion_z_val, *SFRD_val;
double *Nion_z_val_MINI, *SFRD_val_MINI;
void initialiseSigmaMInterpTable(float M_Min, float M_Max);
void freeSigmaMInterpTable();
void initialiseGL_Nion(int n, float M_Min, float M_Max);
void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max);
float Mass_limit (float logM, float PL, float FRAC);
void bisection(float *x, float xlow, float xup, int *iter);
float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC);
double sheth_delc(double del, double sig);
float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2);
double dNion_ConditionallnM(double lnM, void *params);
double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES);
double dNion_ConditionallnM_MINI(double lnM, void *params);
double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES);
float GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES);
float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES);
//JBM: Exact integral for power-law indices non zero (for zero it's erfc)
double Fcollapprox (double numin, double beta);
int n_redshifts_1DTable;
double zmin_1DTable, zmax_1DTable, zbin_width_1DTable;
double *FgtrM_1DTable_linear;
static gsl_interp_accel *Q_at_z_spline_acc;
static gsl_spline *Q_at_z_spline;
static gsl_interp_accel *z_at_Q_spline_acc;
static gsl_spline *z_at_Q_spline;
static double Zmin, Zmax, Qmin, Qmax;
void Q_at_z(double z, double *splined_value);
void z_at_Q(double Q, double *splined_value);
static gsl_interp_accel *deltaz_spline_for_photoncons_acc;
static gsl_spline *deltaz_spline_for_photoncons;
static gsl_interp_accel *NFHistory_spline_acc;
static gsl_spline *NFHistory_spline;
static gsl_interp_accel *z_NFHistory_spline_acc;
static gsl_spline *z_NFHistory_spline;
void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline);
void z_at_NFHist(double xHI_Hist, double *splined_value);
void NFHist_at_z(double z, double *splined_value);
//int nbin;
//double *z_Q, *Q_value, *Q_z, *z_value;
double FinalNF_Estimate, FirstNF_Estimate;
struct parameters_gsl_FgtrM_int_{
double z_obs;
double gf_obs;
};
struct parameters_gsl_SFR_General_int_{
double z_obs;
double gf_obs;
double Mdrop;
double Mdrop_upper;
double pl_star;
double pl_esc;
double frac_star;
double frac_esc;
double LimitMass_Fstar;
double LimitMass_Fesc;
};
struct parameters_gsl_SFR_con_int_{
double gf_obs;
double Mval;
double sigma2;
double delta1;
double delta2;
double Mdrop;
double Mdrop_upper;
double pl_star;
double pl_esc;
double frac_star;
double frac_esc;
double LimitMass_Fstar;
double LimitMass_Fesc;
};
unsigned long *lvector(long nl, long nh);
void free_lvector(unsigned long *v, long nl, long nh);
float *vector(long nl, long nh);
void free_vector(float *v, long nl, long nh);
void spline(float x[], float y[], int n, float yp1, float ypn, float y2[]);
void splint(float xa[], float ya[], float y2a[], int n, float x, float *y);
void gauleg(float x1, float x2, float x[], float w[], int n);
/***** FUNCTION PROTOTYPES *****/
double init_ps(); /* initialize global variables, MUST CALL THIS FIRST!!! returns R_CUTOFF */
void free_ps(); /* deallocates the gsl structures from init_ps */
double sigma_z0(double M); //calculates sigma at z=0 (no dicke)
double power_in_k(double k); /* Returns the value of the linear power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */
double TFmdm(double k); //Eisenstein & Hu power spectrum transfer function
void TFset_parameters();
double TF_CLASS(double k, int flag_int, int flag_dv); //transfer function of matter (flag_dv=0) and relative velocities (flag_dv=1) fluctuations from CLASS
double power_in_vcb(double k); /* Returns the value of the DM-b relative velocity power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */
double FgtrM(double z, double M);
double FgtrM_wsigma(double z, double sig);
double FgtrM_st(double z, double M);
double FgtrM_Watson(double growthf, double M);
double FgtrM_Watson_z(double z, double growthf, double M);
double FgtrM_General(double z, double M);
float erfcc(float x);
double splined_erfc(double x);
double M_J_WDM();
void Broadcast_struct_global_PS(struct UserParams *user_params, struct CosmoParams *cosmo_params){
cosmo_params_ps = cosmo_params;
user_params_ps = user_params;
}
/*
this function reads the z=0 matter (CDM+baryons) and relative velocity transfer functions from CLASS (from a file)
flag_int = 0 to initialize interpolator, flag_int = -1 to free memory, flag_int = else to interpolate.
flag_dv = 0 to output density, flag_dv = 1 to output velocity.
similar to built-in function "double T_RECFAST(float z, int flag)"
*/
double TF_CLASS(double k, int flag_int, int flag_dv)
{
static double kclass[CLASS_LENGTH], Tmclass[CLASS_LENGTH], Tvclass_vcb[CLASS_LENGTH];
static gsl_interp_accel *acc_density, *acc_vcb;
static gsl_spline *spline_density, *spline_vcb;
float trash, currk, currTm, currTv;
double ans;
int i;
int gsl_status;
FILE *F;
char filename[500];
sprintf(filename,"%s/%s",global_params.external_table_path,CLASS_FILENAME);
if (flag_int == 0) { // Initialize vectors and read file
if (!(F = fopen(filename, "r"))) {
LOG_ERROR("Unable to open file: %s for reading.", filename);
Throw(IOError);
}
int nscans;
for (i = 0; i < CLASS_LENGTH; i++) {
nscans = fscanf(F, "%e %e %e ", &currk, &currTm, &currTv);
if (nscans != 3) {
LOG_ERROR("Reading CLASS Transfer Function failed.");
Throw(IOError);
}
kclass[i] = currk;
Tmclass[i] = currTm;
Tvclass_vcb[i] = currTv;
if (i > 0 && kclass[i] <= kclass[i - 1]) {
LOG_WARNING("Tk table not ordered");
LOG_WARNING("k=%.1le kprev=%.1le", kclass[i], kclass[i - 1]);
}
}
fclose(F);
LOG_SUPER_DEBUG("Read CLASS Transfer file");
gsl_set_error_handler_off();
// Set up spline table for densities
acc_density = gsl_interp_accel_alloc ();
spline_density = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH);
gsl_status = gsl_spline_init(spline_density, kclass, Tmclass, CLASS_LENGTH);
GSL_ERROR(gsl_status);
LOG_SUPER_DEBUG("Generated CLASS Density Spline.");
//Set up spline table for velocities
acc_vcb = gsl_interp_accel_alloc ();
spline_vcb = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH);
gsl_status = gsl_spline_init(spline_vcb, kclass, Tvclass_vcb, CLASS_LENGTH);
GSL_ERROR(gsl_status);
LOG_SUPER_DEBUG("Generated CLASS velocity Spline.");
return 0;
}
else if (flag_int == -1) {
gsl_spline_free (spline_density);
gsl_interp_accel_free(acc_density);
gsl_spline_free (spline_vcb);
gsl_interp_accel_free(acc_vcb);
return 0;
}
if (k > kclass[CLASS_LENGTH-1]) { // k>kmax
LOG_WARNING("Called TF_CLASS with k=%f, larger than kmax! Returning value at kmax.", k);
if(flag_dv == 0){ // output is density
return (Tmclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]);
}
else if(flag_dv == 1){ // output is rel velocity
return (Tvclass_vcb[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]);
} //we just set it to the last value, since sometimes it wants large k for R<<cell_size, which does not matter much.
}
else { // Do spline
if(flag_dv == 0){ // output is density
ans = gsl_spline_eval (spline_density, k, acc_density);
}
else if(flag_dv == 1){ // output is relative velocity
ans = gsl_spline_eval (spline_vcb, k, acc_vcb);
}
else{
ans=0.0; //neither densities nor velocities?
}
}
return ans/k/k;
//we have to divide by k^2 to agree with the old-fashioned convention.
}
// FUNCTION sigma_z0(M)
// Returns the standard deviation of the normalized, density excess (delta(x)) field,
// smoothed on the comoving scale of M (see filter definitions for M<->R conversion).
// The sigma is evaluated at z=0, with the time evolution contained in the dicke(z) factor,
// i.e. sigma(M,z) = sigma_z0(m) * dicke(z)
// normalized so that sigma_z0(M->8/h Mpc) = SIGMA8 in ../Parameter_files/COSMOLOGY.H
// NOTE: volume is normalized to = 1, so this is equivalent to the mass standard deviation
// M is in solar masses
// References: Padmanabhan, pg. 210, eq. 5.107
double dsigma_dk(double k, void *params){
double p, w, T, gamma, q, aa, bb, cc, kR;
// get the power spectrum.. choice of 5:
if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu
T = TFmdm(k);
// check if we should cut off the power spectrum according to the Bode et al. 2000 transfer function
if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
q = k / (cosmo_params_ps->hlittle*gamma);
T = (log(1.0+2.34*q)/(2.34*q)) *
pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
gamma = 0.25;
aa = 6.4/(cosmo_params_ps->hlittle*gamma);
bb = 3.0/(cosmo_params_ps->hlittle*gamma);
cc = 1.7/(cosmo_params_ps->hlittle*gamma);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
}
else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
aa = 8.0 / (cosmo_params_ps->hlittle*gamma);
bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
aa = 1.7/(cosmo_params_ps->hlittle*gamma);
bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5);
cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS
T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS. Note, flag_int = 1 here always, since now we have to have initialized the interpolator for CLASS
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression
p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms
}
}
else{
LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM);
Throw(ValueError);
}
double Radius;
Radius = *(double *)params;
kR = k*Radius;
if ( (global_params.FILTER == 0) || (sigma_norm < 0) ){ // top hat
if ( (kR) < 1.0e-4 ){ w = 1.0;} // w converges to 1 as (kR) -> 0
else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));}
}
else if (global_params.FILTER == 1){ // gaussian of width 1/R
w = pow(E, -kR*kR/2.0);
}
else {
LOG_ERROR("No such filter: %i. Output is bogus.", global_params.FILTER);
Throw(ValueError);
}
return k*k*p*w*w;
}
double sigma_z0(double M){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);
double kstart, kend;
double Radius;
// R = MtoR(M);
Radius = MtoR(M);
// now lets do the integral for sigma and scale it with sigma_norm
if(user_params_ps->POWER_SPECTRUM == 5){
kstart = fmax(1.0e-99/Radius, KBOT_CLASS);
kend = fmin(350.0/Radius, KTOP_CLASS);
}//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max!
else{
kstart = 1.0e-99/Radius;
kend = 350.0/Radius;
}
lower_limit = kstart;//log(kstart);
upper_limit = kend;//log(kend);
F.function = &dsigma_dk;
F.params = &Radius;
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: M=%e",M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return sigma_norm * sqrt(result);
}
// FUNCTION TFmdm is the power spectrum transfer function from Eisenstein & Hu ApJ, 1999, 511, 5
double TFmdm(double k){
double q, gamma_eff, q_eff, TF_m, q_nu;
q = k*pow(theta_cmb,2)/omhh;
gamma_eff=sqrt(alpha_nu) + (1.0-sqrt(alpha_nu))/(1.0+pow(0.43*k*sound_horizon, 4));
q_eff = q/gamma_eff;
TF_m= log(E+1.84*beta_c*sqrt(alpha_nu)*q_eff);
TF_m /= TF_m + pow(q_eff,2) * (14.4 + 325.0/(1.0+60.5*pow(q_eff,1.11)));
q_nu = 3.92*q/sqrt(f_nu/N_nu);
TF_m *= 1.0 + (1.2*pow(f_nu,0.64)*pow(N_nu,0.3+0.6*f_nu)) /
(pow(q_nu,-1.6)+pow(q_nu,0.8));
return TF_m;
}
void TFset_parameters(){
double z_drag, R_drag, R_equality, p_c, p_cb, f_c, f_cb, f_nub, k_equality;
LOG_DEBUG("Setting Transfer Function parameters.");
z_equality = 25000*omhh*pow(theta_cmb, -4) - 1.0;
k_equality = 0.0746*omhh/(theta_cmb*theta_cmb);
z_drag = 0.313*pow(omhh,-0.419) * (1 + 0.607*pow(omhh, 0.674));
z_drag = 1 + z_drag*pow(cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle, 0.238*pow(omhh, 0.223));
z_drag *= 1291 * pow(omhh, 0.251) / (1 + 0.659*pow(omhh, 0.828));
y_d = (1 + z_equality) / (1.0 + z_drag);
R_drag = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_drag);
R_equality = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_equality);
sound_horizon = 2.0/3.0/k_equality * sqrt(6.0/R_equality) *
log( (sqrt(1+R_drag) + sqrt(R_drag+R_equality)) / (1.0 + sqrt(R_equality)) );
p_c = -(5 - sqrt(1 + 24*(1 - f_nu-f_baryon)))/4.0;
p_cb = -(5 - sqrt(1 + 24*(1 - f_nu)))/4.0;
f_c = 1 - f_nu - f_baryon;
f_cb = 1 - f_nu;
f_nub = f_nu+f_baryon;
alpha_nu = (f_c/f_cb) * (2*(p_c+p_cb)+5)/(4*p_cb+5.0);
alpha_nu *= 1 - 0.553*f_nub+0.126*pow(f_nub,3);
alpha_nu /= 1-0.193*sqrt(f_nu)+0.169*f_nu;
alpha_nu *= pow(1+y_d, p_c-p_cb);
alpha_nu *= 1+ (p_cb-p_c)/2.0 * (1.0+1.0/(4.0*p_c+3.0)/(4.0*p_cb+7.0))/(1.0+y_d);
beta_c = 1.0/(1.0-0.949*f_nub);
}
// Returns the value of the linear power spectrum DENSITY (i.e. <|delta_k|^2>/V)
// at a given k mode linearly extrapolated to z=0
double power_in_k(double k){
double p, T, gamma, q, aa, bb, cc;
// get the power spectrum.. choice of 5:
if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu
T = TFmdm(k);
// check if we should cut off the power spectrum according to the Bode et al. 2000 transfer function
if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
//p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05
}
else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
q = k / (cosmo_params_ps->hlittle*gamma);
T = (log(1.0+2.34*q)/(2.34*q)) *
pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
gamma = 0.25;
aa = 6.4/(cosmo_params_ps->hlittle*gamma);
bb = 3.0/(cosmo_params_ps->hlittle*gamma);
cc = 1.7/(cosmo_params_ps->hlittle*gamma);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
}
else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
aa = 8.0 / (cosmo_params_ps->hlittle*gamma);
bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
aa = 1.7/(cosmo_params_ps->hlittle*gamma);
bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5);
cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS
T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS. Note, flag_int = 1 here always, since now we have to have initialized the interpolator for CLASS
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression
p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms
}
}
else{
LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM);
Throw(ValueError);
}
return p*TWOPI*PI*sigma_norm*sigma_norm;
}
/*
Returns the value of the linear power spectrum of the DM-b relative velocity
at kinematic decoupling (which we set at zkin=1010)
*/
double power_in_vcb(double k){
double p, T, gamma, q, aa, bb, cc;
//only works if using CLASS
if (user_params_ps->POWER_SPECTRUM == 5){ // CLASS
T = TF_CLASS(k, 1, 1); //read from CLASS file. flag_int=1 since we have initialized before, flag_vcb=1 for velocity
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else{
LOG_ERROR("Cannot get P_cb unless using CLASS: %i\n Set USE_RELATIVE_VELOCITIES 0 or use CLASS.\n", user_params_ps->POWER_SPECTRUM);
Throw(ValueError);
}
return p*TWOPI*PI*sigma_norm*sigma_norm;
}
double init_ps(){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);
double kstart, kend;
//we start the interpolator if using CLASS:
if (user_params_ps->POWER_SPECTRUM == 5){
LOG_DEBUG("Setting CLASS Transfer Function inits.");
TF_CLASS(1.0, 0, 0);
}
// Set cutoff scale for WDM (eq. 4 in Barkana et al. 2001) in comoving Mpc
R_CUTOFF = 0.201*pow((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15, 0.15)*pow(global_params.g_x/1.5, -0.29)*pow(global_params.M_WDM, -1.15);
omhh = cosmo_params_ps->OMm*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle;
theta_cmb = T_cmb / 2.7;
// Translate parameters into the GLOBALVARIABLES form
f_nu = global_params.OMn/cosmo_params_ps->OMm;
f_baryon = cosmo_params_ps->OMb/cosmo_params_ps->OMm;
if (f_nu < TINY) f_nu = 1e-10;
if (f_baryon < TINY) f_baryon = 1e-10;
TFset_parameters();
sigma_norm = -1;
double Radius_8;
Radius_8 = 8.0/cosmo_params_ps->hlittle;
if(user_params_ps->POWER_SPECTRUM == 5){
kstart = fmax(1.0e-99/Radius_8, KBOT_CLASS);
kend = fmin(350.0/Radius_8, KTOP_CLASS);
}//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max!
else{
kstart = 1.0e-99/Radius_8;
kend = 350.0/Radius_8;
}
lower_limit = kstart;
upper_limit = kend;
LOG_DEBUG("Initializing Power Spectrum with lower_limit=%e, upper_limit=%e, rel_tol=%e, radius_8=%g", lower_limit,upper_limit, rel_tol, Radius_8);
F.function = &dsigma_dk;
F.params = &Radius_8;
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
LOG_DEBUG("Initialized Power Spectrum.");
sigma_norm = cosmo_params_ps->SIGMA_8/sqrt(result); //takes care of volume factor
return R_CUTOFF;
}
//function to free arrays related to the power spectrum
void free_ps(){
//we free the PS interpolator if using CLASS:
if (user_params_ps->POWER_SPECTRUM == 5){
TF_CLASS(1.0, -1, 0);
}
return;
}
/*
FUNCTION dsigmasqdm_z0(M)
returns d/dm (sigma^2) (see function sigma), in units of Msun^-1
*/
double dsigmasq_dm(double k, void *params){
double p, w, T, gamma, q, aa, bb, cc, dwdr, drdm, kR;
// get the power spectrum.. choice of 5:
if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu ApJ, 1999, 511, 5
T = TFmdm(k);
// check if we should cut off the power spectrum according to the Bode et al. 2000 transfer function
if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
//p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05
}
else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
q = k / (cosmo_params_ps->hlittle*gamma);
T = (log(1.0+2.34*q)/(2.34*q)) *
pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
gamma = 0.25;
aa = 6.4/(cosmo_params_ps->hlittle*gamma);
bb = 3.0/(cosmo_params_ps->hlittle*gamma);
cc = 1.7/(cosmo_params_ps->hlittle*gamma);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
}
else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
aa = 8.0 / (cosmo_params_ps->hlittle*gamma);
bb = 4.7 / (cosmo_params_ps->hlittle*gamma);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
aa = 1.7/(cosmo_params_ps->hlittle*gamma);
bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5);
cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + pow(bb*k, 1.5) + cc*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 5){ // JBM: CLASS
T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression
p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms
}
}
else{
LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM);
Throw(ValueError);
}
double Radius;
Radius = *(double *)params;
// now get the value of the window function
kR = k * Radius;
if (global_params.FILTER == 0){ // top hat
if ( (kR) < 1.0e-4 ){ w = 1.0; }// w converges to 1 as (kR) -> 0
else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));}
// now do d(w^2)/dm = 2 w dw/dr dr/dm
if ( (kR) < 1.0e-10 ){ dwdr = 0;}
else{ dwdr = 9*cos(kR)*k/pow(kR,3) + 3*sin(kR)*(1 - 3/(kR*kR))/(kR*Radius);}
//3*k*( 3*cos(kR)/pow(kR,3) + sin(kR)*(-3*pow(kR, -4) + 1/(kR*kR)) );}
// dwdr = -1e8 * k / (R*1e3);
drdm = 1.0 / (4.0*PI * cosmo_params_ps->OMm*RHOcrit * Radius*Radius);
}
else if (global_params.FILTER == 1){ // gaussian of width 1/R
w = pow(E, -kR*kR/2.0);
dwdr = - k*kR * w;
drdm = 1.0 / (pow(2*PI, 1.5) * cosmo_params_ps->OMm*RHOcrit * 3*Radius*Radius);
}
else {
LOG_ERROR("No such filter: %i. Output is bogus.", global_params.FILTER);
Throw(ValueError);
}
// return k*k*p*2*w*dwdr*drdm * d2fact;
return k*k*p*2*w*dwdr*drdm;
}
double dsigmasqdm_z0(double M){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
double kstart, kend;
double Radius;
// R = MtoR(M);
Radius = MtoR(M);
// now let's do the integral for sigma and scale it with sigma_norm
if(user_params_ps->POWER_SPECTRUM == 5){
kstart = fmax(1.0e-99/Radius, KBOT_CLASS);
kend = fmin(350.0/Radius, KTOP_CLASS);
}//we set a maximum k of KTOP_CLASS ~ 1e3 Mpc^-1 and a minimum of KBOT_CLASS ~ 1e-5 Mpc^-1, since the CLASS transfer function is only tabulated over a finite k range
else{
kstart = 1.0e-99/Radius;
kend = 350.0/Radius;
}
lower_limit = kstart;//log(kstart);
upper_limit = kend;//log(kend);
if (user_params_ps->POWER_SPECTRUM == 5){ // for CLASS we do not need to renormalize the sigma integral.
d2fact=1.0;
}
else {
d2fact = M*10000/sigma_z0(M);
}
F.function = &dsigmasq_dm;
F.params = &Radius;
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: M=%e",M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
// return sigma_norm * sigma_norm * result /d2fact;
return sigma_norm * sigma_norm * result;
}
/* sheth correction to delta crit */
double sheth_delc(double del, double sig){
return sqrt(SHETH_a)*del*(1. + global_params.SHETH_b*pow(sig*sig/(SHETH_a*del*del), global_params.SHETH_c));
}
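/*
 In equation form, the ellipsoidal-collapse barrier implemented above is
     delta_ec(sigma) = sqrt(a) * del * [ 1 + b * ( sigma^2 / (a * del^2) )^c ],
 with (a, b, c) = (SHETH_a, SHETH_b, SHETH_c), following Sheth, Mo & Tormen (2001).
 */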
/*
FUNCTION dNdM_st(z, M)
Computes the Press-Schechter mass function with the Sheth-Tormen correction for ellipsoidal collapse at
redshift z, and dark matter halo mass M (in solar masses).
Uses interpolated sigma and dsigmadm to be computed faster. Necessary for mass-dependent ionising efficiencies.
The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
comoving Mpc^-3 Msun^-1
Reference: Sheth, Mo & Tormen 2001
*/
double dNdM_st(double growthf, double M){
double sigma, dsigmadm, nuhat;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigmadm);
}
else {
sigma = sigma_z0(M);
dsigmadm = dsigmasqdm_z0(M);
}
sigma = sigma * growthf;
dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));
nuhat = sqrt(SHETH_a) * Deltac / sigma;
return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * sqrt(2./PI)*SHETH_A * (1+ pow(nuhat, -2*SHETH_p)) * nuhat * pow(E, -nuhat*nuhat/2.0);
}
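/*
 Example call (an illustrative sketch; the redshift and mass are hypothetical):

     double growthf = dicke(8.0);          // growth factor at z = 8
     double dndm = dNdM_st(growthf, 1e9);  // comoving Mpc^-3 Msun^-1 at M = 1e9 Msun

 If USE_INTERPOLATION_TABLES is set, initialiseSigmaMInterpTable() must already
 have been called with a mass range bracketing M, or the table lookup above
 reads out of bounds.
 */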
/*
FUNCTION dNdM_WatsonFOF(z, M)
Computes the halo mass function at
redshift z, and dark matter halo mass M (in solar masses).
The universal FOF function (Eq. 12) of Watson et al. 2013
The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
comoving Mpc^-3 Msun^-1
Reference: Watson et al. 2013
*/
double dNdM_WatsonFOF(double growthf, double M){
double sigma, dsigmadm, f_sigma;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigmadm);
}
else {
sigma = sigma_z0(M);
dsigmadm = dsigmasqdm_z0(M);
}
sigma = sigma * growthf;
dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));
f_sigma = Watson_A * ( pow( Watson_beta/sigma, Watson_alpha) + 1. ) * exp( - Watson_gamma/(sigma*sigma) );
return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma;
}
/*
FUNCTION dNdM_WatsonFOF_z(z, M)
Computes the halo mass function at
redshift z, and dark matter halo mass M (in solar masses).
The universal FOF function, with redshift evolution (Eq. 12 - 15), of Watson et al. 2013.
The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
comoving Mpc^-3 Msun^-1
Reference: Watson et al. 2013
*/
double dNdM_WatsonFOF_z(double z, double growthf, double M){
double sigma, dsigmadm, A_z, alpha_z, beta_z, Omega_m_z, f_sigma;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigmadm);
}
else {
sigma = sigma_z0(M);
dsigmadm = dsigmasqdm_z0(M);
}
sigma = sigma * growthf;
dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));
Omega_m_z = (cosmo_params_ps->OMm)*pow(1.+z,3.) / ( (cosmo_params_ps->OMl) + (cosmo_params_ps->OMm)*pow(1.+z,3.) + (global_params.OMr)*pow(1.+z,4.) );
A_z = Omega_m_z * ( Watson_A_z_1 * pow(1. + z, Watson_A_z_2 ) + Watson_A_z_3 );
alpha_z = Omega_m_z * ( Watson_alpha_z_1 * pow(1.+z, Watson_alpha_z_2 ) + Watson_alpha_z_3 );
beta_z = Omega_m_z * ( Watson_beta_z_1 * pow(1.+z, Watson_beta_z_2 ) + Watson_beta_z_3 );
f_sigma = A_z * ( pow(beta_z/sigma, alpha_z) + 1. ) * exp( - Watson_gamma_z/(sigma*sigma) );
return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma;
}
/*
FUNCTION dNdM(growthf, M)
Computes the Press-Schechter mass function at
redshift z (using the growth factor), and dark matter halo mass M (in solar masses).
Uses interpolated sigma and dsigmadm to be computed faster. Necessary for mass-dependent ionising efficiencies.
The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
comoving Mpc^-3 Msun^-1
Reference: Padmanabhan, pg. 214
*/
double dNdM(double growthf, double M){
double sigma, dsigmadm;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigmadm);
}
else {
sigma = sigma_z0(M);
dsigmadm = dsigmasqdm_z0(M);
}
sigma = sigma * growthf;
dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));
return (-(cosmo_params_ps->OMm)*RHOcrit/M) * sqrt(2/PI) * (Deltac/(sigma*sigma)) * dsigmadm * pow(E, -(Deltac*Deltac)/(2*sigma*sigma));
}
/*
FUNCTION FgtrM(z, M)
Computes the fraction of mass contained in haloes with mass > M at redshift z
*/
double FgtrM(double z, double M){
double del, sig;
del = Deltac/dicke(z); //regular spherical collapse delta
sig = sigma_z0(M);
return splined_erfc(del / (sqrt(2)*sig));
}
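/*
 The line above implements the Press-Schechter collapsed fraction
     F(>M, z) = erfc( delta_c / ( sqrt(2) * sigma(M) * D(z) ) ),
 with D(z) = dicke(z); the growth factor is absorbed into del = Deltac/D(z)
 rather than into sigma.
 */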
/*
FUNCTION FgtrM_wsigma(z, sigma_z0(M))
Computes the fraction of mass contained in haloes with mass > M at redshift z.
Requires sigma_z0(M) rather than M to make certain heating integrals faster
*/
double FgtrM_wsigma(double z, double sig){
double del;
del = Deltac/dicke(z); //regular spherical collapse delta
return splined_erfc(del / (sqrt(2)*sig));
}
/*
FUNCTION FgtrM_Watson(z, M)
Computes the fraction of mass contained in haloes with mass > M at redshift z
Uses Watson et al (2013) correction
*/
double dFdlnM_Watson_z (double lnM, void *params){
struct parameters_gsl_FgtrM_int_ vals = *(struct parameters_gsl_FgtrM_int_ *)params;
double M = exp(lnM);
double z = vals.z_obs;
double growthf = vals.gf_obs;
return dNdM_WatsonFOF_z(z, growthf, M) * M * M;
}
double FgtrM_Watson_z(double z, double growthf, double M){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
F.function = &dFdlnM_Watson_z;
struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = {
.z_obs = z,
.gf_obs = growthf,
};
F.params = &parameters_gsl_FgtrM;
lower_limit = log(M);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: z=%e growthf=%e M=%e",z,growthf,M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / (cosmo_params_ps->OMm*RHOcrit);
}
/*
FUNCTION FgtrM_Watson(z, M)
Computes the fraction of mass contained in haloes with mass > M at redshift z
Uses Watson et al (2013) correction
*/
double dFdlnM_Watson (double lnM, void *params){
double growthf = *(double *)params;
double M = exp(lnM);
return dNdM_WatsonFOF(growthf, M) * M * M;
}
double FgtrM_Watson(double growthf, double M){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
F.function = &dFdlnM_Watson;
F.params = &growthf;
lower_limit = log(M);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: growthf=%e M=%e",growthf,M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / (cosmo_params_ps->OMm*RHOcrit);
}
double dFdlnM_General(double lnM, void *params){
struct parameters_gsl_FgtrM_int_ vals = *(struct parameters_gsl_FgtrM_int_ *)params;
double M = exp(lnM);
double z = vals.z_obs;
double growthf = vals.gf_obs;
double MassFunction;
if(user_params_ps->HMF==0) {
MassFunction = dNdM(growthf, M);
}
if(user_params_ps->HMF==1) {
MassFunction = dNdM_st(growthf, M);
}
if(user_params_ps->HMF==2) {
MassFunction = dNdM_WatsonFOF(growthf, M);
}
if(user_params_ps->HMF==3) {
MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
}
return MassFunction * M * M;
}
/*
FUNCTION FgtrM_General(z, M)
Computes the fraction of mass contained in haloes with mass > M at redshift z
*/
double FgtrM_General(double z, double M){
double del, sig, growthf;
int status;
growthf = dicke(z);
struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = {
.z_obs = z,
.gf_obs = growthf,
};
if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
F.function = &dFdlnM_General;
F.params = &parameters_gsl_FgtrM;
lower_limit = log(M);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: z=%e growthf=%e M=%e",z,growthf,M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / (cosmo_params_ps->OMm*RHOcrit);
}
else {
LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
Throw(ValueError);
}
}
double dNion_General(double lnM, void *params){
struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params;
double M = exp(lnM);
double z = vals.z_obs;
double growthf = vals.gf_obs;
double MassTurnover = vals.Mdrop;
double Alpha_star = vals.pl_star;
double Alpha_esc = vals.pl_esc;
double Fstar10 = vals.frac_star;
double Fesc10 = vals.frac_esc;
double Mlim_Fstar = vals.LimitMass_Fstar;
double Mlim_Fesc = vals.LimitMass_Fesc;
double Fstar, Fesc, MassFunction;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar10;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar10;
else
Fstar = pow(M/1e10,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc10;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc10;
else
Fesc = pow(M/1e10,Alpha_esc);
if(user_params_ps->HMF==0) {
MassFunction = dNdM(growthf, M);
}
if(user_params_ps->HMF==1) {
MassFunction = dNdM_st(growthf,M);
}
if(user_params_ps->HMF==2) {
MassFunction = dNdM_WatsonFOF(growthf, M);
}
if(user_params_ps->HMF==3) {
MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
}
return MassFunction * M * M * exp(-MassTurnover/M) * Fstar * Fesc;
}
double Nion_General(double z, double M_Min, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc){
double growthf;
growthf = dicke(z);
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = {
.z_obs = z,
.gf_obs = growthf,
.Mdrop = MassTurnover,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar10,
.frac_esc = Fesc10,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc,
};
int status;
if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {
F.function = &dNion_General;
F.params = &parameters_gsl_SFR;
lower_limit = log(M_Min);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M_Min*100));
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: z=%e growthf=%e MassTurnover=%e Alpha_star=%e Alpha_esc=%e",z,growthf,MassTurnover,Alpha_star,Alpha_esc);
LOG_ERROR("data: Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / ((cosmo_params_ps->OMm)*RHOcrit);
}
else {
LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
Throw(ValueError);
}
}
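/*
 Example call (illustrative sketch; all parameter values are hypothetical, and
 Mlim_Fstar/Mlim_Fesc would normally come from Mass_limit_bisection()):

     double nion = Nion_General(8., 1e8, 5e8, 0.5, -0.5, 0.05, 0.1,
                                Mlim_Fstar, Mlim_Fesc);

 This returns the Fstar*Fesc-weighted collapsed fraction at z = 8 for haloes
 above M_Min = 1e8 Msun with turnover mass 5e8 Msun.
 */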
double dNion_General_MINI(double lnM, void *params){
struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params;
double M = exp(lnM);
double z = vals.z_obs;
double growthf = vals.gf_obs;
double MassTurnover = vals.Mdrop;
double MassTurnover_upper = vals.Mdrop_upper;
double Alpha_star = vals.pl_star;
double Alpha_esc = vals.pl_esc;
double Fstar7_MINI = vals.frac_star;
double Fesc7_MINI = vals.frac_esc;
double Mlim_Fstar = vals.LimitMass_Fstar;
double Mlim_Fesc = vals.LimitMass_Fesc;
double Fstar, Fesc, MassFunction;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else
Fstar = pow(M/1e7,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else
Fesc = pow(M/1e7,Alpha_esc);
if(user_params_ps->HMF==0) {
MassFunction = dNdM(growthf, M);
}
if(user_params_ps->HMF==1) {
MassFunction = dNdM_st(growthf,M);
}
if(user_params_ps->HMF==2) {
MassFunction = dNdM_WatsonFOF(growthf, M);
}
if(user_params_ps->HMF==3) {
MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
}
return MassFunction * M * M * exp(-MassTurnover/M) * exp(-M/MassTurnover_upper) * Fstar * Fesc;
}
double Nion_General_MINI(double z, double M_Min, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar7_MINI, double Fesc7_MINI, double Mlim_Fstar, double Mlim_Fesc){
double growthf;
int status;
growthf = dicke(z);
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = {
.z_obs = z,
.gf_obs = growthf,
.Mdrop = MassTurnover,
.Mdrop_upper = MassTurnover_upper,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar7_MINI,
.frac_esc = Fesc7_MINI,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc,
};
if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {
F.function = &dNion_General_MINI;
F.params = &parameters_gsl_SFR;
lower_limit = log(M_Min);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M_Min*100));
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occurred!");
LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: z=%e growthf=%e MassTurnover=%e MassTurnover_upper=%e",z,growthf,MassTurnover,MassTurnover_upper);
LOG_ERROR("data: Alpha_star=%e Alpha_esc=%e Fstar7_MINI=%e Fesc7_MINI=%e Mlim_Fstar=%e Mlim_Fesc=%e",Alpha_star,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar,Mlim_Fesc);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / ((cosmo_params_ps->OMm)*RHOcrit);
}
else {
LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
Throw(ValueError);
}
}
/* returns the "effective Jeans mass" in Msun
corresponding to the gas analog of WDM; eq. 10 in Barkana+ 2001 */
double M_J_WDM(){
double z_eq, fudge=60;
if (!(global_params.P_CUTOFF))
return 0;
z_eq = 3600*(cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15;
return fudge*3.06e8 * (1.5/global_params.g_x) * sqrt((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15) * pow(global_params.M_WDM, -4) * pow(z_eq/3000.0, 1.5);
}
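/*
 In equation form (with the factor-of-60 fudge applied), the expression above is
     M_J = fudge * 3.06e8 Msun * (1.5/g_x)
           * sqrt( (Omega_m - Omega_b) h^2 / 0.15 )
           * M_WDM^-4 * (z_eq/3000)^1.5 ,   [M_WDM in keV]
 which reduces to eq. 10 of Barkana et al. (2001) when fudge = 1.
 */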
float erfcc(float x)
{
double t,q,ans;
q=fabs(x);
t=1.0/(1.0+0.5*q);
ans=t*exp(-q*q-1.2655122+t*(1.0000237+t*(0.374092+t*(0.0967842+
t*(-0.1862881+t*(0.2788681+t*(-1.13520398+t*(1.4885159+
t*(-0.82215223+t*0.17087277)))))))));
return x >= 0.0 ? ans : 2.0-ans;
}
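/*
 erfcc() is the rational Chebyshev fit to the complementary error function from
 Numerical Recipes, with fractional error everywhere below ~1.2e-7. Quick
 sanity checks (illustrative): erfcc(0.) ~ 1.0, and erfcc(x) + erfcc(-x) = 2.0
 exactly, by construction of the x < 0 branch.
 */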
double splined_erfc(double x){
if (x < 0){
return 1.0;
}
// TODO: This could be wrapped in a Try/Catch to try the fast way and if it doesn't
// work, use the slow way.
return erfcc(x); // the interpolation below doesn't seem to be stable in Ts.c
if (x > ERFC_PARAM_DELTA*(ERFC_NPTS-1))
return erfcc(x);
else
return exp(gsl_spline_eval(erfc_spline, x, erfc_acc));
}
void gauleg(float x1, float x2, float x[], float w[], int n)
//Given the lower and upper limits of integration x1 and x2, and given n, this routine returns arrays x[1..n] and w[1..n] of length n,
//containing the abscissas and weights of the Gauss-Legendre n-point quadrature formula.
{
int m,j,i;
double z1,z,xm,xl,pp,p3,p2,p1;
m=(n+1)/2;
xm=0.5*(x2+x1);
xl=0.5*(x2-x1);
for (i=1;i<=m;i++) {
//High precision is a good idea for this routine.
//The roots are symmetric in the interval, so we only have to find half of them.
//Loop over the desired roots.
z=cos(3.141592654*(i-0.25)/(n+0.5));
//Starting with the above approximation to the ith root, we enter the main loop of refinement by Newton’s method.
do {
p1=1.0;
p2=0.0;
for (j=1;j<=n;j++) {
//Loop up the recurrence relation to get the Legendre polynomial evaluated at z.
p3=p2;
p2=p1;
p1=((2.0*j-1.0)*z*p2-(j-1.0)*p3)/j;
}
//p1 is now the desired Legendre polynomial. We next compute pp, its derivative, by a standard relation involving also p2,
//the polynomial of one lower order.
pp=n*(z*p1-p2)/(z*z-1.0);
z1=z;
z=z1-p1/pp;
} while (fabs(z-z1) > EPS2);
x[i]=xm-xl*z;
x[n+1-i]=xm+xl*z;
w[i]=2.0*xl/((1.0-z*z)*pp*pp);
w[n+1-i]=w[i];
}
}
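/*
 Usage sketch (illustrative; NGL and the integrand are hypothetical). Note the
 Numerical Recipes convention of 1-based arrays, so x and w need n+1 elements:

     #define NGL 20
     float x[NGL+1], w[NGL+1];
     double sum = 0.;
     gauleg(0., 1., x, w, NGL);      // nodes and weights on [0, 1]
     for (int i = 1; i <= NGL; i++)
         sum += w[i] * x[i] * x[i];  // approximates \int_0^1 x^2 dx = 1/3
 */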
void initialiseSigmaMInterpTable(float M_Min, float M_Max)
{
int i;
float Mass;
if (Mass_InterpTable == NULL){
Mass_InterpTable = calloc(NMass,sizeof(float));
Sigma_InterpTable = calloc(NMass,sizeof(float));
dSigmadm_InterpTable = calloc(NMass,sizeof(float));
}
#pragma omp parallel shared(Mass_InterpTable,Sigma_InterpTable,dSigmadm_InterpTable) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NMass;i++) {
Mass_InterpTable[i] = log(M_Min) + (float)i/(NMass-1)*( log(M_Max) - log(M_Min) );
Sigma_InterpTable[i] = sigma_z0(exp(Mass_InterpTable[i]));
dSigmadm_InterpTable[i] = log10(-dsigmasqdm_z0(exp(Mass_InterpTable[i])));
}
}
for(i=0;i<NMass;i++) {
if(isfinite(Mass_InterpTable[i]) == 0 || isfinite(Sigma_InterpTable[i]) == 0 || isfinite(dSigmadm_InterpTable[i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in initialiseSigmaMInterpTable");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
MinMass = log(M_Min);
mass_bin_width = 1./(NMass-1)*( log(M_Max) - log(M_Min) );
inv_mass_bin_width = 1./mass_bin_width;
}
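/*
 The tables above are uniform in ln(M), so a lookup (as done in dNdM and the
 other mass functions) is a minimal sketch of:

     int bin = (int)floor( (log(M) - MinMass) * inv_mass_bin_width );
     float lnM_lo = MinMass + mass_bin_width * (float)bin;
     float sig = Sigma_InterpTable[bin] + (log(M) - lnM_lo)
                 * (Sigma_InterpTable[bin+1] - Sigma_InterpTable[bin])
                 * inv_mass_bin_width;

 valid only for M_Min <= M <= M_Max as passed to this initialiser.
 */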
void freeSigmaMInterpTable()
{
free(Mass_InterpTable);
free(Sigma_InterpTable);
free(dSigmadm_InterpTable);
Mass_InterpTable = NULL;
}
void nrerror(char error_text[])
{
LOG_ERROR("Numerical Recipes run-time error...");
LOG_ERROR("%s",error_text);
Throw(MemoryAllocError);
}
float *vector(long nl, long nh)
/* allocate a float vector with subscript range v[nl..nh] */
{
float *v;
v = (float *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(float)));
if(!v) nrerror("allocation failure in vector()");
return v - nl + NR_END;
}
void free_vector(float *v, long nl, long nh)
/* free a float vector allocated with vector() */
{
free((FREE_ARG) (v+nl-NR_END));
}
void spline(float x[], float y[], int n, float yp1, float ypn, float y2[])
/*Given arrays x[1..n] and y[1..n] containing a tabulated function, i.e., yi = f(xi), with
x1 < x2 < ... < xN, and given values yp1 and ypn for the first derivative of the interpolating
function at points 1 and n, respectively, this routine returns an array y2[1..n] that contains
the second derivatives of the interpolating function at the tabulated points xi. If yp1 and/or
ypn are equal to 1e30 or larger, the routine is signaled to set the corresponding boundary
condition for a natural spline, with zero second derivative on that boundary.*/
{
int i,k;
float p,qn,sig,un,*u;
int na,nb,check;
u=vector(1,n-1);
if (yp1 > 0.99e30) // The lower boundary condition is set either to be "natural"
y2[1]=u[1]=0.0;
else { // or else to have a specified first derivative.
y2[1] = -0.5;
u[1]=(3.0/(x[2]-x[1]))*((y[2]-y[1])/(x[2]-x[1])-yp1);
}
for (i=2;i<=n-1;i++) { //This is the decomposition loop of the tridiagonal algorithm.
sig=(x[i]-x[i-1])/(x[i+1]-x[i-1]); //y2 and u are used for temporary
na = 1;
nb = 1;
check = 0;
while(((float)(x[i+na*1]-x[i-nb*1])==(float)0.0)) {
check = check + 1;
if(check%2==0) {
na = na + 1;
}
else {
nb = nb + 1;
}
sig=(x[i]-x[i-1])/(x[i+na*1]-x[i-nb*1]);
}
p=sig*y2[i-1]+2.0; //storage of the decomposed
y2[i]=(sig-1.0)/p; // factors.
u[i]=(y[i+1]-y[i])/(x[i+1]-x[i]) - (y[i]-y[i-1])/(x[i]-x[i-1]);
u[i]=(6.0*u[i]/(x[i+1]-x[i-1])-sig*u[i-1])/p;
if(((float)(x[i+1]-x[i])==(float)0.0) || ((float)(x[i]-x[i-1])==(float)0.0)) {
na = 0;
nb = 0;
check = 0;
while((float)(x[i+na*1]-x[i-nb])==(float)(0.0) || ((float)(x[i+na]-x[i-nb*1])==(float)0.0)) {
check = check + 1;
if(check%2==0) {
na = na + 1;
}
else {
nb = nb + 1;
}
}
u[i]=(y[i+1]-y[i])/(x[i+na*1]-x[i-nb]) - (y[i]-y[i-1])/(x[i+na]-x[i-nb*1]);
u[i]=(6.0*u[i]/(x[i+na*1]-x[i-nb*1])-sig*u[i-1])/p;
}
}
if (ypn > 0.99e30) //The upper boundary condition is set either to be "natural"
qn=un=0.0;
else { //or else to have a specified first derivative.
qn=0.5;
un=(3.0/(x[n]-x[n-1]))*(ypn-(y[n]-y[n-1])/(x[n]-x[n-1]));
}
y2[n]=(un-qn*u[n-1])/(qn*y2[n-1]+1.0);
for (k=n-1;k>=1;k--) { //This is the backsubstitution loop of the tridiagonal
y2[k]=y2[k]*y2[k+1]+u[k]; //algorithm.
}
free_vector(u,1,n-1);
}
void splint(float xa[], float ya[], float y2a[], int n, float x, float *y)
/*Given the arrays xa[1..n] and ya[1..n], which tabulate a function (with the xai's in order),
and given the array y2a[1..n], which is the output from spline above, and given a value of
x, this routine returns a cubic-spline interpolated value y.*/
{
void nrerror(char error_text[]);
int klo,khi,k;
float h,b,a;
klo=1; // We will find the right place in the table by means of
khi=n; //bisection. This is optimal if sequential calls to this
while (khi-klo > 1) { //routine are at random values of x. If sequential calls
k=(khi+klo) >> 1; //are in order, and closely spaced, one would do better
if (xa[k] > x) khi=k; //to store previous values of klo and khi and test if
else klo=k; //they remain appropriate on the next call.
} // klo and khi now bracket the input value of x.
h=xa[khi]-xa[klo];
if (h == 0.0) nrerror("Bad xa input to routine splint"); //The xa's must be distinct.
a=(xa[khi]-x)/h;
b=(x-xa[klo])/h; //Cubic spline polynomial is now evaluated.
*y=a*ya[klo]+b*ya[khi]+((a*a*a-a)*y2a[klo]+(b*b*b-b)*y2a[khi])*(h*h)/6.0;
}
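/*
 Usage sketch for the spline/splint pair (illustrative; the tabulated function
 is hypothetical). Arrays are 1-based, per the Numerical Recipes convention:

     int n = 5;
     float xa[6] = {0., 1., 2., 3., 4., 5.};    // xa[1..5] used
     float ya[6] = {0., 1., 4., 9., 16., 25.};  // ya = xa^2
     float y2a[6], y;
     spline(xa, ya, n, 1.0e30, 1.0e30, y2a);    // natural boundary conditions
     splint(xa, ya, y2a, n, 2.5, &y);           // y ~ 6.25
 */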
unsigned long *lvector(long nl, long nh)
/* allocate an unsigned long vector with subscript range v[nl..nh] */
{
unsigned long *v;
v = (unsigned long *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(long)));
if(!v) nrerror("allocation failure in lvector()");
return v - nl + NR_END;
}
void free_lvector(unsigned long *v, long nl, long nh)
/* free an unsigned long vector allocated with lvector() */
{
free((FREE_ARG) (v+nl-NR_END));
}
/* dnbiasdM */
double dnbiasdM(double M, float z, double M_o, float del_o){
double sigsq, del, sig_one, sig_o;
if ((M_o-M) < TINY){
LOG_ERROR("In function dnbiasdM: M must be less than M_o!\nAborting...\n");
Throw(ValueError);
}
del = Deltac/dicke(z) - del_o;
if (del < 0){
LOG_ERROR(" In function dnbiasdM: del_o must be less than del_1 = del_crit/dicke(z)!\nAborting...\n");
Throw(ValueError);
}
sig_o = sigma_z0(M_o);
sig_one = sigma_z0(M);
sigsq = sig_one*sig_one - sig_o*sig_o;
return -(RHOcrit*cosmo_params_ps->OMm)/M /sqrt(2*PI) *del*pow(sigsq,-1.5)*pow(E, -0.5*del*del/sigsq)*dsigmasqdm_z0(M);
}
/*
calculates the fraction of mass contained in haloes with mass > M at redshift z, in regions with a linear overdensity of del_bias, and standard deviation sig_bias
*/
double FgtrM_bias(double z, double M, double del_bias, double sig_bias){
double del, sig, sigsmallR;
sigsmallR = sigma_z0(M);
if (!(sig_bias < sigsmallR)){ // biased region is smaller than the halo!
// fprintf(stderr, "FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n");
// return 0;
return 0.000001;
}
del = Deltac/dicke(z) - del_bias;
sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias);
return splined_erfc(del / (sqrt(2)*sig));
}
/* Uses sigma parameters instead of Mass for scale */
double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias){
double del, sig;
if (!(sig_bias < sigsmallR)){ // biased region is smaller than the halo!
// fprintf(stderr, "local_FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n");
// return 0;
return 0.000001;
}
del = Deltac/dicke(z) - del_bias;
sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias);
return splined_erfc(del / (sqrt(2)*sig));
}
/* redshift derivative of the growth function at z */
double ddicke_dz(double z){
float dz = 1e-10;
return (dicke(z+dz)-dicke(z))/dz;
}
/* compute the mass limit at which the stellar baryon fraction or the escape fraction would reach unity */
float Mass_limit (float logM, float PL, float FRAC) {
return FRAC*pow(pow(10.,logM)/1e10,PL);
}
void bisection(float *x, float xlow, float xup, int *iter){
*x=(xlow + xup)/2.;
++(*iter);
}
float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC){
int i, iter, max_iter=200;
float rel_tol=0.001;
float logMlow, logMupper, x, x1;
iter = 0;
logMlow = log10(Mmin);
logMupper = log10(Mmax);
if (PL < 0.) {
if (Mass_limit(logMlow,PL,FRAC) <= 1.) {
return Mmin;
}
}
else if (PL > 0.) {
if (Mass_limit(logMupper,PL,FRAC) <= 1.) {
return Mmax;
}
}
else
return 0;
bisection(&x, logMlow, logMupper, &iter);
do {
if((Mass_limit(logMlow,PL,FRAC)-1.)*(Mass_limit(x,PL,FRAC)-1.) < 0.)
logMupper = x;
else
logMlow = x;
bisection(&x1, logMlow, logMupper, &iter);
if(fabs(x1-x) < rel_tol) {
return pow(10.,x1);
}
x = x1;
}
while(iter < max_iter);
// Got to max_iter without finding a solution.
LOG_ERROR("Failed to find a mass limit to regulate stellar fraction/escape fraction is between 0 and 1.");
LOG_ERROR(" The solution does not converge or iterations are not sufficient.");
// Throw(ParameterError);
Throw(MassDepZetaError);
return(0.0);
}
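/*
 Example (illustrative, hypothetical parameter values): find the halo mass at
 which f_star = FRAC * (M/1e10)^PL reaches unity,

     float Mcrit = Mass_limit_bisection(1e8, 1e16, 0.5, 0.05);

 i.e. the root of 0.05 * (M/1e10)^0.5 = 1 (at M ~ 4e12 Msun) within
 [1e8, 1e16] Msun. Callers clamp the fraction at 1/FRAC beyond this mass.
 */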
int initialise_ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options) {
Broadcast_struct_global_PS(user_params,cosmo_params);
Broadcast_struct_global_UF(user_params,cosmo_params);
lnMhalo_param = calloc(nbins,sizeof(double));
Muv_param = calloc(nbins,sizeof(double));
Mhalo_param = calloc(nbins,sizeof(double));
LF_spline_acc = gsl_interp_accel_alloc();
LF_spline = gsl_spline_alloc(gsl_interp_cspline, nbins);
init_ps();
int status;
Try initialiseSigmaMInterpTable(0.999*Mhalo_min,1.001*Mhalo_max);
Catch(status) {
LOG_ERROR("\t...called from initialise_ComputeLF");
return(status);
}
initialised_ComputeLF = true;
return(0);
}
void cleanup_ComputeLF(){
free(lnMhalo_param);
free(Muv_param);
free(Mhalo_param);
gsl_spline_free (LF_spline);
gsl_interp_accel_free(LF_spline_acc);
freeSigmaMInterpTable();
initialised_ComputeLF = 0;
}
int ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params,
struct FlagOptions *flag_options, int component, int NUM_OF_REDSHIFT_FOR_LF, float *z_LF, float *M_TURNs, double *M_uv_z, double *M_h_z, double *log10phi) {
/*
This is an API-level function and thus returns an int status.
*/
int status;
Try{ // This try block covers the whole function.
// This NEEDS to be done every time, because the actual object passed in as
// user_params, cosmo_params etc. can change on each call, freeing up the memory.
initialise_ComputeLF(nbins, user_params,cosmo_params,astro_params,flag_options);
int i,i_z;
int i_unity, i_smth, mf, nbins_smth=7;
double dlnMhalo, lnMhalo_i, SFRparam, Muv_1, Muv_2, dMuvdMhalo;
double Mhalo_i, lnMhalo_min, lnMhalo_max, lnMhalo_lo, lnMhalo_hi, dlnM, growthf;
double f_duty_upper, Mcrit_atom;
float Fstar, Fstar_temp;
double dndm;
int gsl_status;
gsl_set_error_handler_off();
if (astro_params->ALPHA_STAR < -0.5)
LOG_WARNING(
"ALPHA_STAR is %f, which is unphysical value given the observational LFs.\n"\
"Also, when ALPHA_STAR < -.5, LFs may show a kink. It is recommended to set ALPHA_STAR > -0.5.",
astro_params->ALPHA_STAR
);
mf = user_params_ps->HMF;
lnMhalo_min = log(Mhalo_min*0.999);
lnMhalo_max = log(Mhalo_max*1.001);
dlnMhalo = (lnMhalo_max - lnMhalo_min)/(double)(nbins - 1);
for (i_z=0; i_z<NUM_OF_REDSHIFT_FOR_LF; i_z++) {
growthf = dicke(z_LF[i_z]);
Mcrit_atom = atomic_cooling_threshold(z_LF[i_z]);
i_unity = -1;
for (i=0; i<nbins; i++) {
// generate interpolation arrays
lnMhalo_param[i] = lnMhalo_min + dlnMhalo*(double)i;
Mhalo_i = exp(lnMhalo_param[i]);
if (component == 1)
Fstar = astro_params->F_STAR10*pow(Mhalo_i/1e10,astro_params->ALPHA_STAR);
else
Fstar = astro_params->F_STAR7_MINI*pow(Mhalo_i/1e7,astro_params->ALPHA_STAR_MINI);
if (Fstar > 1.) Fstar = 1;
if (i_unity < 0) { // Find the array number at which Fstar crosses unity.
if (astro_params->ALPHA_STAR > 0.) {
if ( (1.- Fstar) < FRACT_FLOAT_ERR ) i_unity = i;
}
else if (astro_params->ALPHA_STAR < 0. && i < nbins-1) {
if (component == 1)
Fstar_temp = astro_params->F_STAR10*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e10,astro_params->ALPHA_STAR);
else
Fstar_temp = astro_params->F_STAR7_MINI*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e7,astro_params->ALPHA_STAR_MINI);
if (Fstar_temp < 1. && (1.- Fstar) < FRACT_FLOAT_ERR) i_unity = i;
}
}
// parametrization of SFR
SFRparam = Mhalo_i * cosmo_params->OMb/cosmo_params->OMm * (double)Fstar * (double)(hubble(z_LF[i_z])*SperYR/astro_params->t_STAR); // units of M_solar/year
Muv_param[i] = 51.63 - 2.5*log10(SFRparam*Luv_over_SFR); // UV magnitude
// if the Muv value is nan or inf, set it to 10 to avoid propagating the error
if ( isinf(Muv_param[i]) || isnan(Muv_param[i]) ) Muv_param[i] = 10.;
M_uv_z[i + i_z*nbins] = Muv_param[i];
}
gsl_status = gsl_spline_init(LF_spline, lnMhalo_param, Muv_param, nbins);
GSL_ERROR(gsl_status);
lnMhalo_lo = log(Mhalo_min);
lnMhalo_hi = log(Mhalo_max);
dlnM = (lnMhalo_hi - lnMhalo_lo)/(double)(nbins - 1);
// There is a kink in the LFs where Fstar crosses unity. This kink is a numerical artefact caused by the derivative dMuvdMhalo.
// In most cases the kink does not appear in the magnitude ranges of interest (e.g. -22 < Muv < -10). However, for some extreme
// parameters it does. To avoid this kink, we interpolate the derivative over the range where the kink appears.
// 'i_unity' is the array number at which the kink appears. 'i_unity-3' and 'i_unity+12' are related to the range of interpolation,
// which is an arbitrary choice.
// NOTE: This method does NOT work in cases with ALPHA_STAR < -0.5. But, this parameter range is unphysical given that the
// observational LFs favour positive ALPHA_STAR in this model.
// i_smth = 0: calculates LFs without interpolation.
// i_smth = 1: calculates LFs using interpolation where Fstar crosses unity.
if (i_unity-3 < 0) i_smth = 0;
else if (i_unity+12 > nbins-1) i_smth = 0;
else i_smth = 1;
if (i_smth == 0) {
for (i=0; i<nbins; i++) {
// calculate luminosity function
lnMhalo_i = lnMhalo_lo + dlnM*(double)i;
Mhalo_param[i] = exp(lnMhalo_i);
M_h_z[i + i_z*nbins] = Mhalo_param[i];
Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc);
Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc);
dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i));
if (component == 1)
f_duty_upper = 1.;
else
f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom));
if(mf==0) {
log10phi[i + i_z*nbins] = log10( dNdM(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
}
else if(mf==1) {
log10phi[i + i_z*nbins] = log10( dNdM_st(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
}
else if(mf==2) {
log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
}
else if(mf==3) {
log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF_z(z_LF[i_z], growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
}
else{
LOG_ERROR("HMF should be between 0-3, got %d", mf);
Throw(ValueError);
}
if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.)
log10phi[i + i_z*nbins] = -30.;
}
}
else {
lnM_temp = calloc(nbins_smth,sizeof(double));
deriv_temp = calloc(nbins_smth,sizeof(double));
deriv = calloc(nbins,sizeof(double));
for (i=0; i<nbins; i++) {
// calculate luminosity function
lnMhalo_i = lnMhalo_lo + dlnM*(double)i;
Mhalo_param[i] = exp(lnMhalo_i);
M_h_z[i + i_z*nbins] = Mhalo_param[i];
Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc);
Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc);
dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i));
deriv[i] = fabs(dMuvdMhalo);
}
deriv_spline_acc = gsl_interp_accel_alloc();
deriv_spline = gsl_spline_alloc(gsl_interp_cspline, nbins_smth);
// generate interpolation arrays to smooth discontinuity of the derivative causing a kink
// Note that the number of array elements and the range of interpolation are made by arbitrary choices.
lnM_temp[0] = lnMhalo_param[i_unity - 3];
lnM_temp[1] = lnMhalo_param[i_unity - 2];
lnM_temp[2] = lnMhalo_param[i_unity + 8];
lnM_temp[3] = lnMhalo_param[i_unity + 9];
lnM_temp[4] = lnMhalo_param[i_unity + 10];
lnM_temp[5] = lnMhalo_param[i_unity + 11];
lnM_temp[6] = lnMhalo_param[i_unity + 12];
deriv_temp[0] = deriv[i_unity - 3];
deriv_temp[1] = deriv[i_unity - 2];
deriv_temp[2] = deriv[i_unity + 8];
deriv_temp[3] = deriv[i_unity + 9];
deriv_temp[4] = deriv[i_unity + 10];
deriv_temp[5] = deriv[i_unity + 11];
deriv_temp[6] = deriv[i_unity + 12];
gsl_status = gsl_spline_init(deriv_spline, lnM_temp, deriv_temp, nbins_smth);
GSL_ERROR(gsl_status);
for (i=0;i<9;i++){
deriv[i_unity + i - 1] = gsl_spline_eval(deriv_spline, lnMhalo_param[i_unity + i - 1], deriv_spline_acc);
}
for (i=0; i<nbins; i++) {
if (component == 1)
f_duty_upper = 1.;
else
f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom));
if(mf==0)
dndm = dNdM(growthf, Mhalo_param[i]);
else if(mf==1)
dndm = dNdM_st(growthf, Mhalo_param[i]);
else if(mf==2)
dndm = dNdM_WatsonFOF(growthf, Mhalo_param[i]);
else if(mf==3)
dndm = dNdM_WatsonFOF_z(z_LF[i_z], growthf, Mhalo_param[i]);
else{
LOG_ERROR("HMF should be between 0-3, got %d", mf);
Throw(ValueError);
}
log10phi[i + i_z*nbins] = log10(dndm * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / deriv[i]);
if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.)
log10phi[i + i_z*nbins] = -30.;
}
}
}
cleanup_ComputeLF();
} // End try
Catch(status){
return status;
}
return(0);
}
void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max){
//calculates the weightings and the positions for Gauss-Legendre quadrature.
gauleg(log(M_Min),log(M_Max),xi_SFR_Xray,wi_SFR_Xray,n);
}
float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2){
float sigma1, dsigmadm,dsigma_val;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (M1 - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma1 = Sigma_InterpTable[MassBin] + ( M1 - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigma_val = dSigmadm_InterpTable[MassBin] + ( M1 - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigma_val);
}
else {
sigma1 = sigma_z0(exp(M1));
dsigmadm = dsigmasqdm_z0(exp(M1));
}
M1 = exp(M1);
M2 = exp(M2);
sigma1 = sigma1*sigma1;
sigma2 = sigma2*sigma2;
dsigmadm = dsigmadm/(2.0*sigma1); // note: sigma1 here already holds sigma1^2 (see above), so this divides by sigma1^2; the extra factor of sigma1 cancels against the 2.*sigma1*dsigmadm factor below
if((sigma1 > sigma2)) {
return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( sigma1 - sigma2 ) ) ) )/(pow( sigma1 - sigma2, 1.5));
}
else if(sigma1==sigma2) {
return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( 1.e-6 ) ) ) )/(pow( 1.e-6, 1.5));
}
else {
return 0.;
}
}
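/*
 dNdM_conditional above implements the extended Press-Schechter (conditional)
 mass function: schematically, for a region of overdensity delta2 and variance
 sigma2^2, the abundance of progenitors of variance sigma1^2 is
     dN/dlnM1  proportional to
         [ (delta1 - delta2)/D ] * d(sigma1^2)/dlnM1
         * exp[ -(delta1 - delta2)^2 / ( 2 D^2 (sigma1^2 - sigma2^2) ) ]
         / ( sigma1^2 - sigma2^2 )^(3/2),
 with D = growthf. The 1/sqrt(2 pi) normalisation is applied by the callers
 (e.g. dNion_ConditionallnM).
 */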
void initialiseGL_Nion(int n, float M_Min, float M_Max){
//calculates the weightings and the positions for Gauss-Legendre quadrature.
gauleg(log(M_Min),log(M_Max),xi_SFR,wi_SFR,n);
}
double dNion_ConditionallnM_MINI(double lnM, void *params) {
struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params;
double M = exp(lnM); // linear scale
double growthf = vals.gf_obs;
double M2 = vals.Mval; // natural log scale
double sigma2 = vals.sigma2;
double del1 = vals.delta1;
double del2 = vals.delta2;
double MassTurnover = vals.Mdrop;
double MassTurnover_upper = vals.Mdrop_upper;
double Alpha_star = vals.pl_star;
double Alpha_esc = vals.pl_esc;
double Fstar7_MINI = vals.frac_star;
double Fesc7_MINI = vals.frac_esc;
double Mlim_Fstar = vals.LimitMass_Fstar;
double Mlim_Fesc = vals.LimitMass_Fesc;
double Fstar,Fesc;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else
Fstar = pow(M/1e7,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else
Fesc = pow(M/1e7,Alpha_esc);
return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}
double dNion_ConditionallnM(double lnM, void *params) {
struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params;
double M = exp(lnM); // linear scale
double growthf = vals.gf_obs;
double M2 = vals.Mval; // natural log scale
double sigma2 = vals.sigma2;
double del1 = vals.delta1;
double del2 = vals.delta2;
double MassTurnover = vals.Mdrop;
double Alpha_star = vals.pl_star;
double Alpha_esc = vals.pl_esc;
double Fstar10 = vals.frac_star;
double Fesc10 = vals.frac_esc;
double Mlim_Fstar = vals.LimitMass_Fstar;
double Mlim_Fesc = vals.LimitMass_Fesc;
double Fstar,Fesc;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar10;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar10;
else
Fstar = pow(M/1e10,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc10;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc10;
else
Fesc = pow(M/1e10,Alpha_esc);
return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}
double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) {
if (FAST_FCOLL_TABLES) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
return GaussLegendreQuad_Nion_MINI(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) MassTurnover_upper, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES);
}
else{ //standard old code
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.01; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
.gf_obs = growthf,
.Mval = M2,
.sigma2 = sigma2,
.delta1 = delta1,
.delta2 = delta2,
.Mdrop = MassTurnover,
.Mdrop_upper = MassTurnover_upper,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar10,
.frac_esc = Fesc10,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc
};
int status;
F.function = &dNion_ConditionallnM_MINI;
F.params = &parameters_gsl_SFR_con;
lower_limit = M1;
upper_limit = M2;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: growthf=%e M2=%e sigma2=%e delta1=%e delta2=%e MassTurnover=%e",growthf,M2,sigma2,delta1,delta2,MassTurnover);
LOG_ERROR("data: MassTurnover_upper=%e Alpha_star=%e Alpha_esc=%e Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",MassTurnover_upper,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
if(delta2 > delta1) {
result = 1.;
return result;
}
else {
return result;
}
}
}
double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) {
if (FAST_FCOLL_TABLES && global_params.USE_FAST_ATOMIC) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
return GaussLegendreQuad_Nion(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES);
}
else{ //standard
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.01; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
.gf_obs = growthf,
.Mval = M2,
.sigma2 = sigma2,
.delta1 = delta1,
.delta2 = delta2,
.Mdrop = MassTurnover,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar10,
.frac_esc = Fesc10,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc
};
F.function = &dNion_ConditionallnM;
F.params = &parameters_gsl_SFR_con;
lower_limit = M1;
upper_limit = M2;
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: growthf=%e M1=%e M2=%e sigma2=%e delta1=%e delta2=%e",growthf,M1,M2,sigma2,delta1,delta2);
LOG_ERROR("data: MassTurnover=%e Alpha_star=%e Alpha_esc=%e Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
if(delta2 > delta1) {
result = 1.;
return result;
}
else {
return result;
}
}
}
float Nion_ConditionallnM_GL_MINI(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){
float M = exp(lnM);
float growthf = parameters_gsl_SFR_con.gf_obs;
float M2 = parameters_gsl_SFR_con.Mval;
float sigma2 = parameters_gsl_SFR_con.sigma2;
float del1 = parameters_gsl_SFR_con.delta1;
float del2 = parameters_gsl_SFR_con.delta2;
float MassTurnover = parameters_gsl_SFR_con.Mdrop;
float MassTurnover_upper = parameters_gsl_SFR_con.Mdrop_upper;
float Alpha_star = parameters_gsl_SFR_con.pl_star;
float Alpha_esc = parameters_gsl_SFR_con.pl_esc;
float Fstar7_MINI = parameters_gsl_SFR_con.frac_star;
float Fesc7_MINI = parameters_gsl_SFR_con.frac_esc;
float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar;
float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc;
float Fstar,Fesc;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else
Fstar = pow(M/1e7,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else
Fesc = pow(M/1e7,Alpha_esc);
return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}
float Nion_ConditionallnM_GL(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){
float M = exp(lnM);
float growthf = parameters_gsl_SFR_con.gf_obs;
float M2 = parameters_gsl_SFR_con.Mval;
float sigma2 = parameters_gsl_SFR_con.sigma2;
float del1 = parameters_gsl_SFR_con.delta1;
float del2 = parameters_gsl_SFR_con.delta2;
float MassTurnover = parameters_gsl_SFR_con.Mdrop;
float Alpha_star = parameters_gsl_SFR_con.pl_star;
float Alpha_esc = parameters_gsl_SFR_con.pl_esc;
float Fstar10 = parameters_gsl_SFR_con.frac_star;
float Fesc10 = parameters_gsl_SFR_con.frac_esc;
float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar;
float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc;
float Fstar,Fesc;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar10;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar10;
else
Fstar = pow(M/1e10,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc10;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc10;
else
Fesc = pow(M/1e10,Alpha_esc);
return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}
//JBM: Same as above but for minihaloes. Has two cutoffs, lower and upper.
float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES) {
double result, nu_lower_limit, nu_higher_limit, nupivot;
int i;
double integrand, x;
integrand = 0.;
struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
.gf_obs = growthf,
.Mval = M2,
.sigma2 = sigma2,
.delta1 = delta1,
.delta2 = delta2,
.Mdrop = MassTurnover,
.Mdrop_upper = MassTurnover_upper,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar7_MINI,
.frac_esc = Fesc7_MINI,
.LimitMass_Fstar = Mlim_Fstar_MINI,
.LimitMass_Fesc = Mlim_Fesc_MINI
};
if(delta2 > delta1*0.9999) {
result = 1.;
return result;
}
if(FAST_FCOLL_TABLES){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
if(MassTurnover_upper <= MassTurnover){
return 1e-40; //in sharp cut it's zero
}
double delta_arg = pow( (delta1 - delta2)/growthf , 2.);
double LogMass=log(MassTurnover);
int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
double MassBinLow = MinMass + mass_bin_width*(double)MassBin;
double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
nu_lower_limit = delta_arg/(sigmaM1 * sigmaM1 - sigma2 * sigma2);
LogMass = log(MassTurnover_upper);
MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(double)MassBin;
double sigmaM2 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
nu_higher_limit = delta_arg/(sigmaM2*sigmaM2-sigma2*sigma2);
//note we keep nupivot1 just in case very negative delta makes it reach that nu
LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster
int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note, it does not have the sigma2 on purpose.
LogMass = log(MPIVOT2); //jbm could be done outside and it'd be even faster
MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2);
double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M)
double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot1>nu>nupivot2 (small M)
double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M)
//beta2 fixed by continuity.
// // 3PLs
double fcollres=0.0;
double fcollres_high=0.0; //for the higher threshold to subtract
// re-written for further speedups
if (nu_higher_limit <= nupivot2){ //if both are below pivot2 don't bother adding and subtracting the high contribution
fcollres=(Fcollapprox(nu_lower_limit,beta3))*pow(nupivot2,-beta3);
fcollres_high=(Fcollapprox(nu_higher_limit,beta3))*pow(nupivot2,-beta3);
}
else {
fcollres_high=(Fcollapprox(nu_higher_limit,beta2))*pow(nupivot1,-beta2);
if (nu_lower_limit > nupivot2){
fcollres=(Fcollapprox(nu_lower_limit,beta2))*pow(nupivot1,-beta2);
}
else {
fcollres=(Fcollapprox(nupivot2,beta2))*pow(nupivot1,-beta2);
fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3);
}
}
if (fcollres < fcollres_high){
return 1e-40;
}
return (fcollres-fcollres_high);
}
else{
for(i=1; i<(n+1); i++){
if(Type==1) {
x = xi_SFR_Xray[i];
integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con);
}
if(Type==0) {
x = xi_SFR[i];
integrand += wi_SFR[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con);
}
}
return integrand;
}
}
//JBM: Added the approximation if user_params->FAST_FCOLL_TABLES==True
float GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES) {
//Performs the Gauss-Legendre quadrature.
int i;
double result, nu_lower_limit, nupivot;
if(delta2 > delta1*0.9999) {
result = 1.;
return result;
}
double integrand, x;
integrand = 0.;
struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
.gf_obs = growthf,
.Mval = M2,
.sigma2 = sigma2,
.delta1 = delta1,
.delta2 = delta2,
.Mdrop = MassTurnover,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar10,
.frac_esc = Fesc10,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc
};
if (FAST_FCOLL_TABLES && global_params.USE_FAST_ATOMIC){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
double delta_arg = pow( (delta1 - delta2)/growthf , 2.0);
double LogMass=log(MassTurnover);
int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
double MassBinLow = MinMass + mass_bin_width*(double)MassBin;
double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
nu_lower_limit = delta_arg/(sigmaM1*sigmaM1-sigma2*sigma2);
LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster
int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note: the sigma2 term is omitted here on purpose.
LogMass = log(MPIVOT2); //JBM: could be computed outside this function for extra speed
MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2);
double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M)
double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot2<nu<nupivot1 (small M)
double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M)
//beta2 fixed by continuity.
double nucrit_sigma2 = delta_arg*pow(sigma2+1e-10,-2.0); //above this nu sigma2>sigma1, so HMF=0. eps added to avoid infinities
// 3PLs (three power laws)
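// Sketch of the scheme: Fcoll(>nu) is approximated by three power laws in nu with exponents
// beta1/beta2/beta3, matched at nupivot1 and nupivot2. Each segment calls Fcollapprox(nu, beta_k)
// and is normalised by pow(nupivot_k, -beta_k), so the pieces can be chained across the pivots.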
double fcollres=0.0;
if(nu_lower_limit >= nucrit_sigma2){ //fully in the flat part of sigma(nu), M^alpha is nu-independent.
return 1e-40;
}
else{ //we subtract the contribution from high nu, since the HMF is set to 0 if sigma2>sigma1
fcollres -= Fcollapprox(nucrit_sigma2,beta1)*pow(nupivot1,-beta1);
}
if(nu_lower_limit >= nupivot1){
fcollres+=Fcollapprox(nu_lower_limit,beta1)*pow(nupivot1,-beta1);
}
else{
fcollres+=Fcollapprox(nupivot1,beta1)*pow(nupivot1,-beta1);
if (nu_lower_limit > nupivot2){
fcollres+=(Fcollapprox(nu_lower_limit,beta2)-Fcollapprox(nupivot1,beta2))*pow(nupivot1,-beta2);
}
else {
fcollres+=(Fcollapprox(nupivot2,beta2)-Fcollapprox(nupivot1,beta2) )*pow(nupivot1,-beta2);
fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3);
}
}
if (fcollres<=0.0){
LOG_DEBUG("Negative fcoll? fc=%.1le Mt=%.1le \n",fcollres, MassTurnover);
fcollres=1e-40;
}
return fcollres;
}
else{
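// Standard Gauss-Legendre quadrature: the integral is approximated by the weighted sum
// sum_i wi * f(xi) over precomputed abscissae/weights (xi_SFR, wi_SFR), set up by the
// initialiseGL_* routines (e.g. initialiseGL_Nion_Xray below).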
for(i=1; i<(n+1); i++){
if(Type==1) {
x = xi_SFR_Xray[i];
integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con);
}
if(Type==0) {
x = xi_SFR[i];
integrand += wi_SFR[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con);
}
}
return integrand;
}
}
#include <gsl/gsl_sf_gamma.h>
//JBM: Integral of a power-law times exponential for EPS: \int dnu nu^beta * exp(-nu/2)/sqrt(nu) from numin to infty.
double Fcollapprox (double numin, double beta){
//nu is deltacrit^2/sigma^2, corrected by delta(R) and sigma(R)
double gg = gsl_sf_gamma_inc(0.5+beta,0.5*numin);
return gg*pow(2,0.5+beta)*pow(2.0*PI,-0.5);
}
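// Derivation sketch: substituting t = nu/2,
//   \int_{numin}^{infty} nu^{beta - 1/2} e^{-nu/2} dnu
//     = 2^{beta + 1/2} \int_{numin/2}^{infty} t^{(beta + 1/2) - 1} e^{-t} dt
//     = 2^{beta + 1/2} Gamma(beta + 1/2, numin/2),
// which is exactly gg*pow(2, 0.5+beta) above; the remaining pow(2*PI, -0.5) is the Gaussian
// normalisation of the EPS first-crossing distribution.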
void initialise_Nion_General_spline(float z, float min_density, float max_density, float Mmax, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES){
float Mmin = MassTurnover/50.;
double overdense_val, growthf, sigma2;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
double overdense_small_high, overdense_small_low;
int i;
float ln_10;
if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
}
else {
overdense_small_high = max_density;
}
overdense_small_low = min_density;
ln_10 = log(10);
float MassBinLow;
int MassBin;
growthf = dicke(z);
Mmin = log(Mmin);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
#pragma omp parallel shared(log10_overdense_spline_SFR,log10_Nion_spline,overdense_small_low,overdense_small_high,growthf,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i,overdense_val) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
overdense_val = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
log10_overdense_spline_SFR[i] = overdense_val;
log10_Nion_spline[i] = GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,pow(10.,overdense_val)-1.,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
if(fabs(log10_Nion_spline[i]) < 1e-38) {
log10_Nion_spline[i] = 1e-38;
}
log10_Nion_spline[i] = log10(log10_Nion_spline[i]);
if(log10_Nion_spline[i] < -40.){
log10_Nion_spline[i] = -40.;
}
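// Multiplying by ln(10) converts the stored log10 value to a natural log, presumably so the
// interpolated table can be exponentiated downstream with exp() rather than pow(10, .).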
log10_Nion_spline[i] *= ln_10;
}
}
for (i=0; i<NSFR_low; i++){
if(!isfinite(log10_Nion_spline[i])) {
LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
#pragma omp parallel shared(Overdense_spline_SFR,Nion_spline,overdense_large_low,overdense_large_high,growthf,Mmin,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
Nion_spline[i] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
if(Nion_spline[i]<0.) {
Nion_spline[i]=pow(10.,-40.0);
}
}
}
for(i=0;i<NSFR_high;i++) {
if(!isfinite(Nion_spline[i])) {
LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
void initialise_Nion_General_spline_MINI(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){
double growthf, sigma2;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
double overdense_small_high, overdense_small_low;
int i,j;
float ln_10;
if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
}
else {
overdense_small_high = max_density;
}
overdense_small_low = min_density;
ln_10 = log(10);
float MassBinLow;
int MassBin;
growthf = dicke(z);
Mmin = log(Mmin);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
for (i=0; i<NSFR_low; i++){
log10_overdense_spline_SFR[i] = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
}
for (i=0;i<NSFR_high;i++) {
Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
}
for (i=0;i<NMTURN;i++){
Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min));
Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI));
}
#pragma omp parallel shared(log10_Nion_spline,growthf,Mmax,sigma2,log10_overdense_spline_SFR,Mturns,Mturns_MINI,\
Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,ln_10,log10_Nion_spline_MINI,Mcrit_atom,\
Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
private(i,j) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
for (j=0; j<NMTURN; j++){
log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\
pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns[j],Alpha_star,\
Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES));
if(log10_Nion_spline[i+j*NSFR_low] < -40.){
log10_Nion_spline[i+j*NSFR_low] = -40.;
}
log10_Nion_spline[i+j*NSFR_low] *= ln_10;
log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\
pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,\
Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES));
if(log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){
log10_Nion_spline_MINI[i+j*NSFR_low] = -40.;
}
log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10;
}
}
}
for (i=0; i<NSFR_low; i++){
for (j=0; j<NMTURN; j++){
if(isfinite(log10_Nion_spline[i+j*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
if(isfinite(log10_Nion_spline_MINI[i+j*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
#pragma omp parallel shared(Nion_spline,growthf,Mmin,Mmax,sigma2,Overdense_spline_SFR,Mturns,Alpha_star,Alpha_star_mini,\
Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\
Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
private(i,j) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
for (j=0; j<NMTURN; j++){
Nion_spline[i+j*NSFR_high] = Nion_ConditionalM(
growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],
Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES
);
if(Nion_spline[i+j*NSFR_high]<0.) {
Nion_spline[i+j*NSFR_high]=pow(10.,-40.0);
}
Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI(
growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],
Mturns_MINI[j],Mcrit_atom,Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,
Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES
);
if(Nion_spline_MINI[i+j*NSFR_high]<0.) {
Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0);
}
}
}
}
for(i=0;i<NSFR_high;i++) {
for (j=0; j<NMTURN; j++){
if(isfinite(Nion_spline[i+j*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
if(isfinite(Nion_spline_MINI[i+j*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in Nion_spline_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_Nion_General_spline_MINI_prev(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){
double growthf, sigma2;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
double overdense_small_high, overdense_small_low;
int i,j;
float ln_10;
if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
}
else {
overdense_small_high = max_density;
}
overdense_small_low = min_density;
ln_10 = log(10);
float MassBinLow;
int MassBin;
growthf = dicke(z);
Mmin = log(Mmin);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
for (i=0; i<NSFR_low; i++){
prev_log10_overdense_spline_SFR[i] = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
}
for (i=0;i<NSFR_high;i++) {
prev_Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
}
for (i=0;i<NMTURN;i++){
Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min));
Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI));
}
#pragma omp parallel shared(prev_log10_Nion_spline,growthf,Mmax,sigma2,prev_log10_overdense_spline_SFR,Mturns,Alpha_star,Alpha_star_mini,\
Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_log10_Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\
Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
private(i,j) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
for (j=0; j<NMTURN; j++){
prev_log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\
pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns[j],\
Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES));
if(prev_log10_Nion_spline[i+j*NSFR_low] < -40.){
prev_log10_Nion_spline[i+j*NSFR_low] = -40.;
}
prev_log10_Nion_spline[i+j*NSFR_low] *= ln_10;
prev_log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\
pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,\
Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES));
if(prev_log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){
prev_log10_Nion_spline_MINI[i+j*NSFR_low] = -40.;
}
prev_log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10;
}
}
}
for (i=0; i<NSFR_low; i++){
for (j=0; j<NMTURN; j++){
if(isfinite(prev_log10_Nion_spline[i+j*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
if(isfinite(prev_log10_Nion_spline_MINI[i+j*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
#pragma omp parallel shared(prev_Nion_spline,growthf,Mmin,Mmax,sigma2,prev_Overdense_spline_SFR,Mturns,\
Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_Nion_spline_MINI,Mturns_MINI,\
Mcrit_atom,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
private(i,j) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
for (j=0; j<NMTURN; j++){
prev_Nion_spline[i+j*NSFR_high] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,prev_Overdense_spline_SFR[i],\
Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
if(prev_Nion_spline[i+j*NSFR_high]<0.) {
prev_Nion_spline[i+j*NSFR_high]=pow(10.,-40.0);
}
prev_Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI(growthf,Mmin,Mmax,sigma2,Deltac,\
prev_Overdense_spline_SFR[i],Mturns_MINI[j],Mcrit_atom,Alpha_star_mini,\
Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES);
if(prev_Nion_spline_MINI[i+j*NSFR_high]<0.) {
prev_Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0);
}
}
}
}
for(i=0;i<NSFR_high;i++) {
for (j=0; j<NMTURN; j++){
if(isfinite(prev_Nion_spline[i+j*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
if(isfinite(prev_Nion_spline_MINI[i+j*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_Nion_Ts_spline(
int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Alpha_esc,
float Fstar10, float Fesc10
){
int i;
float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL;
float Mlim_Fstar, Mlim_Fesc;
if (z_val == NULL){
z_val = calloc(Nbin,sizeof(double));
Nion_z_val = calloc(Nbin,sizeof(double));
}
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10);
#pragma omp parallel shared(z_val,Nion_z_val,zmin,zmax, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<Nbin; i++){
z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
Nion_z_val[i] = Nion_General(z_val[i], Mmin, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc);
}
}
for (i=0; i<Nbin; i++){
if(isfinite(Nion_z_val[i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
void initialise_Nion_Ts_spline_MINI(
int Nbin, float zmin, float zmax, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10,
float Fesc10, float Fstar7_MINI, float Fesc7_MINI
){
int i,j;
float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL;
float Mlim_Fstar, Mlim_Fesc, Mlim_Fstar_MINI, Mlim_Fesc_MINI, Mcrit_atom_val;
if (z_val == NULL){
z_val = calloc(Nbin,sizeof(double));
Nion_z_val = calloc(Nbin,sizeof(double));
Nion_z_val_MINI = calloc(Nbin*NMTURN,sizeof(double));
}
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10);
Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini));
Mlim_Fesc_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc7_MINI * pow(1e3, Alpha_esc));
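// Note: Fstar7_MINI and Fesc7_MINI are normalised at 1e7 Msun; the pow(1e3, alpha) factors appear
// to rescale them to the 1e10 Msun pivot used elsewhere, since (1e10/1e7)^alpha = 1e3^alpha.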
float MassTurnover[NMTURN];
for (i=0;i<NMTURN;i++){
MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN));
}
#pragma omp parallel shared(z_val,Nion_z_val,Nbin,zmin,zmax,Mmin,Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,\
Nion_z_val_MINI,MassTurnover,Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI) \
private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<Nbin; i++){
z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
Mcrit_atom_val = atomic_cooling_threshold(z_val[i]);
Nion_z_val[i] = Nion_General(z_val[i], Mmin, Mcrit_atom_val, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc);
for (j=0; j<NMTURN; j++){
Nion_z_val_MINI[i+j*Nbin] = Nion_General_MINI(z_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star_mini, Alpha_esc, Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI);
}
}
}
for (i=0; i<Nbin; i++){
if(isfinite(Nion_z_val[i])==0) {
i = Nbin;
LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val");
// Throw(ParameterError);
Throw(TableGenerationError);
}
for (j=0; j<NMTURN; j++){
if(isfinite(Nion_z_val_MINI[i+j*Nbin])==0){
j = NMTURN;
LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_SFRD_spline(int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Fstar10){
int i;
float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL;
float Mlim_Fstar;
if (z_X_val == NULL){
z_X_val = calloc(Nbin,sizeof(double));
SFRD_val = calloc(Nbin,sizeof(double));
}
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
#pragma omp parallel shared(z_X_val,SFRD_val,zmin,zmax, MassTurn, Alpha_star, Fstar10, Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<Nbin; i++){
z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
SFRD_val[i] = Nion_General(z_X_val[i], Mmin, MassTurn, Alpha_star, 0., Fstar10, 1.,Mlim_Fstar,0.);
}
}
for (i=0; i<Nbin; i++){
if(isfinite(SFRD_val[i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in SFRD_val");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
void initialise_SFRD_spline_MINI(int Nbin, float zmin, float zmax, float Alpha_star, float Alpha_star_mini, float Fstar10, float Fstar7_MINI){
int i,j;
float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL;
float Mlim_Fstar, Mlim_Fstar_MINI, Mcrit_atom_val;
if (z_X_val == NULL){
z_X_val = calloc(Nbin,sizeof(double));
SFRD_val = calloc(Nbin,sizeof(double));
SFRD_val_MINI = calloc(Nbin*NMTURN,sizeof(double));
}
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini));
float MassTurnover[NMTURN];
for (i=0;i<NMTURN;i++){
MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN));
}
#pragma omp parallel shared(z_X_val,zmin,zmax,Nbin,SFRD_val,Mmin, Alpha_star,Alpha_star_mini,Fstar10,Mlim_Fstar,\
SFRD_val_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \
private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<Nbin; i++){
z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
Mcrit_atom_val = atomic_cooling_threshold(z_X_val[i]);
SFRD_val[i] = Nion_General(z_X_val[i], Mmin, Mcrit_atom_val, Alpha_star, 0., Fstar10, 1.,Mlim_Fstar,0.);
for (j=0; j<NMTURN; j++){
SFRD_val_MINI[i+j*Nbin] = Nion_General_MINI(z_X_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star_mini, 0., Fstar7_MINI, 1.,Mlim_Fstar_MINI,0.);
}
}
}
for (i=0; i<Nbin; i++){
if(isfinite(SFRD_val[i])==0) {
i = Nbin;
LOG_ERROR("Detected either an infinite or NaN value in SFRD_val");
// Throw(ParameterError);
Throw(TableGenerationError);
}
for (j=0; j<NMTURN; j++){
if(isfinite(SFRD_val_MINI[i+j*Nbin])==0) {
j = NMTURN;
LOG_ERROR("Detected either an infinite or NaN value in SFRD_val_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_SFRD_Conditional_table(
int Nfilter, float min_density[], float max_density[], float growthf[], float R[],
float MassTurnover, float Alpha_star, float Fstar10, bool FAST_FCOLL_TABLES
){
double overdense_val;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION;
double overdense_small_high, overdense_small_low;
float Mmin,Mmax,Mlim_Fstar,sigma2;
int i,j,k,i_tot;
float ln_10;
ln_10 = log(10);
Mmin = MassTurnover/50.;
Mmax = RtoM(R[Nfilter-1]);
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mmin = log(Mmin);
for (i=0; i<NSFR_high;i++) {
overdense_high_table[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
}
float MassBinLow;
int MassBin;
for (j=0; j < Nfilter; j++) {
Mmax = RtoM(R[j]);
initialiseGL_Nion_Xray(NGL_SFR, MassTurnover/50., Mmax);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
if(min_density[j]*growthf[j] < -1.) {
overdense_small_low = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
}
else {
overdense_small_low = min_density[j]*growthf[j];
}
overdense_small_high = max_density[j]*growthf[j];
if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION;
}
for (i=0; i<NSFR_low; i++) {
overdense_val = log10(1. + overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
overdense_low_table[i] = pow(10.,overdense_val);
}
#pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
log10_SFRD_z_low_table[j][i] = GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES);
if(fabs(log10_SFRD_z_low_table[j][i]) < 1e-38) {
log10_SFRD_z_low_table[j][i] = 1e-38;
}
log10_SFRD_z_low_table[j][i] = log10(log10_SFRD_z_low_table[j][i]);
log10_SFRD_z_low_table[j][i] += 10.0;
log10_SFRD_z_low_table[j][i] *= ln_10;
}
}
for (i=0; i<NSFR_low; i++){
if(isfinite(log10_SFRD_z_low_table[j][i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
#pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES);
SFRD_z_high_table[j][i] *= pow(10., 10.0);
}
}
for(i=0;i<NSFR_high;i++) {
if(isfinite(SFRD_z_high_table[j][i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_SFRD_Conditional_table_MINI(
int Nfilter, float min_density[], float max_density[], float growthf[], float R[],
float Mcrit_atom[], float Alpha_star, float Alpha_star_mini, float Fstar10, float Fstar7_MINI, bool FAST_FCOLL_TABLES
){
double overdense_val;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION;
double overdense_small_high, overdense_small_low;
float Mmin,Mmax,Mlim_Fstar,sigma2,Mlim_Fstar_MINI;
int i,j,k,i_tot;
float ln_10;
ln_10 = log(10);
Mmin = global_params.M_MIN_INTEGRAL;
Mmax = RtoM(R[Nfilter-1]);
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini));
float MassTurnover[NMTURN];
for (i=0;i<NMTURN;i++){
MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN));
}
Mmin = log(Mmin);
for (i=0; i<NSFR_high;i++) {
overdense_high_table[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
}
float MassBinLow;
int MassBin;
for (j=0; j < Nfilter; j++) {
Mmax = RtoM(R[j]);
initialiseGL_Nion_Xray(NGL_SFR, global_params.M_MIN_INTEGRAL, Mmax);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
if(min_density[j]*growthf[j] < -1.) {
overdense_small_low = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
}
else {
overdense_small_low = min_density[j]*growthf[j];
}
overdense_small_high = max_density[j]*growthf[j];
if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION;
}
for (i=0; i<NSFR_low; i++) {
overdense_val = log10(1. + overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
overdense_low_table[i] = pow(10.,overdense_val);
}
#pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,Mcrit_atom,Alpha_star,Alpha_star_mini,Fstar10,Mlim_Fstar,\
log10_SFRD_z_low_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI,ln_10) \
private(i,k) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
log10_SFRD_z_low_table[j][i] = log10(GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES));
if(log10_SFRD_z_low_table[j][i] < -50.){
log10_SFRD_z_low_table[j][i] = -50.;
}
log10_SFRD_z_low_table[j][i] += 10.0;
log10_SFRD_z_low_table[j][i] *= ln_10;
for (k=0; k<NMTURN; k++){
log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover[k], Mcrit_atom[j],Alpha_star_mini,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES));
if(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] < -50.){
log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = -50.;
}
log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] += 10.0;
log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] *= ln_10;
}
}
}
for (i=0; i<NSFR_low; i++){
if(isfinite(log10_SFRD_z_low_table[j][i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table");
// Throw(ParameterError);
Throw(TableGenerationError);
}
for (k=0; k<NMTURN; k++){
if(isfinite(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
#pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,Mcrit_atom,Alpha_star,Alpha_star_mini,Fstar10,\
Mlim_Fstar,SFRD_z_high_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \
private(i,k) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],\
Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES);
if (SFRD_z_high_table[j][i] < 1e-50){
SFRD_z_high_table[j][i] = 1e-50;
}
SFRD_z_high_table[j][i] *= pow(10., 10.0);
for (k=0; k<NMTURN; k++){
SFRD_z_high_table_MINI[j][i+k*NSFR_high] = Nion_ConditionalM_MINI(growthf[j],Mmin,Mmax,sigma2,Deltac,\
overdense_high_table[i],MassTurnover[k],Mcrit_atom[j],\
Alpha_star_mini,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES);
if (SFRD_z_high_table_MINI[j][i+k*NSFR_high] < 1e-50){
SFRD_z_high_table_MINI[j][i+k*NSFR_high] = 1e-50;
}
}
}
}
for(i=0;i<NSFR_high;i++) {
if(isfinite(SFRD_z_high_table[j][i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table");
// Throw(ParameterError);
Throw(TableGenerationError);
}
for (k=0; k<NMTURN; k++){
if(isfinite(SFRD_z_high_table_MINI[j][i+k*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
}
// The volume filling factor at a given redshift, Q(z), or find redshift at a given Q, z(Q).
//
// The evolution of Q can be written as
// dQ/dt = dn_{ion}/dt - Q/t_{rec},
// where n_{ion} is the cumulative number of ionizing photons per baryon. The averaged recombination time is given by
// t_{rec} ~ 0.93 Gyr * (C_{HII}/3)^-1 * (T_0/2e4 K)^0.7 * ((1+z)/7)^-3.
// We assume the clumping factor of C_{HII}=3 and the IGM temperature of T_0 = 2e4 K, following
// Section 2.1 of Kuhlen & Faucher-Giguère (2012) MNRAS, 423, 862 and references therein.
// 1) initialise interpolation table
// -> initialise_Q_value_spline(NoRec, M_TURN, ALPHA_STAR, ALPHA_ESC, F_STAR10, F_ESC10)
// NoRec = 0: Compute dQ/dt with the recombination time.
// NoRec = 1: Ignore recombination.
// 2) find Q value at a given z -> Q_at_z(z, &(Q))
// or find z at a given Q -> z_at_Q(Q, &(z)).
// 3) free memory allocation -> free_Q_value()
// Set up interpolation table for the volume filling factor, Q, at a given redshift z and redshift at a given Q.
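// Discretisation sketch: rewriting the ODE in the scale factor a, with dt = da/(da/dt),
// one Euler step reads
//   Q(a + da) ~ Q(a) + [ dn_ion/da - Q(a) / (t_rec * (da/dt)) ] * da,
// where dn_ion/da is estimated in InitialisePhotonCons() by the centred difference
// (Nion0 - Nion1)/(2*delta_a) over a +/- delta_a.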
int InitialisePhotonCons(struct UserParams *user_params, struct CosmoParams *cosmo_params,
struct AstroParams *astro_params, struct FlagOptions *flag_options)
{
/*
This is an API-level function for initialising the photon conservation.
*/
int status;
Try{ // this try wraps the whole function.
Broadcast_struct_global_PS(user_params,cosmo_params);
Broadcast_struct_global_UF(user_params,cosmo_params);
init_ps();
// To solve the differential equation, we use Euler's method.
// NOTE:
// (1) With the fiducial parameter set,
// when the Q value is < 0.9, the difference is less than 5% compared with the accurate calculation.
// When Q ~ 0.98, the difference is ~25%. To increase accuracy one can reduce the step size 'da', but this increases the computing time.
// (2) With the fiducial parameter set,
// the difference in the redshift at which reionization ends (Q = 1) is ~0.2% compared with the accurate calculation.
float ION_EFF_FACTOR,M_MIN,M_MIN_z0,M_MIN_z1,Mlim_Fstar, Mlim_Fesc;
double a_start = 0.03, a_end = 1./(1. + global_params.PhotonConsEndCalibz); // Scale factors of 0.03 and 0.17 correspond to redshifts of ~32 and ~5.0, respectively.
double C_HII = 3., T_0 = 2e4;
double reduce_ratio = 1.003;
double Q0,Q1,Nion0,Nion1,Trec,da,a,z0,z1,zi,dadt,ans,delta_a,zi_prev,Q1_prev;
double *z_arr,*Q_arr;
int Nmax = 2000; // This is the number of steps, enough with 'da = 2e-3'. If 'da' is reduced, this number should be checked.
int cnt, nbin, i, istart;
int fail_condition, not_mono_increasing, num_fails;
int gsl_status;
z_arr = calloc(Nmax,sizeof(double));
Q_arr = calloc(Nmax,sizeof(double));
//set the minimum source mass
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
ION_EFF_FACTOR = global_params.Pop2_ion * astro_params->F_STAR10 * astro_params->F_ESC10;
M_MIN = astro_params->M_TURN/50.;
Mlim_Fstar = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_STAR, astro_params->F_STAR10);
Mlim_Fesc = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_ESC, astro_params->F_ESC10);
if(user_params->FAST_FCOLL_TABLES){
initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN),1e20);
}
else{
initialiseSigmaMInterpTable(M_MIN,1e20);
}
}
else {
ION_EFF_FACTOR = astro_params->HII_EFF_FACTOR;
}
fail_condition = 1;
num_fails = 0;
// We are going to come up with the analytic curve for the photon non-conservation correction
// This can be somewhat numerically unstable and as such we increase the sampling until it works
// If it fails to produce a monotonically increasing curve (for Q as a function of z) after 10 attempts we crash out
while(fail_condition!=0) {
a = a_start;
if(num_fails < 3) {
da = 3e-3 - ((double)num_fails)*(1e-3);
}
else {
da = 1e-3 - ((double)num_fails - 2.)*(1e-4);
}
delta_a = 1e-7;
zi_prev = Q1_prev = 0.;
not_mono_increasing = 0;
if(num_fails>0) {
for(i=0;i<Nmax;i++) {
z_arr[i] = 0.;
Q_arr[i] = 0.;
}
}
cnt = 0;
Q0 = 0.;
while (a < a_end) {
zi = 1./a - 1.;
z0 = 1./(a+delta_a) - 1.;
z1 = 1./(a-delta_a) - 1.;
// Ionizing emissivity (num of photons per baryon)
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
Nion0 = ION_EFF_FACTOR*Nion_General(z0, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC, astro_params->F_STAR10, astro_params->F_ESC10,
Mlim_Fstar, Mlim_Fesc);
Nion1 = ION_EFF_FACTOR*Nion_General(z1, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC, astro_params->F_STAR10, astro_params->F_ESC10,
Mlim_Fstar, Mlim_Fesc);
}
else {
//set the minimum source mass
if (astro_params->ION_Tvir_MIN < 9.99999e3) { // neutral IGM
M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 1.22);
M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 1.22);
}
else { // ionized IGM
M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 0.6);
M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 0.6);
}
if(M_MIN_z0 < M_MIN_z1) {
if(user_params->FAST_FCOLL_TABLES){
initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z0),1e20);
}
else{
initialiseSigmaMInterpTable(M_MIN_z0,1e20);
}
}
else {
if(user_params->FAST_FCOLL_TABLES){
initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z1),1e20);
}
else{
initialiseSigmaMInterpTable(M_MIN_z1,1e20);
}
}
Nion0 = ION_EFF_FACTOR*FgtrM_General(z0,M_MIN_z0);
Nion1 = ION_EFF_FACTOR*FgtrM_General(z1,M_MIN_z1);
freeSigmaMInterpTable();
}
// With scale factor a, the above equation is written as dQ/da = dn_{ion}/da - (Q/t_{rec})*(dt/da)
if (!global_params.RecombPhotonCons) {
Q1 = Q0 + ((Nion0-Nion1)/2/delta_a)*da; // No Recombination
}
else {
dadt = Ho*sqrt(cosmo_params_ps->OMm/a + global_params.OMr/a/a + cosmo_params_ps->OMl*a*a); // da/dt = Ho*a*sqrt(OMm/a^3 + OMr/a^4 + OMl)
Trec = 0.93 * 1e9 * SperYR * pow(C_HII/3.,-1) * pow(T_0/2e4,0.7) * pow((1.+zi)/7.,-3);
Q1 = Q0 + ((Nion0-Nion1)/2./delta_a - Q0/Trec/dadt)*da;
}
// Curve is no longer monotonically increasing, we are going to have to exit and start again
if(Q1 < Q1_prev) {
not_mono_increasing = 1;
break;
}
zi_prev = zi;
Q1_prev = Q1;
z_arr[cnt] = zi;
Q_arr[cnt] = Q1;
cnt = cnt + 1;
if (Q1 >= 1.0) break; // if fully ionized, stop here.
// As the Q value increases, the bin size decreases gradually because a more accurate calculation is required.
if (da < 7e-5) da = 7e-5; // set minimum bin size.
else da = pow(da,reduce_ratio);
Q0 = Q1;
a = a + da;
}
// A check to see if we ended up with a monotonically increasing function
if(not_mono_increasing==0) {
fail_condition = 0;
}
else {
num_fails += 1;
if(num_fails>10) {
LOG_ERROR("Failed too many times.");
// Throw ParameterError;
Throw(PhotonConsError);
}
}
}
cnt = cnt - 1;
istart = 0;
for (i=1;i<cnt;i++){
if (Q_arr[i-1] == 0. && Q_arr[i] != 0.) istart = i-1;
}
nbin = cnt - istart;
N_analytic = nbin;
// initialise interpolation of Q as a function of z
z_Q = calloc(nbin,sizeof(double));
Q_value = calloc(nbin,sizeof(double));
Q_at_z_spline_acc = gsl_interp_accel_alloc ();
Q_at_z_spline = gsl_spline_alloc (gsl_interp_cspline, nbin);
for (i=0; i<nbin; i++){
z_Q[i] = z_arr[cnt-i];
Q_value[i] = Q_arr[cnt-i];
}
gsl_set_error_handler_off();
gsl_status = gsl_spline_init(Q_at_z_spline, z_Q, Q_value, nbin);
GSL_ERROR(gsl_status);
Zmin = z_Q[0];
Zmax = z_Q[nbin-1];
Qmin = Q_value[nbin-1];
Qmax = Q_value[0];
// initialise interpolation z as a function of Q
double *Q_z = calloc(nbin,sizeof(double));
double *z_value = calloc(nbin,sizeof(double));
z_at_Q_spline_acc = gsl_interp_accel_alloc ();
z_at_Q_spline = gsl_spline_alloc (gsl_interp_linear, nbin);
for (i=0; i<nbin; i++){
Q_z[i] = Q_value[nbin-1-i];
z_value[i] = z_Q[nbin-1-i];
}
gsl_status = gsl_spline_init(z_at_Q_spline, Q_z, z_value, nbin);
GSL_ERROR(gsl_status);
free(z_arr);
free(Q_arr);
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
freeSigmaMInterpTable();
}
LOG_DEBUG("Initialised PhotonCons.");
} // End of try
Catch(status){
return status;
}
return(0);
}
// Function to construct the spline for the calibration curve of the photon non-conservation
int PhotonCons_Calibration(double *z_estimate, double *xH_estimate, int NSpline){
int status;
Try{
if(xH_estimate[NSpline-1] > 0.0 && xH_estimate[NSpline-2] > 0.0 && xH_estimate[NSpline-3] > 0.0 && xH_estimate[0] <= global_params.PhotonConsStart) {
initialise_NFHistory_spline(z_estimate,xH_estimate,NSpline);
}
}
Catch(status){
return status;
}
return(0);
}
// Function callable from Python to know at which redshift to start sampling the calibration curve (to minimise function calls)
int ComputeZstart_PhotonCons(double *zstart) {
int status;
double temp;
Try{
if((1.-global_params.PhotonConsStart) > Qmax) {
// It is possible that reionisation never even starts
// Just need to arbitrarily set a high redshift to perform the algorithm
temp = 20.;
}
else {
z_at_Q(1. - global_params.PhotonConsStart,&(temp));
// Increase the result by 10 per cent to cover instances when this isn't high enough
temp *= 1.1;
}
}
Catch(status){
return(status); // Use the status to determine if something went wrong.
}
*zstart = temp;
return(0);
}
void determine_deltaz_for_photoncons() {
int i, j, increasing_val, counter, smoothing_int;
double temp;
float z_cal, z_analytic, NF_sample, returned_value, NF_sample_min, gradient_analytic, z_analytic_at_endpoint, const_offset, z_analytic_2, smoothing_width;
float bin_width, delta_NF, val1, val2, extrapolated_value;
LOG_DEBUG("Determining deltaz for photon cons.");
// Number of points used to determine the delta z correction of the photon non-conservation
N_NFsamples = 100;
// Determine the change in neutral fraction to calculate the gradient for the linear extrapolation of the photon non-conservation correction
delta_NF = 0.025;
// A width (in neutral-fraction data points) over which we average to try and avoid sharp features in the correction (removes some kinks)
// Effectively acts as a filtering step
smoothing_width = 35.;
// The photon non-conservation correction has a threshold (in terms of neutral fraction; global_params.PhotonConsEnd) at which we switch
// from using the exact correction between the calibrated (21cmFAST with all flag options off) and analytic expressions to an extrapolation.
// This threshold is required due to the behaviour of 21cmFAST at very low neutral fractions, which causes extreme behaviour with recombinations on
// A lot of the steps and choices are not completely robust, just chosen to smooth/average the data and obtain smoother resultant reionisation histories
// Determine the number of extrapolated points required, if required at all.
if(calibrated_NF_min < global_params.PhotonConsEnd) {
// We require extrapolation, set minimum point to the threshold, and extrapolate beyond.
NF_sample_min = global_params.PhotonConsEnd;
// Determine the number of extrapolation points (to better smooth the correction) between the threshold (global_params.PhotonConsEnd) and a
// point close to zero neutral fraction (set by global_params.PhotonConsAsymptoteTo)
// Choice is to get the delta neutral fraction between extrapolated points to be similar to the cadence in the exact correction
if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - calibrated_NF_min)/( global_params.PhotonConsStart - NF_sample_min );
}
else {
N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - global_params.PhotonConsAsymptoteTo)/( global_params.PhotonConsStart - NF_sample_min );
}
N_extrapolated = (int)floor( N_extrapolated ) - 1; // Minus one as the zero point is added below
}
else {
// No extrapolation required, neutral fraction never reaches zero
NF_sample_min = calibrated_NF_min;
N_extrapolated = 0;
}
// Determine the bin width for the sampling of the neutral fraction for the correction
bin_width = ( global_params.PhotonConsStart - NF_sample_min )/((float)N_NFsamples - 1.);
// allocate memory for arrays required to determine the photon non-conservation correction
deltaz = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
deltaz_smoothed = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
NeutralFractions = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
// Go through and fill the data points (neutral fraction and corresponding delta z between the calibrated and analytic curves).
for(i=0;i<N_NFsamples;i++) {
NF_sample = NF_sample_min + bin_width*(float)i;
// Determine redshift given a neutral fraction for the calibration curve
z_at_NFHist(NF_sample,&(temp));
z_cal = temp;
// Determine redshift given a neutral fraction for the analytic curve
z_at_Q(1. - NF_sample,&(temp));
z_analytic = temp;
deltaz[i+1+N_extrapolated] = fabs( z_cal - z_analytic );
NeutralFractions[i+1+N_extrapolated] = NF_sample;
}
// Determining the end-point (lowest neutral fraction) for the photon non-conservation correction
if(calibrated_NF_min >= global_params.PhotonConsEnd) {
increasing_val = 0;
counter = 0;
// Check if all the values of delta z are increasing
for(i=0;i<(N_NFsamples-1);i++) {
if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) {
counter += 1;
}
}
// If all the values of delta z are increasing, then some of the smoothing of the correction done below cannot be performed
if(counter==(N_NFsamples-1)) {
increasing_val = 1;
}
// Since reionisation never completes here, we need to set an appropriate end-point for the correction
// Take some fraction of the previous point to determine the end-point
NeutralFractions[0] = 0.999*NF_sample_min;
if(increasing_val) {
// Values of delta z are always increasing with decreasing neutral fraction thus make the last point slightly larger
deltaz[0] = 1.001*deltaz[1];
}
else {
// Values of delta z are always decreasing with decreasing neutral fraction thus make the last point slightly smaller
deltaz[0] = 0.999*deltaz[1];
}
}
else {
// Ok, we are going to be extrapolating the photon non-conservation (delta z) beyond the threshold
// Construct a linear curve for the analytic function to extrapolate to the new endpoint
// The choice for doing so is to ensure the corrected reionisation history is mostly smooth, and doesn't
// artificially result in kinks due to switching between how the delta z should be calculated
z_at_Q(1. - (NeutralFractions[1+N_extrapolated] + delta_NF),&(temp));
z_analytic = temp;
z_at_Q(1. - NeutralFractions[1+N_extrapolated],&(temp));
z_analytic_2 = temp;
// determine the linear curve
// Multiplication by 1.1 is arbitrary but effectively smooths out most kinks observed in the resultant corrected reionisation histories
gradient_analytic = 1.1*( delta_NF )/( z_analytic - z_analytic_2 );
const_offset = ( NeutralFractions[1+N_extrapolated] + delta_NF ) - gradient_analytic * z_analytic;
// determine the extrapolation end point
if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
extrapolated_value = calibrated_NF_min;
}
else {
extrapolated_value = global_params.PhotonConsAsymptoteTo;
}
// calculate the delta z for the extrapolated end point
z_at_NFHist(extrapolated_value,&(temp));
z_cal = temp;
z_analytic_at_endpoint = ( extrapolated_value - const_offset )/gradient_analytic ;
deltaz[0] = fabs( z_cal - z_analytic_at_endpoint );
NeutralFractions[0] = extrapolated_value;
// If performing extrapolation, add in all the extrapolated points between the end-point and the threshold to end the correction (global_params.PhotonConsEnd)
for(i=0;i<N_extrapolated;i++) {
if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
NeutralFractions[i+1] = calibrated_NF_min + (NF_sample_min - calibrated_NF_min)*(float)(i+1)/((float)N_extrapolated + 1.);
}
else {
NeutralFractions[i+1] = global_params.PhotonConsAsymptoteTo + (NF_sample_min - global_params.PhotonConsAsymptoteTo)*(float)(i+1)/((float)N_extrapolated + 1.);
}
deltaz[i+1] = deltaz[0] + ( deltaz[1+N_extrapolated] - deltaz[0] )*(float)(i+1)/((float)N_extrapolated + 1.);
}
}
// We have added the extrapolated values, now check whether they are all increasing (again, to determine whether or not to try and smooth the corrected curve)
increasing_val = 0;
counter = 0;
for(i=0;i<(N_NFsamples-1);i++) {
if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) {
counter += 1;
}
}
if(counter==(N_NFsamples-1)) {
increasing_val = 1;
}
// For some models, the resultant delta z for extremely high neutral fractions ( > 0.95) can oscillate or sometimes drop in value.
// This goes through and checks if this occurs, and tries to smooth this out
// This doesn't occur very often, but can cause an artificial drop in the reionisation history (neutral fraction value) connecting the
// values before/after the photon non-conservation correction starts.
for(i=0;i<(N_NFsamples+N_extrapolated);i++) {
val1 = deltaz[i];
val2 = deltaz[i+1];
counter = 0;
// Check if we have a neutral fraction above 0.95, that the values are decreasing (val2 < val1), that we haven't sampled too many points (counter)
// and that the NF_sample_min is less than around 0.8. That is, if a reasonable fraction of the reionisation history is sampled.
while( NeutralFractions[i+1] > 0.95 && val2 < val1 && NF_sample_min < 0.8 && counter < 100) {
NF_sample = global_params.PhotonConsStart - 0.001*(counter+1);
// Determine redshift given a neutral fraction for the calibration curve
z_at_NFHist(NF_sample,&(temp));
z_cal = temp;
// Determine redshift given a neutral fraction for the analytic curve
z_at_Q(1. - NF_sample,&(temp));
z_analytic = temp;
// Determine the delta z
val2 = fabs( z_cal - z_analytic );
deltaz[i+1] = val2;
counter += 1;
// If after 100 samplings we couldn't get the value to increase (like it should), just modify it from the previous point.
if(counter==100) {
deltaz[i+1] = deltaz[i] * 1.01;
}
}
}
// Store the data in its intermediate state before averaging
for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) {
deltaz_smoothed[i] = deltaz[i];
}
// If we are not increasing for all values, we can smooth out some features in delta z when connecting the extrapolated delta z values
// compared to those from the exact correction (i.e. when we cross the threshold).
if(!increasing_val) {
for(i=0;i<(N_NFsamples+N_extrapolated);i++) {
val1 = deltaz[0];
val2 = deltaz[i+1];
counter = 0;
// Try and find a point which can be used to smooth out any dip in delta z as a function of neutral fraction.
// It can be flat, then drop, then increase. This smooths over this drop (removes a kink in the resultant reionisation history).
// Choice of 75 is somewhat arbitrary; the bounds check keeps us from reading past the end of the deltaz array
while(val2 < val1 && counter < 75 && (1+(i+1)+counter) <= (N_NFsamples+N_extrapolated)) {
counter += 1;
val2 = deltaz[i+1+counter];
deltaz_smoothed[i+1] = ( val1 + deltaz[1+(i+1)+counter] )/2.;
}
if(counter==75 || (1+(i+1)+counter) > (N_NFsamples+N_extrapolated)) {
deltaz_smoothed[i+1] = deltaz[i+1];
}
}
}
// Here we effectively filter over the delta z as a function of neutral fraction to try and minimise any possible kinks etc. in the functional curve.
for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) {
// We are at the end-points, cannot smooth
if(i==0 || i==(N_NFsamples+N_extrapolated)) {
deltaz[i] = deltaz_smoothed[i];
}
else {
deltaz[i] = 0.;
// We are smoothing symmetrically, making sure we have the same number of data points either side of the point we are filtering over
// This determines the filter width when close to the edge of the data ranges
if( (i - (int)floor(smoothing_width/2.) ) < 0) {
smoothing_int = 2*( i ) + (int)((int)smoothing_width%2);
}
else if( (i - (int)floor(smoothing_width/2.) + ((int)smoothing_width - 1) ) > (N_NFsamples + N_extrapolated) ) {
smoothing_int = ((int)smoothing_width - 1) - 2*((i - (int)floor(smoothing_width/2.) + ((int)smoothing_width - 1) ) - (N_NFsamples + N_extrapolated) ) + (int)((int)smoothing_width%2);
}
else {
smoothing_int = (int)smoothing_width;
}
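// Worked example: with smoothing_width = 35 and i = 5, the first branch gives
// smoothing_int = 2*5 + 1 = 11, i.e. the largest odd window that still fits symmetrically
// (five points either side of i) without running off the lower edge of the array.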
// Average (filter) over the delta z values to smooth the result
counter = 0;
for(j=0;j<(int)smoothing_width;j++) {
if(((i - (int)floor((float)smoothing_int/2.) + j)>=0) && ((i - (int)floor((float)smoothing_int/2.) + j) <= (N_NFsamples + N_extrapolated)) && counter < smoothing_int ) {
deltaz[i] += deltaz_smoothed[i - (int)floor((float)smoothing_int/2.) + j];
counter += 1;
}
}
deltaz[i] /= (float)counter;
}
}
N_deltaz = N_NFsamples + N_extrapolated + 1;
// Now, we can construct the spline of the photon non-conservation correction (delta z as a function of neutral fraction)
deltaz_spline_for_photoncons_acc = gsl_interp_accel_alloc ();
deltaz_spline_for_photoncons = gsl_spline_alloc (gsl_interp_linear, N_NFsamples + N_extrapolated + 1);
gsl_set_error_handler_off();
int gsl_status;
gsl_status = gsl_spline_init(deltaz_spline_for_photoncons, NeutralFractions, deltaz, N_NFsamples + N_extrapolated + 1);
GSL_ERROR(gsl_status);
}
float adjust_redshifts_for_photoncons(
struct AstroParams *astro_params, struct FlagOptions *flag_options, float *redshift,
float *stored_redshift, float *absolute_delta_z
) {
int i, new_counter;
double temp;
float required_NF, adjusted_redshift, future_z, gradient_extrapolation, const_extrapolation, temp_redshift, check_required_NF;
LOG_DEBUG("Adjusting redshifts for photon cons.");
if(*redshift < global_params.PhotonConsEndCalibz) {
LOG_ERROR(
"You have passed a redshift (z = %f) that is lower than the enpoint of the photon non-conservation correction "\
"(global_params.PhotonConsEndCalibz = %f). If this behaviour is desired then set global_params.PhotonConsEndCalibz "\
"to a value lower than z = %f.",*redshift,global_params.PhotonConsEndCalibz,*redshift
);
// Throw(ParameterError);
Throw(PhotonConsError);
}
// Determine the neutral fraction (filling factor) of the analytic calibration expression given the current sampled redshift
Q_at_z(*redshift, &(temp));
required_NF = 1.0 - (float)temp;
// Find which redshift we need to sample in order for the calibration reionisation history to match the analytic expression
if(required_NF > global_params.PhotonConsStart) {
// We haven't started ionising yet, so keep redshifts the same
adjusted_redshift = *redshift;
*absolute_delta_z = 0.;
}
else if(required_NF<=global_params.PhotonConsEnd) {
// We have gone beyond the threshold for the end of the photon non-conservation correction
// Deemed to be roughly where the calibration curve starts to approach the analytic expression
if(FirstNF_Estimate <= 0. && required_NF <= 0.0) {
// Reionisation has already happened well before the calibration
adjusted_redshift = *redshift;
}
else {
// We have crossed the NF threshold for the photon conservation correction so now set to the delta z at the threshold
if(required_NF < global_params.PhotonConsAsymptoteTo) {
// This counts the number of times we have exceeded the extrapolated point and attempts to modify the delta z
// to try and make the function a little smoother
*absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, global_params.PhotonConsAsymptoteTo, deltaz_spline_for_photoncons_acc);
new_counter = 0;
temp_redshift = *redshift;
check_required_NF = required_NF;
// Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR
// In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling
while( check_required_NF < global_params.PhotonConsAsymptoteTo ) {
temp_redshift = ((1. + temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.);
Q_at_z(temp_redshift, &(temp));
check_required_NF = 1.0 - (float)temp;
new_counter += 1;
}
// Now adjust the final delta_z by some amount to smooth it over successive steps
if(deltaz[1] > deltaz[0]) {
*absolute_delta_z = pow( 0.96 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
}
else {
*absolute_delta_z = pow( 1.04 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
}
// Check if we go into the future (z < 0) and avoid it
adjusted_redshift = (*redshift) - (*absolute_delta_z);
if(adjusted_redshift < 0.0) {
adjusted_redshift = 0.0;
}
}
else {
*absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, required_NF, deltaz_spline_for_photoncons_acc);
adjusted_redshift = (*redshift) - (*absolute_delta_z);
}
}
}
else {
// Initialise the photon non-conservation correction curve
if(!photon_cons_allocated) {
determine_deltaz_for_photoncons();
photon_cons_allocated = true;
}
// We have exceeded even the end-point of the extrapolation
// Just smooth every subsequent point
// Note that this is deliberately tailored to light-cone quantities, but will still work with co-eval cubes,
// though it might produce some very minor discrepancies when comparing outputs.
if(required_NF < NeutralFractions[0]) {
new_counter = 0;
temp_redshift = *redshift;
check_required_NF = required_NF;
// Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR
// In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling
while( check_required_NF < NeutralFractions[0] ) {
temp_redshift = ((1. + temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.);
Q_at_z(temp_redshift, &(temp));
check_required_NF = 1.0 - (float)temp;
new_counter += 1;
}
if(new_counter > 5) {
LOG_WARNING(
"The photon non-conservation correction has employed an extrapolation for\n"\
"more than 5 consecutive snapshots. This can be unstable, thus please check "\
"resultant history. Parameters are:\n"
);
#if LOG_LEVEL >= LOG_WARNING
writeAstroParams(flag_options, astro_params);
#endif
}
// Now adjust the final delta_z by some amount to smooth it over successive steps
if(deltaz[1] > deltaz[0]) {
*absolute_delta_z = pow( 0.998 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
}
else {
*absolute_delta_z = pow( 1.002 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
}
// Check if we go into the future (z < 0) and avoid it
adjusted_redshift = (*redshift) - (*absolute_delta_z);
if(adjusted_redshift < 0.0) {
adjusted_redshift = 0.0;
}
}
else {
// Find the corresponding redshift for the calibration curve given the required neutral fraction (filling factor) from the analytic expression
*absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, (double)required_NF, deltaz_spline_for_photoncons_acc);
adjusted_redshift = (*redshift) - (*absolute_delta_z);
}
}
// keep the original sampled redshift
*stored_redshift = *redshift;
// This redshift snapshot now uses the modified redshift following the photon non-conservation correction
*redshift = adjusted_redshift;
}
void Q_at_z(double z, double *splined_value){
float returned_value;
if (z >= Zmax) {
*splined_value = 0.;
}
else if (z <= Zmin) {
*splined_value = 1.;
}
else {
returned_value = gsl_spline_eval(Q_at_z_spline, z, Q_at_z_spline_acc);
*splined_value = returned_value;
}
}
void z_at_Q(double Q, double *splined_value){
float returned_value;
if (Q < Qmin) {
LOG_ERROR("The minimum value of Q is %.4e",Qmin);
// Throw(ParameterError);
Throw(PhotonConsError);
}
else if (Q > Qmax) {
LOG_ERROR("The maximum value of Q is %.4e. Reionization ends at ~%.4f.",Qmax,Zmin);
LOG_ERROR("This error can occur if global_params.PhotonConsEndCalibz is close to "\
"the final sampled redshift. One can consider a lower value for "\
"global_params.PhotonConsEndCalibz to mitigate this");
// Throw(ParameterError);
Throw(PhotonConsError);
}
else {
returned_value = gsl_spline_eval(z_at_Q_spline, Q, z_at_Q_spline_acc);
*splined_value = returned_value;
}
}
void free_Q_value() {
gsl_spline_free (Q_at_z_spline);
gsl_interp_accel_free (Q_at_z_spline_acc);
gsl_spline_free (z_at_Q_spline);
gsl_interp_accel_free (z_at_Q_spline_acc);
}
void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline){
int i, counter, start_index, found_start_index;
// This takes in the data for the calibration curve for the photon non-conservation correction
counter = 0;
start_index = 0;
found_start_index = 0;
FinalNF_Estimate = NF_estimate[0];
FirstNF_Estimate = NF_estimate[NSpline-1];
// Determine the point in the data where it's no longer zero (basically to avoid too many zeros in the spline)
for(i=0;i<NSpline-1;i++) {
if(NF_estimate[i+1] > NF_estimate[i]) {
if(found_start_index == 0) {
start_index = i;
found_start_index = 1;
}
}
counter += 1;
}
counter = counter - start_index;
N_calibrated = (counter+1);
// Store the data points for determining the photon non-conservation correction
nf_vals = calloc((counter+1),sizeof(double));
z_vals = calloc((counter+1),sizeof(double));
calibrated_NF_min = 1.;
// Store the data, and determine the end point of the input data for estimating the extrapolated results
for(i=0;i<(counter+1);i++) {
nf_vals[i] = NF_estimate[start_index+i];
z_vals[i] = redshifts[start_index+i];
// At the extreme high redshift end, there can be numerical issues with the solution of the analytic expression
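// (gsl_spline_init requires strictly increasing x values, so any non-increasing
// neutral-fraction sample is nudged upwards by a tiny epsilon below.)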
if(i>0) {
while(nf_vals[i] <= nf_vals[i-1]) {
nf_vals[i] += 0.000001;
}
}
if(nf_vals[i] < calibrated_NF_min) {
calibrated_NF_min = nf_vals[i];
}
}
NFHistory_spline_acc = gsl_interp_accel_alloc ();
// NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1));
NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1));
gsl_set_error_handler_off();
int gsl_status;
gsl_status = gsl_spline_init(NFHistory_spline, nf_vals, z_vals, (counter+1));
GSL_ERROR(gsl_status);
z_NFHistory_spline_acc = gsl_interp_accel_alloc ();
// z_NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1));
z_NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1));
gsl_status = gsl_spline_init(z_NFHistory_spline, z_vals, nf_vals, (counter+1));
GSL_ERROR(gsl_status);
}
void z_at_NFHist(double xHI_Hist, double *splined_value){
float returned_value;
returned_value = gsl_spline_eval(NFHistory_spline, xHI_Hist, NFHistory_spline_acc);
*splined_value = returned_value;
}
void NFHist_at_z(double z, double *splined_value){
float returned_value;
returned_value = gsl_spline_eval(z_NFHistory_spline, z, NFHistory_spline_acc);
*splined_value = returned_value;
}
int ObtainPhotonConsData(
double *z_at_Q_data, double *Q_data, int *Ndata_analytic, double *z_cal_data,
double *nf_cal_data, int *Ndata_calibration,
double *PhotonCons_NFdata, double *PhotonCons_deltaz, int *Ndata_PhotonCons) {
int i;
*Ndata_analytic = N_analytic;
*Ndata_calibration = N_calibrated;
*Ndata_PhotonCons = N_deltaz;
for(i=0;i<N_analytic;i++) {
z_at_Q_data[i] = z_Q[i];
Q_data[i] = Q_value[i];
}
for(i=0;i<N_calibrated;i++) {
z_cal_data[i] = z_vals[i];
nf_cal_data[i] = nf_vals[i];
}
for(i=0;i<N_deltaz;i++) {
PhotonCons_NFdata[i] = NeutralFractions[i];
PhotonCons_deltaz[i] = deltaz[i];
}
return(0);
}
void FreePhotonConsMemory() {
LOG_DEBUG("Freeing some photon cons memory.");
free(deltaz);
free(deltaz_smoothed);
free(NeutralFractions);
free(z_Q);
free(Q_value);
free(nf_vals);
free(z_vals);
free_Q_value();
gsl_spline_free (NFHistory_spline);
gsl_interp_accel_free (NFHistory_spline_acc);
gsl_spline_free (z_NFHistory_spline);
gsl_interp_accel_free (z_NFHistory_spline_acc);
gsl_spline_free (deltaz_spline_for_photoncons);
gsl_interp_accel_free (deltaz_spline_for_photoncons_acc);
LOG_DEBUG("Done Freeing photon cons memory.");
photon_cons_allocated = false;
}
void FreeTsInterpolationTables(struct FlagOptions *flag_options) {
LOG_DEBUG("Freeing some interpolation table memory.");
freeSigmaMInterpTable();
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
free(z_val); z_val = NULL;
free(Nion_z_val);
free(z_X_val); z_X_val = NULL;
free(SFRD_val);
if (flag_options->USE_MINI_HALOS){
free(Nion_z_val_MINI);
free(SFRD_val_MINI);
}
}
else{
free(FgtrM_1DTable_linear);
}
LOG_DEBUG("Done Freeing interpolation table memory.");
interpolation_tables_allocated = false;
}
|
State Before: 𝕜 : Type u_1
inst✝⁴ : NontriviallyNormedField 𝕜
F : Type u_2
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace 𝕜 F
E : Type ?u.53911
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace 𝕜 E
n✝ : ℕ
f : 𝕜 → F
s : Set 𝕜
x : 𝕜
n : ℕ∞
m : ℕ
h : ContDiffWithinAt 𝕜 n f s x
hmn : ↑m < n
hs : UniqueDiffOn 𝕜 (insert x s)
⊢ DifferentiableWithinAt 𝕜 (iteratedDerivWithin m f s) s x
State After: no goals
Tactic: simpa only [iteratedDerivWithin_eq_equiv_comp,
LinearIsometryEquiv.comp_differentiableWithinAt_iff] using
h.differentiableWithinAt_iteratedFDerivWithin hmn hs
|
-- Inductively constructed substitution maps
module SOAS.ContextMaps.Inductive {T : Set} where
open import SOAS.Common
open import SOAS.Context
open import SOAS.Sorting
open import SOAS.Families.Core {T}
open import SOAS.Variable
private
variable
α : T
Γ Δ : Ctx
𝒳 𝒴 : Familyₛ
-- A list of terms in context Δ for every variable in context Γ
data Sub (𝒳 : Familyₛ) : Ctx → Ctx → Set where
• : Sub 𝒳 ∅ Δ
_◂_ : 𝒳 α Δ → Sub 𝒳 Γ Δ → Sub 𝒳 (α ∙ Γ) Δ
infixl 120 _◂_
infix 150 _⟩
pattern _⟩ t = t ◂ •
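-- With the fixities above, e.g. t₁ ◂ t₂ ⟩ parses as t₁ ◂ (t₂ ◂ •) : Sub 𝒳 (α ∙ β ∙ ∅) Δ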
-- Functorial mapping
Sub₁ : (f : 𝒳 ⇾̣ 𝒴) → Sub 𝒳 Γ Δ → Sub 𝒴 Γ Δ
Sub₁ f • = •
Sub₁ f (x ◂ σ) = f x ◂ Sub₁ f σ
-- Conversion between inductive substitutions and context maps
module _ {𝒳 : Familyₛ} where
index : Sub 𝒳 Γ Δ → Γ ~[ 𝒳 ]↝ Δ
index • ()
index (t ◂ σ) new = t
index (t ◂ σ) (old v) = index σ v
tabulate : Γ ~[ 𝒳 ]↝ Δ → Sub 𝒳 Γ Δ
tabulate {Γ = ∅} σ = •
tabulate {Γ = α ∙ Γ} σ = σ new ◂ tabulate (σ ∘ old)
ix∘tab≈id : (σ : Γ ~[ 𝒳 ]↝ Δ) (v : ℐ α Γ)
→ index (tabulate σ) v ≡ σ v
ix∘tab≈id {Γ = α ∙ Γ} σ new = refl
ix∘tab≈id {Γ = α ∙ Γ} σ (old v) = ix∘tab≈id (σ ∘ old) v
tab∘ix≈id : (σ : Sub 𝒳 Γ Δ) → tabulate (index σ) ≡ σ
tab∘ix≈id • = refl
tab∘ix≈id (x ◂ σ) rewrite tab∘ix≈id σ = refl
-- Naturality conditions
tabulate-nat : (f : 𝒳 ⇾̣ 𝒴)(σ : Γ ~[ 𝒳 ]↝ Δ)
→ tabulate {𝒴} (f ∘ σ) ≡ Sub₁ f (tabulate {𝒳} σ)
tabulate-nat {Γ = ∅} f σ = refl
tabulate-nat {Γ = α ∙ Γ} f σ = cong (f (σ new) ◂_) (tabulate-nat f (σ ∘ old))
index-nat : (f : 𝒳 ⇾̣ 𝒴)(σ : Sub 𝒳 Γ Δ)(v : ℐ α Γ)
→ index (Sub₁ f σ) v ≡ f (index σ v)
index-nat f (x ◂ σ) new = refl
index-nat f (x ◂ σ) (old v) = index-nat f σ v
|
lemma connected_Ioo[simp]: "connected {a<..<b}" for a b :: "'a::linear_continuum_topology"
|
## REMEMBER TO UPDATE TESTS FOR BOTH THE N-GMRES and the O-ACCEL TEST SETS
@testset "N-GMRES" begin
method = NGMRES
solver = method()
skip = ("Trigonometric", )
run_optim_tests(solver; skip = skip,
iteration_exceptions = (("Penalty Function I", 10000), ),
show_name = debug_printing)
# Specialized tests
prob = MVP.UnconstrainedProblems.examples["Rosenbrock"]
df = OnceDifferentiable(MVP.objective(prob),
MVP.gradient(prob),
prob.initial_x)
@test solver.nlpreconopts.iterations == 1
@test solver.nlpreconopts.allow_f_increases == true
defopts = Optim.default_options(solver)
@test defopts == Dict(:allow_f_increases => true)
state = Optim.initial_state(solver, Optim.Options(;defopts...), df,
prob.initial_x)
@test state.x === state.nlpreconstate.x
@test state.x_previous === state.nlpreconstate.x_previous
@test size(state.X) == (length(state.x), solver.wmax)
@test size(state.R) == (length(state.x), solver.wmax)
@test size(state.Q) == (solver.wmax, solver.wmax)
@test size(state.ξ) == (solver.wmax,)
@test state.curw == 1
@test size(state.A) == (solver.wmax, solver.wmax)
@test length(state.b) == solver.wmax
@test length(state.xA) == length(state.x)
# Test that tracing doesn't throw errors
res = optimize(df, prob.initial_x, solver,
Optim.Options(extended_trace=true, store_trace=true;
defopts...))
@test Optim.converged(res)
# The bounds are due to different systems behaving differently
# TODO: is it a bad idea to hardcode these?
@test 64 < Optim.iterations(res) < 84
@test 234 < Optim.f_calls(res) < 286
@test 234 < Optim.g_calls(res) < 286
@test Optim.minimum(res) < 1e-10
@test_throws AssertionError method(manifold=Optim.Sphere(), nlprecon = GradientDescent())
for nlprec in (LBFGS, BFGS)
solver = method(nlprecon=nlprec())
clear!(df)
res = optimize(df, prob.initial_x, solver)
if !Optim.converged(res)
display(res)
end
@test Optim.converged(res)
@test Optim.minimum(res) < 1e-10
end
# O-ACCEL handles the InitialConstantChange functionality in a special way,
# so we should test that it works well.
for nlprec in (GradientDescent(),
GradientDescent(alphaguess = LineSearches.InitialConstantChange()))
solver = method(nlprecon = nlprec,
alphaguess = LineSearches.InitialConstantChange())
clear!(df)
res = optimize(df, prob.initial_x, solver)
if !Optim.converged(res)
display(res)
end
@test Optim.converged(res)
@test Optim.minimum(res) < 1e-10
end
end
@testset "O-ACCEL" begin
method = OACCEL
solver = method()
skip = ("Trigonometric", )
run_optim_tests(solver; skip = skip,
iteration_exceptions = (("Penalty Function I", 10000), ),
show_name = debug_printing)
prob = MVP.UnconstrainedProblems.examples["Rosenbrock"]
df = OnceDifferentiable(MVP.objective(prob),
MVP.gradient(prob),
prob.initial_x)
@test solver.nlpreconopts.iterations == 1
@test solver.nlpreconopts.allow_f_increases == true
defopts = Optim.default_options(solver)
@test defopts == Dict(:allow_f_increases => true)
state = Optim.initial_state(solver, Optim.Options(;defopts...), df,
prob.initial_x)
@test state.x === state.nlpreconstate.x
@test state.x_previous === state.nlpreconstate.x_previous
@test size(state.X) == (length(state.x), solver.wmax)
@test size(state.R) == (length(state.x), solver.wmax)
@test size(state.Q) == (solver.wmax, solver.wmax)
@test size(state.ξ) == (solver.wmax, 2)
@test state.curw == 1
@test size(state.A) == (solver.wmax, solver.wmax)
@test length(state.b) == solver.wmax
@test length(state.xA) == length(state.x)
# Test that tracing doesn't throw errors
res = optimize(df, prob.initial_x, solver,
Optim.Options(extended_trace=true, store_trace=true;
defopts...))
@test Optim.converged(res)
# The bounds are due to different systems behaving differently
# TODO: is it a bad idea to hardcode these?
@test 72 < Optim.iterations(res) < 88
@test 245 < Optim.f_calls(res) < 292
@test 245 < Optim.g_calls(res) < 292
@test Optim.minimum(res) < 1e-10
@test_throws AssertionError method(manifold=Optim.Sphere(), nlprecon = GradientDescent())
for nlprec in (LBFGS, BFGS)
solver = method(nlprecon=nlprec())
clear!(df)
res = optimize(df, prob.initial_x, solver)
if !Optim.converged(res)
display(res)
end
@test Optim.converged(res)
@test Optim.minimum(res) < 1e-10
end
# O-ACCEL handles the InitialConstantChange functionality in a special way,
# so we should test that it works well.
for nlprec in (GradientDescent(),
GradientDescent(alphaguess = LineSearches.InitialConstantChange()))
solver = method(nlprecon = nlprec,
alphaguess = LineSearches.InitialConstantChange())
clear!(df)
res = optimize(df, prob.initial_x, solver)
if !Optim.converged(res)
display(res)
end
@test Optim.converged(res)
@test Optim.minimum(res) < 1e-10
end
end
|
(* Author: Tobias Nipkow *)
subsection \<open>Transfer of Tree Analysis to List Representation\<close>
theory Pairing_Heap_List1_Analysis2
imports
Pairing_Heap_List1_Analysis
Pairing_Heap_Tree_Analysis
begin
text\<open>This theory transfers the amortized analysis of the tree-based
pairing heaps to Okasaki's pairing heaps.\<close>
abbreviation "is_root' == Pairing_Heap_List1_Analysis.is_root"
abbreviation "del_min' == Pairing_Heap_List1.del_min"
abbreviation "insert' == Pairing_Heap_List1.insert"
abbreviation "merge' == Pairing_Heap_List1.merge"
abbreviation "pass\<^sub>1' == Pairing_Heap_List1.pass\<^sub>1"
abbreviation "pass\<^sub>2' == Pairing_Heap_List1.pass\<^sub>2"
abbreviation "T\<^sub>p\<^sub>a\<^sub>s\<^sub>s\<^sub>1' == Pairing_Heap_List1_Analysis.T\<^sub>p\<^sub>a\<^sub>s\<^sub>s\<^sub>1"
abbreviation "T\<^sub>p\<^sub>a\<^sub>s\<^sub>s\<^sub>2' == Pairing_Heap_List1_Analysis.T\<^sub>p\<^sub>a\<^sub>s\<^sub>s\<^sub>2"
fun homs :: "'a heap list \<Rightarrow> 'a tree" where
"homs [] = Leaf" |
"homs (Hp x lhs # rhs) = Node (homs lhs) x (homs rhs)"
fun hom :: "'a heap \<Rightarrow> 'a tree" where
"hom heap.Empty = Leaf" |
"hom (Hp x hs) = (Node (homs hs) x Leaf)"
lemma homs_pass1': "no_Emptys hs \<Longrightarrow> homs(pass\<^sub>1' hs) = pass\<^sub>1 (homs hs)"
apply(induction hs rule: Pairing_Heap_List1.pass\<^sub>1.induct)
subgoal for h1 h2
apply(case_tac h1)
apply simp
apply(case_tac h2)
apply (auto)
done
apply simp
subgoal for h
apply(case_tac h)
apply (auto)
done
done
lemma hom_merge': "\<lbrakk> no_Emptys lhs; Pairing_Heap_List1_Analysis.is_root h\<rbrakk>
\<Longrightarrow> hom (merge' (Hp x lhs) h) = link \<langle>homs lhs, x, hom h\<rangle>"
by(cases h) auto
lemma hom_pass2': "no_Emptys hs \<Longrightarrow> hom(pass\<^sub>2' hs) = pass\<^sub>2 (homs hs)"
by(induction hs rule: homs.induct) (auto simp: hom_merge' is_root_pass2)
lemma del_min': "is_root' h \<Longrightarrow> hom(del_min' h) = del_min (hom h)"
by(cases h)
(auto simp: homs_pass1' hom_pass2' no_Emptys_pass1 is_root_pass2)
lemma insert': "is_root' h \<Longrightarrow> hom(insert' x h) = insert x (hom h)"
by(cases h)(auto)
lemma merge':
"\<lbrakk> is_root' h1; is_root' h2 \<rbrakk> \<Longrightarrow> hom(merge' h1 h2) = merge (hom h1) (hom h2)"
apply(cases h1)
apply(simp)
apply(cases h2)
apply(auto)
done
lemma T_pass1': "no_Emptys hs \<Longrightarrow> T\<^sub>p\<^sub>a\<^sub>s\<^sub>s\<^sub>1' hs = T\<^sub>p\<^sub>a\<^sub>s\<^sub>s\<^sub>1(homs hs)"
apply(induction hs rule: Pairing_Heap_List1.pass\<^sub>1.induct)
subgoal for h1 h2
apply(case_tac h1)
apply simp
apply(case_tac h2)
apply (auto)
done
apply simp
subgoal for h
apply(case_tac h)
apply (auto)
done
done
lemma T_pass2': "no_Emptys hs \<Longrightarrow> T\<^sub>p\<^sub>a\<^sub>s\<^sub>s\<^sub>2' hs = T\<^sub>p\<^sub>a\<^sub>s\<^sub>s\<^sub>2(homs hs)"
by(induction hs rule: homs.induct) (auto simp: hom_merge' is_root_pass2)
lemma size_hp: "is_root' h \<Longrightarrow> size_hp h = size (hom h)"
proof(induction h)
case (Hp _ hs) thus ?case
apply(induction hs rule: homs.induct)
apply simp
apply force
apply simp
done
qed simp
interpretation Amortized2
where arity = arity and exec = exec and inv = is_root
and cost = cost and \<Phi> = \<Phi> and U = U
and hom = hom
and exec' = Pairing_Heap_List1_Analysis.exec
and cost' = Pairing_Heap_List1_Analysis.cost and inv' = "is_root'"
and U' = Pairing_Heap_List1_Analysis.U
proof (standard, goal_cases)
case (1 _ f) thus ?case
by (cases f)(auto simp: merge' del_min' numeral_eq_Suc)
next
case (2 ts f)
show ?case
proof(cases f)
case [simp]: Del_min
then obtain h where [simp]: "ts = [h]" using 2 by auto
show ?thesis using 2
by(cases h) (auto simp: is_root_pass2 no_Emptys_pass1)
qed (insert 2,
auto simp: Pairing_Heap_List1_Analysis.is_root_merge numeral_eq_Suc)
next
case (3 t) thus ?case by (cases t) (auto)
next
case (4 ts f) show ?case
proof (cases f)
case [simp]: Del_min
then obtain h where [simp]: "ts = [h]" using 4 by auto
show ?thesis using 4
by (cases h)(auto simp: T_pass1' T_pass2' no_Emptys_pass1 homs_pass1')
qed (insert 4, auto)
next
case (5 _ f) thus ?case by(cases f) (auto simp: size_hp numeral_eq_Suc)
qed
end
|
#ifndef AST_STMT_H
#define AST_STMT_H
#include <boost/optional.hpp>
#include <memory>
#include <vector>
#include "ast.hpp"
#include "../typing/substitution.hpp"
namespace splicpp
{
class ast_id;
class ast_exp;
class ast_fun_call;
class symboltable;
class varcontext;
class typecontext;
class ircontext;
class ir_stmt;
class ast_stmt_mapper;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
/*
Ignore non-virtual-destructor warning
This is a bug in GCC 4.6
See http://stackoverflow.com/questions/2571850/why-does-enable-shared-from-this-have-a-non-virtual-destructor
*/
class ast_stmt : public std::enable_shared_from_this<ast_stmt>, public ast
{
public:
enum ast_stmt_type
{
type_stmts,
type_if,
type_while,
type_assignment,
type_fun_call,
type_return
};
ast_stmt(const sloc sl)
: ast(sl)
{}
virtual void assign_ids(const varcontext& c) = 0;
virtual ast_stmt_type type() const = 0;
virtual void pretty_print(std::ostream& s, const uint tab) const = 0;
virtual substitution infer_type(const typecontext& c, const s_ptr<const sl_type> t) const = 0;
virtual bool contains_return() const = 0;
virtual s_ptr<const ir_stmt> translate(const ircontext& c) const = 0;
virtual void map(ast_stmt_mapper& m) const = 0;
};
#pragma GCC diagnostic pop
class ast_stmt_stmts : public ast_stmt
{
public:
const std::vector<s_ptr<ast_stmt>> stmts;
ast_stmt_stmts(const std::vector<s_ptr<ast_stmt>> stmts, const sloc sl)
: ast_stmt(sl)
, stmts(stmts)
{}
virtual void assign_ids(const varcontext& c);
virtual ast_stmt_type type() const;
virtual void pretty_print(std::ostream& s, const uint tab) const;
virtual substitution infer_type(const typecontext& c, const s_ptr<const sl_type> t) const;
virtual bool contains_return() const;
virtual s_ptr<const ir_stmt> translate(const ircontext& c) const;
virtual void map(ast_stmt_mapper& m) const;
};
class ast_stmt_if : public ast_stmt
{
public:
const s_ptr<ast_exp> exp;
const s_ptr<ast_stmt> stmt_true;
const boost::optional<s_ptr<ast_stmt>> stmt_false;
ast_stmt_if(s_ptr<ast_exp> exp, s_ptr<ast_stmt> stmt_true, const sloc sl)
: ast_stmt(sl)
, exp(exp)
, stmt_true(stmt_true)
, stmt_false()
{}
ast_stmt_if(s_ptr<ast_exp> exp, s_ptr<ast_stmt> stmt_true, s_ptr<ast_stmt> stmt_false, const sloc sl)
: ast_stmt(sl)
, exp(exp)
, stmt_true(stmt_true)
, stmt_false(stmt_false)
{}
virtual void assign_ids(const varcontext& c);
virtual ast_stmt_type type() const;
virtual void pretty_print(std::ostream& s, const uint tab) const;
virtual substitution infer_type(const typecontext& c, const s_ptr<const sl_type> t) const;
virtual bool contains_return() const;
virtual s_ptr<const ir_stmt> translate(const ircontext& c) const;
virtual void map(ast_stmt_mapper& m) const;
};
class ast_stmt_while : public ast_stmt
{
public:
const s_ptr<ast_exp> exp;
const s_ptr<ast_stmt> stmt;
ast_stmt_while(__decltype(exp) exp, __decltype(stmt) stmt, const sloc sl)
: ast_stmt(sl)
, exp(exp)
, stmt(stmt)
{}
virtual void assign_ids(const varcontext& c);
virtual ast_stmt_type type() const;
virtual void pretty_print(std::ostream& s, const uint tab) const;
virtual substitution infer_type(const typecontext& c, const s_ptr<const sl_type> t) const;
virtual bool contains_return() const;
virtual s_ptr<const ir_stmt> translate(const ircontext& c) const;
virtual void map(ast_stmt_mapper& m) const;
};
class ast_stmt_assignment : public ast_stmt
{
public:
const s_ptr<ast_id> id;
const s_ptr<ast_exp> exp;
ast_stmt_assignment(__decltype(id) id, __decltype(exp) exp, const sloc sl)
: ast_stmt(sl)
, id(id)
, exp(exp)
{}
virtual void assign_ids(const varcontext& c);
virtual ast_stmt_type type() const;
virtual void pretty_print(std::ostream& s, const uint tab) const;
virtual substitution infer_type(const typecontext& c, const s_ptr<const sl_type> t) const;
virtual bool contains_return() const;
virtual s_ptr<const ir_stmt> translate(const ircontext& c) const;
virtual void map(ast_stmt_mapper& m) const;
};
class ast_stmt_fun_call : public ast_stmt
{
public:
const s_ptr<ast_fun_call> f;
ast_stmt_fun_call(__decltype(f) f, const sloc sl)
: ast_stmt(sl)
, f(f)
{}
virtual void assign_ids(const varcontext& c);
virtual ast_stmt_type type() const;
virtual void pretty_print(std::ostream& s, const uint tab) const;
virtual substitution infer_type(const typecontext& c, const s_ptr<const sl_type> t) const;
virtual bool contains_return() const;
virtual s_ptr<const ir_stmt> translate(const ircontext& c) const;
virtual void map(ast_stmt_mapper& m) const;
};
class ast_stmt_return : public ast_stmt
{
public:
const boost::optional<s_ptr<ast_exp>> exp;
ast_stmt_return(const sloc sl)
: ast_stmt(sl)
, exp()
{}
ast_stmt_return(__decltype(exp) exp, const sloc sl)
: ast_stmt(sl)
, exp(exp)
{}
virtual void assign_ids(const varcontext& c);
virtual ast_stmt_type type() const;
virtual void pretty_print(std::ostream& s, const uint tab) const;
virtual substitution infer_type(const typecontext& c, const s_ptr<const sl_type> t) const;
virtual bool contains_return() const;
virtual s_ptr<const ir_stmt> translate(const ircontext& c) const;
virtual void map(ast_stmt_mapper& m) const;
};
}
#endif
|
Require Import SOL.
Require Import Decidable Enumerable.
Require Import List.
Import ListNotations.
Inductive full_logic_sym : Type :=
| Conj : full_logic_sym
| Disj : full_logic_sym
| Impl : full_logic_sym.
Inductive full_logic_quant : Type :=
| All : full_logic_quant
| Ex : full_logic_quant.
Instance full_operators : operators :=
{| binop := full_logic_sym ; quantop := full_logic_quant |}.
Instance eqdec_full_logic_sym : eq_dec full_logic_sym.
Proof.
intros x y. unfold dec. decide equality.
Qed.
Instance eqdec_full_logic_quant : Decidable.eq_dec full_logic_quant.
Proof.
intros x y. unfold dec. decide equality.
Qed.
Definition L_binop (n : nat) := [Conj; Impl; Disj].
Instance enum_binop :
list_enumerator__T L_binop binop.
Proof.
intros []; exists 0; cbn; tauto.
Qed.
Definition L_quantop (n : nat) := [All; Ex].
Instance enum_quantop :
list_enumerator__T L_quantop quantop.
Proof.
intros []; exists 0; cbn; tauto.
Qed.
Lemma enumT_binop :
enumerable__T binop.
Proof.
apply enum_enumT. exists L_binop. apply enum_binop.
Qed.
Lemma enumT_quantop :
enumerable__T quantop.
Proof.
apply enum_enumT. exists L_quantop. apply enum_quantop.
Qed.
(** Multiple quantifiers and closing operations *)
Require Import Lia.
Section Close.
Fixpoint iter {X} (f : X -> X) n x := match n with O => x | S n' => f (iter f n' x) end.
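(* For example, iter f 3 x unfolds to f (f (f x)). *)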
Lemma iter_switch {X} f n (x : X) :
iter f n (f x) = f (iter f n x).
Proof.
induction n; cbn; congruence.
Qed.
Context {Σf : funcs_signature}.
Context {Σp : preds_signature}.
Definition quant_indi_n op n := iter (@quant_indi _ _ full_operators op) n.
Definition quant_pred_n op ar n := iter (quant_pred op ar) n.
Definition close_indi op phi := quant_indi_n op (proj1_sig (find_bounded_indi phi)) phi.
Definition close_pred' ar op phi := quant_pred_n op ar (proj1_sig (find_bounded_pred ar phi)) phi.
Fixpoint close_pred'' n op phi := match n with 0 => phi | S n => close_pred' n op (close_pred'' n op phi) end.
Definition close_pred op phi := close_pred'' (find_arity_bound_p phi) op phi.
Definition close op phi := close_indi op (close_pred op phi).
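(* close thus closes the predicate variables at every arity below the arity bound
first, and then prefixes the quantifiers for the individual variables. *)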
Fixpoint forall_n n phi := match n with
| 0 => phi
| S n => quant_indi All (forall_n n phi)
end.
Lemma nat_ind_2 (P : nat -> nat -> Prop) :
(forall x, P x 0) -> (forall x y, P x y -> P y x -> P x (S y)) -> forall x y, P x y.
Proof.
intros H1 H2. intros x y. revert x. induction y.
- apply H1.
- intros x. apply H2. apply IHy. induction x; firstorder.
Qed.
Lemma close_indi_correct op phi :
bounded_indi 0 (close_indi op phi).
Proof.
enough (forall n m, bounded_indi n phi -> bounded_indi (n-m) (iter (quant_indi op) m phi)) as X.
{ unfold close_indi. destruct find_bounded_indi as [n H]. cbn.
specialize (X n n). replace (n-n) with 0 in X by lia. now apply X. }
apply (nat_ind_2 (fun n m => bounded_indi n phi -> bounded_indi (n - m) (iter (quant_indi op) m phi))); cbn.
- intros n B. now replace (n-0) with n by lia.
- intros n m IH _ B. eapply bounded_indi_up. 2: apply IH. lia. exact B.
Qed.
Lemma close_indi_bounded_pred ar n op phi :
bounded_pred ar n phi -> bounded_pred ar n (close_indi op phi).
Proof.
intros H. unfold close_indi. destruct find_bounded_indi as [b B]; cbn. clear B. now induction b.
Qed.
Lemma close_pred'_correct ar op phi :
bounded_pred ar 0 (close_pred' ar op phi).
Proof.
enough (forall n m, bounded_pred ar n phi -> bounded_pred ar (n-m) (iter (quant_pred op ar) m phi)) as X.
{ unfold close_pred'. destruct find_bounded_pred as [n H]. cbn.
specialize (X n n). replace (n-n) with 0 in X by lia. now apply X. }
apply (nat_ind_2 (fun n m => bounded_pred ar n phi -> bounded_pred ar (n - m) (iter (quant_pred op ar) m phi))); cbn.
- intros n B. now replace (n-0) with n by lia.
- intros n m IH _ B. left. split. reflexivity. eapply bounded_pred_up. 2: apply IH. lia. exact B.
Qed.
Lemma close_pred'_bounded_pred ar n m op phi :
bounded_pred ar n phi -> bounded_pred ar n (close_pred' m op phi).
Proof.
intros H. unfold close_pred'. destruct find_bounded_pred as [b B]. cbn.
clear B. induction b; cbn. easy. assert (ar = m \/ ar <> m) as [] by lia.
left. split. easy. eapply bounded_pred_up. 2: apply IHb. lia. now right.
Qed.
Lemma close_pred'_arity_bounded_p n m op phi :
arity_bounded_p n phi -> arity_bounded_p n (close_pred' m op phi).
Proof.
intros H. unfold close_pred'. destruct find_bounded_pred as [b B]. cbn.
clear B. now induction b.
Qed.
Lemma close_pred''_arity_bounded_p n m op phi :
arity_bounded_p n phi -> arity_bounded_p n (close_pred'' m op phi).
Proof.
intros H. induction m; cbn. easy. apply close_pred'_arity_bounded_p, IHm.
Qed.
Lemma close_pred_correct op phi :
forall ar, bounded_pred ar 0 (close_pred op phi).
Proof.
intros ar. assert (ar >= find_arity_bound_p phi \/ ar < find_arity_bound_p phi) as [H|H] by lia.
- eapply bounded_pred_arity_bound. 2: apply H.
apply close_pred''_arity_bounded_p, find_arity_bound_p_correct.
- revert H. enough (forall n, ar < n -> bounded_pred ar 0 (close_pred'' n op phi)) by eauto.
induction n. lia. intros H. assert (ar = n \/ ar < n) as [->|] by lia.
+ apply close_pred'_correct.
+ now apply close_pred'_bounded_pred, IHn.
Qed.
Lemma close_indi_funcfree op phi :
funcfree phi -> funcfree (close_indi op phi).
Proof.
intros F. unfold close_indi. destruct find_bounded_indi as [n B]. cbn.
clear B. now induction n.
Qed.
Lemma close_pred_funcfree op phi :
funcfree phi -> funcfree (close_pred op phi).
Proof.
intros F. unfold close_pred. enough (forall n, funcfree (close_pred'' n op phi)) by easy.
induction n; cbn. apply F. enough (forall psi m, funcfree psi -> funcfree (close_pred' m op psi)) by firstorder.
intros psi m. unfold close_pred'. destruct find_bounded_pred as [b B]. cbn.
clear B. now induction b.
Qed.
Lemma forall_n_funcfree n phi :
funcfree phi -> funcfree (forall_n n phi).
Proof.
now induction n.
Qed.
End Close.
Notation "∀i Phi" := (@quant_indi _ _ full_operators All Phi) (at level 50).
Notation "∃i Phi" := (@quant_indi _ _ full_operators Ex Phi) (at level 50).
Notation "∀f ( ar ) Phi" := (@quant_func _ _ full_operators All ar Phi) (at level 50).
Notation "∃f ( ar ) Phi" := (@quant_func _ _ full_operators Ex ar Phi) (at level 50).
Notation "∀p ( ar ) Phi" := (@quant_pred _ _ full_operators All ar Phi) (at level 50).
Notation "∃p ( ar ) Phi" := (@quant_pred _ _ full_operators Ex ar Phi) (at level 50).
Notation "⊥" := fal.
Notation "A ∧ B" := (@bin _ _ full_operators Conj A B) (at level 41).
Notation "A ∨ B" := (@bin _ _ full_operators Disj A B) (at level 42).
Notation "A '-->' B" := (@bin _ _ full_operators Impl A B) (at level 43, right associativity).
Notation "A '<-->' B" := ((A --> B) ∧ (B --> A)) (at level 43).
Notation "¬ A" := (A --> ⊥) (at level 40).
|
function xyz = bvh2xyz(skel, channels, noOffset)
% BVH2XYZ Compute XYZ values given structure and channels.
% FORMAT
% DESC Computes X, Y, Z coordinates given a BVH skeleton structure and
% an associated set of channels.
% ARG skel : a skeleton for the bvh file.
% ARG channels : the channels for the bvh file.
% ARG noOffset : don't add the offset in.
% RETURN xyz : the point cloud positions for the skeleton.
%
% COPYRIGHT : Neil D. Lawrence, 2005, 2008, 2012
%
% SEEALSO : acclaim2xyz, skel2xyz
% MOCAP
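% Example (sketch; assumes the MOCAP toolbox's bvhReadFile is on the path):
% [skel, channels, frameLength] = bvhReadFile('example.bvh');
% xyz = bvh2xyz(skel, channels(1, :)); % point cloud for the first frame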
if nargin < 3
noOffset = false;
end
for i = 1:length(skel.tree)
if ~isempty(skel.tree(i).posInd)
xpos = channels(skel.tree(i).posInd(1));
ypos = channels(skel.tree(i).posInd(2));
zpos = channels(skel.tree(i).posInd(3));
else
xpos = 0;
ypos = 0;
zpos = 0;
end
xyzStruct(i) = struct('rotation', [], 'xyz', []);
if nargin < 2 || isempty(skel.tree(i).rotInd)
xangle = 0;
yangle = 0;
zangle = 0;
else
xangle = deg2rad(channels(skel.tree(i).rotInd(1)));
yangle = deg2rad(channels(skel.tree(i).rotInd(2)));
zangle = deg2rad(channels(skel.tree(i).rotInd(3)));
end
thisRotation = rotationMatrix(xangle, yangle, zangle, skel.tree(i).order);
thisPosition = [xpos ypos zpos];
if ~skel.tree(i).parent
xyzStruct(i).rotation = thisRotation;
xyzStruct(i).xyz = thisPosition + skel.tree(i).offset;
else
if ~noOffset
thisPosition = skel.tree(i).offset + thisPosition;
end
xyzStruct(i).xyz = ...
thisPosition*xyzStruct(skel.tree(i).parent).rotation ...
+ xyzStruct(skel.tree(i).parent).xyz;
xyzStruct(i).rotation = thisRotation*xyzStruct(skel.tree(i).parent).rotation;
end
end
xyz = reshape([xyzStruct(:).xyz], 3, length(skel.tree))';
|
module NiLang
using Reexport
@reexport using NiLangCore
import NiLangCore: invtype
using FixedPointNumbers: Q20f43, Fixed
import NiLangCore: empty_global_stacks!, loaddata
export Fixed43
const Fixed43 = Q20f43
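# Fixed43 (= Q20f43) is a signed 64-bit fixed-point type with 43 fractional bits;
# e.g. Fixed43(0.5) + Fixed43(0.25) is exact, which keeps accumulation reversible
# where Float64 rounding would break invertibility.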
include("utils.jl")
include("wrappers.jl")
include("vars.jl")
include("instructs.jl")
include("ulog.jl")
include("complex.jl")
include("autobcast.jl")
include("macros.jl")
include("autodiff/autodiff.jl")
include("stdlib/stdlib.jl")
include("deprecations.jl")
export AD
project_relative_path(xs...) = normpath(joinpath(dirname(dirname(pathof(@__MODULE__))), xs...))
if Base.VERSION >= v"1.4.2"
include("precompile.jl")
_precompile_()
end
end # module
|
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedSemiring α
a b c d : α
H : 0 ≤ a
⊢ 0 ≤ a ^ 0
[PROOFSTEP]
rw [pow_zero]
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedSemiring α
a b c d : α
H : 0 ≤ a
⊢ 0 ≤ 1
[PROOFSTEP]
exact zero_le_one
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedSemiring α
a b c d : α
H : 0 ≤ a
n : ℕ
⊢ 0 ≤ a ^ (n + 1)
[PROOFSTEP]
rw [pow_succ]
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedSemiring α
a b c d : α
H : 0 ≤ a
n : ℕ
⊢ 0 ≤ a * a ^ n
[PROOFSTEP]
exact mul_nonneg H (pow_nonneg H _)
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedSemiring α
a b c d : α
a2 : 2 ≤ a
b0 : 0 ≤ b
⊢ a + (a + a * b) ≤ a * (2 + b)
[PROOFSTEP]
rw [mul_add, mul_two, add_assoc]
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedSemiring α
a b c d : α
h : 0 < a
⊢ 0 < bit1 a
[PROOFSTEP]
nontriviality
[GOAL]
α : Type u
β : Type u_1
inst✝¹ : OrderedSemiring α
a b c d : α
h : 0 < a
inst✝ : Nontrivial α
⊢ 0 < bit1 a
[PROOFSTEP]
exact bit1_pos h.le
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a✝ b✝ c✝ d : α
src✝¹ : OrderedRing α := inst✝
src✝ : Semiring α := Ring.toSemiring
a b c : α
h : a ≤ b
hc : 0 ≤ c
⊢ c * a ≤ c * b
[PROOFSTEP]
simpa only [mul_sub, sub_nonneg] using OrderedRing.mul_nonneg _ _ hc (sub_nonneg.2 h)
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a✝ b✝ c✝ d : α
src✝¹ : OrderedRing α := inst✝
src✝ : Semiring α := Ring.toSemiring
a b c : α
h : a ≤ b
hc : 0 ≤ c
⊢ a * c ≤ b * c
[PROOFSTEP]
simpa only [sub_mul, sub_nonneg] using OrderedRing.mul_nonneg _ _ (sub_nonneg.2 h) hc
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a b c d : α
h : b ≤ a
hc : c ≤ 0
⊢ c * a ≤ c * b
[PROOFSTEP]
simpa only [neg_mul, neg_le_neg_iff] using mul_le_mul_of_nonneg_left h (neg_nonneg.2 hc)
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a b c d : α
h : b ≤ a
hc : c ≤ 0
⊢ a * c ≤ b * c
[PROOFSTEP]
simpa only [mul_neg, neg_le_neg_iff] using mul_le_mul_of_nonneg_right h (neg_nonneg.2 hc)
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a b c d : α
ha : a ≤ 0
hb : b ≤ 0
⊢ 0 ≤ a * b
[PROOFSTEP]
simpa only [zero_mul] using mul_le_mul_of_nonpos_right ha hb
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a b c d : α
hb : b ≤ 0
h : a ≤ 1
⊢ b ≤ a * b
[PROOFSTEP]
simpa only [one_mul] using mul_le_mul_of_nonpos_right h hb
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a b c d : α
hb : b ≤ 0
h : 1 ≤ a
⊢ a * b ≤ b
[PROOFSTEP]
simpa only [one_mul] using mul_le_mul_of_nonpos_right h hb
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a b c d : α
ha : a ≤ 0
h : b ≤ 1
⊢ a ≤ a * b
[PROOFSTEP]
simpa only [mul_one] using mul_le_mul_of_nonpos_left h ha
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a b c d : α
ha : a ≤ 0
h : 1 ≤ b
⊢ a * b ≤ a
[PROOFSTEP]
simpa only [mul_one] using mul_le_mul_of_nonpos_left h ha
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a✝ b✝ c d a b : α
h : a ≤ b
⊢ b = a + (b - a)
[PROOFSTEP]
simp
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a✝ b✝ c✝ d a b : α
x✝ : ∃ c, c ≥ 0 ∧ b = a + c
c : α
hc : c ≥ 0
h : b = a + c
⊢ a ≤ b
[PROOFSTEP]
rw [h, le_add_iff_nonneg_right]
[GOAL]
α : Type u
β : Type u_1
inst✝ : OrderedRing α
a✝ b✝ c✝ d a b : α
x✝ : ∃ c, c ≥ 0 ∧ b = a + c
c : α
hc : c ≥ 0
h : b = a + c
⊢ 0 ≤ c
[PROOFSTEP]
exact hc
[GOAL]
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b✝ c✝ d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a b c : α
hab : a ≤ b
hc : 0 ≤ c
⊢ c * a ≤ c * b
[PROOFSTEP]
obtain rfl | hab := Decidable.eq_or_lt_of_le hab
[GOAL]
case inl
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b c✝ d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a c : α
hc : 0 ≤ c
hab : a ≤ a
⊢ c * a ≤ c * a
[PROOFSTEP]
rfl
[GOAL]
case inr
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b✝ c✝ d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a b c : α
hab✝ : a ≤ b
hc : 0 ≤ c
hab : a < b
⊢ c * a ≤ c * b
[PROOFSTEP]
obtain rfl | hc := Decidable.eq_or_lt_of_le hc
[GOAL]
case inr.inl
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b✝ c d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a b : α
hab✝ : a ≤ b
hab : a < b
hc : 0 ≤ 0
⊢ 0 * a ≤ 0 * b
[PROOFSTEP]
simp
[GOAL]
case inr.inr
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b✝ c✝ d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a b c : α
hab✝ : a ≤ b
hc✝ : 0 ≤ c
hab : a < b
hc : 0 < c
⊢ c * a ≤ c * b
[PROOFSTEP]
exact (mul_lt_mul_of_pos_left hab hc).le
[GOAL]
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b✝ c✝ d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a b c : α
hab : a ≤ b
hc : 0 ≤ c
⊢ a * c ≤ b * c
[PROOFSTEP]
obtain rfl | hab := Decidable.eq_or_lt_of_le hab
[GOAL]
case inl
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b c✝ d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a c : α
hc : 0 ≤ c
hab : a ≤ a
⊢ a * c ≤ a * c
[PROOFSTEP]
rfl
[GOAL]
case inr
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b✝ c✝ d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a b c : α
hab✝ : a ≤ b
hc : 0 ≤ c
hab : a < b
⊢ a * c ≤ b * c
[PROOFSTEP]
obtain rfl | hc := Decidable.eq_or_lt_of_le hc
[GOAL]
case inr.inl
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b✝ c d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a b : α
hab✝ : a ≤ b
hab : a < b
hc : 0 ≤ 0
⊢ a * 0 ≤ b * 0
[PROOFSTEP]
simp
[GOAL]
case inr.inr
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a✝ b✝ c✝ d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝ : StrictOrderedSemiring α := inst✝¹
a b c : α
hab✝ : a ≤ b
hc✝ : 0 ≤ c
hab : a < b
hc : 0 < c
⊢ a * c ≤ b * c
[PROOFSTEP]
exact (mul_lt_mul_of_pos_right hab hc).le
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedSemiring α
a b c d : α
H : 0 < a
⊢ 0 < a ^ 0
[PROOFSTEP]
nontriviality
[GOAL]
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a b c d : α
H : 0 < a
inst✝ : Nontrivial α
⊢ 0 < a ^ 0
[PROOFSTEP]
rw [pow_zero]
[GOAL]
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a b c d : α
H : 0 < a
inst✝ : Nontrivial α
⊢ 0 < 1
[PROOFSTEP]
exact zero_lt_one
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedSemiring α
a b c d : α
H : 0 < a
n : ℕ
⊢ 0 < a ^ (n + 1)
[PROOFSTEP]
rw [pow_succ]
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedSemiring α
a b c d : α
H : 0 < a
n : ℕ
⊢ 0 < a * a ^ n
[PROOFSTEP]
exact mul_pos H (pow_pos H _)
[GOAL]
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a b c d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
h1 : a < c
h2 : b < d
h3 : 0 ≤ a
h4 : 0 ≤ b
b0 : 0 = b
⊢ a * b < c * d
[PROOFSTEP]
rw [← b0, mul_zero]
[GOAL]
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedSemiring α
a b c d : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
h1 : a < c
h2 : b < d
h3 : 0 ≤ a
h4 : 0 ≤ b
b0 : 0 = b
⊢ 0 < c * d
[PROOFSTEP]
exact mul_pos (h3.trans_lt h1) (h4.trans_lt h2)
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedSemiring α
a b c d : α
⊢ a < c → b < d → 0 ≤ a → 0 ≤ b → a * b < c * d
[PROOFSTEP]
classical exact Decidable.mul_lt_mul''
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedSemiring α
a b c d : α
⊢ a < c → b < d → 0 ≤ a → 0 ≤ b → a * b < c * d
[PROOFSTEP]
exact Decidable.mul_lt_mul''
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedSemiring α
a b c d : α
hn : 0 < a
hm : 1 < b
⊢ a < b * a
[PROOFSTEP]
convert mul_lt_mul_of_pos_right hm hn
[GOAL]
case h.e'_3
α : Type u
β : Type u_1
inst✝ : StrictOrderedSemiring α
a b c d : α
hn : 0 < a
hm : 1 < b
⊢ a = 1 * a
[PROOFSTEP]
rw [one_mul]
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedSemiring α
a b c d : α
hn : 0 < a
hm : 1 < b
⊢ a < a * b
[PROOFSTEP]
convert mul_lt_mul_of_pos_left hm hn
[GOAL]
case h.e'_3
α : Type u
β : Type u_1
inst✝ : StrictOrderedSemiring α
a b c d : α
hn : 0 < a
hm : 1 < b
⊢ a = a * 1
[PROOFSTEP]
rw [mul_one]
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedRing α
a✝ b✝ c✝ : α
src✝¹ : StrictOrderedRing α := inst✝
src✝ : Semiring α := Ring.toSemiring
a b c : α
h : a < b
hc : 0 < c
⊢ c * a < c * b
[PROOFSTEP]
simpa only [mul_sub, sub_pos] using StrictOrderedRing.mul_pos _ _ hc (sub_pos.2 h)
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedRing α
a✝ b✝ c✝ : α
src✝¹ : StrictOrderedRing α := inst✝
src✝ : Semiring α := Ring.toSemiring
a b c : α
h : a < b
hc : 0 < c
⊢ a * c < b * c
[PROOFSTEP]
simpa only [sub_mul, sub_pos] using StrictOrderedRing.mul_pos _ _ (sub_pos.2 h) hc
[GOAL]
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedRing α
a✝ b✝ c : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝¹ : StrictOrderedRing α := inst✝¹
src✝ : Semiring α := Ring.toSemiring
a b : α
ha : 0 ≤ a
hb : 0 ≤ b
⊢ 0 ≤ a * b
[PROOFSTEP]
obtain ha | ha := Decidable.eq_or_lt_of_le ha
[GOAL]
case inl
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedRing α
a✝ b✝ c : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝¹ : StrictOrderedRing α := inst✝¹
src✝ : Semiring α := Ring.toSemiring
a b : α
ha✝ : 0 ≤ a
hb : 0 ≤ b
ha : 0 = a
⊢ 0 ≤ a * b
[PROOFSTEP]
rw [← ha, zero_mul]
[GOAL]
case inr
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedRing α
a✝ b✝ c : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝¹ : StrictOrderedRing α := inst✝¹
src✝ : Semiring α := Ring.toSemiring
a b : α
ha✝ : 0 ≤ a
hb : 0 ≤ b
ha : 0 < a
⊢ 0 ≤ a * b
[PROOFSTEP]
obtain hb | hb := Decidable.eq_or_lt_of_le hb
[GOAL]
case inr.inl
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedRing α
a✝ b✝ c : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝¹ : StrictOrderedRing α := inst✝¹
src✝ : Semiring α := Ring.toSemiring
a b : α
ha✝ : 0 ≤ a
hb✝ : 0 ≤ b
ha : 0 < a
hb : 0 = b
⊢ 0 ≤ a * b
[PROOFSTEP]
rw [← hb, mul_zero]
[GOAL]
case inr.inr
α : Type u
β : Type u_1
inst✝¹ : StrictOrderedRing α
a✝ b✝ c : α
inst✝ : DecidableRel fun x x_1 => x ≤ x_1
src✝¹ : StrictOrderedRing α := inst✝¹
src✝ : Semiring α := Ring.toSemiring
a b : α
ha✝ : 0 ≤ a
hb✝ : 0 ≤ b
ha : 0 < a
hb : 0 < b
⊢ 0 ≤ a * b
[PROOFSTEP]
exact (StrictOrderedRing.mul_pos _ _ ha hb).le
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedRing α
a b c : α
h : b < a
hc : c < 0
⊢ c * a < c * b
[PROOFSTEP]
simpa only [neg_mul, neg_lt_neg_iff] using mul_lt_mul_of_pos_left h (neg_pos_of_neg hc)
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedRing α
a b c : α
h : b < a
hc : c < 0
⊢ a * c < b * c
[PROOFSTEP]
simpa only [mul_neg, neg_lt_neg_iff] using mul_lt_mul_of_pos_right h (neg_pos_of_neg hc)
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedRing α
a✝ b✝ c a b : α
ha : a < 0
hb : b < 0
⊢ 0 < a * b
[PROOFSTEP]
simpa only [zero_mul] using mul_lt_mul_of_neg_right ha hb
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedRing α
a b c : α
hb : b < 0
h : a < 1
⊢ b < a * b
[PROOFSTEP]
simpa only [one_mul] using mul_lt_mul_of_neg_right h hb
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedRing α
a b c : α
hb : b < 0
h : 1 < a
⊢ a * b < b
[PROOFSTEP]
simpa only [one_mul] using mul_lt_mul_of_neg_right h hb
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedRing α
a b c : α
ha : a < 0
h : b < 1
⊢ a < a * b
[PROOFSTEP]
simpa only [mul_one] using mul_lt_mul_of_neg_left h ha
[GOAL]
α : Type u
β : Type u_1
inst✝ : StrictOrderedRing α
a b c : α
ha : a < 0
h : 1 < b
⊢ a * b < a
[PROOFSTEP]
simpa only [mul_one] using mul_lt_mul_of_neg_left h ha
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
hab : 0 ≤ a * b
⊢ 0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0
[PROOFSTEP]
refine' Decidable.or_iff_not_and_not.2 _
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
hab : 0 ≤ a * b
⊢ ¬(¬(0 ≤ a ∧ 0 ≤ b) ∧ ¬(a ≤ 0 ∧ b ≤ 0))
[PROOFSTEP]
simp only [not_and, not_le]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
hab : 0 ≤ a * b
⊢ (0 ≤ a → b < 0) → ¬(a ≤ 0 → 0 < b)
[PROOFSTEP]
intro ab nab
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
hab : 0 ≤ a * b
ab : 0 ≤ a → b < 0
nab : a ≤ 0 → 0 < b
⊢ False
[PROOFSTEP]
apply
not_lt_of_le hab
_
-- Porting note: for the middle case, we used to have `rfl`, but it is now rejected.
-- https://github.com/leanprover/std4/issues/62
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
hab : 0 ≤ a * b
ab : 0 ≤ a → b < 0
nab : a ≤ 0 → 0 < b
⊢ a * b < 0
[PROOFSTEP]
rcases lt_trichotomy 0 a with (ha | ha | ha)
[GOAL]
case inl
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
hab : 0 ≤ a * b
ab : 0 ≤ a → b < 0
nab : a ≤ 0 → 0 < b
ha : 0 < a
⊢ a * b < 0
[PROOFSTEP]
exact mul_neg_of_pos_of_neg ha (ab ha.le)
[GOAL]
case inr.inl
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
hab : 0 ≤ a * b
ab : 0 ≤ a → b < 0
nab : a ≤ 0 → 0 < b
ha : 0 = a
⊢ a * b < 0
[PROOFSTEP]
subst ha
[GOAL]
case inr.inl
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
b c d : α
hab : 0 ≤ 0 * b
ab : 0 ≤ 0 → b < 0
nab : 0 ≤ 0 → 0 < b
⊢ 0 * b < 0
[PROOFSTEP]
exact ((ab le_rfl).asymm (nab le_rfl)).elim
[GOAL]
case inr.inr
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
hab : 0 ≤ a * b
ab : 0 ≤ a → b < 0
nab : a ≤ 0 → 0 < b
ha : a < 0
⊢ a * b < 0
[PROOFSTEP]
exact mul_neg_of_neg_of_pos ha (nab ha.le)
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
h : 0 < c
⊢ 0 ≤ c * b ↔ 0 ≤ b
[PROOFSTEP]
simpa using (mul_le_mul_left h : c * 0 ≤ c * b ↔ 0 ≤ b)
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
h : 0 < c
⊢ 0 ≤ b * c ↔ 0 ≤ b
[PROOFSTEP]
simpa using (mul_le_mul_right h : 0 * c ≤ b * c ↔ 0 ≤ b)
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
⊢ bit0 a ≤ bit0 b ↔ a ≤ b
[PROOFSTEP]
rw [bit0, bit0, ← two_mul, ← two_mul, mul_le_mul_left (zero_lt_two : 0 < (2 : α))]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
⊢ bit0 a < bit0 b ↔ a < b
[PROOFSTEP]
rw [bit0, bit0, ← two_mul, ← two_mul, mul_lt_mul_left (zero_lt_two : 0 < (2 : α))]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
⊢ 1 ≤ bit1 a ↔ 0 ≤ a
[PROOFSTEP]
rw [bit1, le_add_iff_nonneg_left, bit0, ← two_mul, zero_le_mul_left (zero_lt_two : 0 < (2 : α))]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
⊢ 1 < bit1 a ↔ 0 < a
[PROOFSTEP]
rw [bit1, lt_add_iff_pos_left, bit0, ← two_mul, zero_lt_mul_left (zero_lt_two : 0 < (2 : α))]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
⊢ 0 ≤ bit0 a ↔ 0 ≤ a
[PROOFSTEP]
rw [bit0, ← two_mul, zero_le_mul_left (zero_lt_two : 0 < (2 : α))]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedSemiring α
a b c d : α
⊢ 0 < bit0 a ↔ 0 < a
[PROOFSTEP]
rw [bit0, ← two_mul, zero_lt_mul_left (zero_lt_two : 0 < (2 : α))]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
src✝ : LinearOrderedRing α := inst✝
⊢ ∀ {a b : α}, a * b = 0 → a = 0 ∨ b = 0
[PROOFSTEP]
intro a b hab
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c : α
src✝ : LinearOrderedRing α := inst✝
a b : α
hab : a * b = 0
⊢ a = 0 ∨ b = 0
[PROOFSTEP]
refine' Decidable.or_iff_not_and_not.2 fun h => _
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c : α
src✝ : LinearOrderedRing α := inst✝
a b : α
hab : a * b = 0
h : ¬a = 0 ∧ ¬b = 0
⊢ False
[PROOFSTEP]
revert hab
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c : α
src✝ : LinearOrderedRing α := inst✝
a b : α
h : ¬a = 0 ∧ ¬b = 0
⊢ a * b = 0 → False
[PROOFSTEP]
cases' lt_or_gt_of_ne h.1 with ha ha
[GOAL]
case inl
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c : α
src✝ : LinearOrderedRing α := inst✝
a b : α
h : ¬a = 0 ∧ ¬b = 0
ha : a < 0
⊢ a * b = 0 → False
[PROOFSTEP]
cases' lt_or_gt_of_ne h.2 with hb hb
[GOAL]
case inr
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c : α
src✝ : LinearOrderedRing α := inst✝
a b : α
h : ¬a = 0 ∧ ¬b = 0
ha : a > 0
⊢ a * b = 0 → False
[PROOFSTEP]
cases' lt_or_gt_of_ne h.2 with hb hb
[GOAL]
case inl.inl
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c : α
src✝ : LinearOrderedRing α := inst✝
a b : α
h : ¬a = 0 ∧ ¬b = 0
ha : a < 0
hb : b < 0
⊢ a * b = 0 → False
case inl.inr
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c : α
src✝ : LinearOrderedRing α := inst✝
a b : α
h : ¬a = 0 ∧ ¬b = 0
ha : a < 0
hb : b > 0
⊢ a * b = 0 → False
case inr.inl
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c : α
src✝ : LinearOrderedRing α := inst✝
a b : α
h : ¬a = 0 ∧ ¬b = 0
ha : a > 0
hb : b < 0
⊢ a * b = 0 → False
case inr.inr
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c : α
src✝ : LinearOrderedRing α := inst✝
a b : α
h : ¬a = 0 ∧ ¬b = 0
ha : a > 0
hb : b > 0
⊢ a * b = 0 → False
[PROOFSTEP]
exacts [(mul_pos_of_neg_of_neg ha hb).ne.symm, (mul_neg_of_neg_of_pos ha hb).ne, (mul_neg_of_pos_of_neg ha hb).ne,
(mul_pos ha hb).ne.symm]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ : α
src✝ : Nontrivial α := inferInstance
a b c : α
ha : a ≠ 0
h : a * b = a * c
⊢ b = c
[PROOFSTEP]
rw [← sub_eq_zero, ← mul_sub] at h
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ : α
src✝ : Nontrivial α := inferInstance
a b c : α
ha : a ≠ 0
h✝ : a * b = a * c
h : a * (b - c) = 0
⊢ b = c
[PROOFSTEP]
exact sub_eq_zero.1 ((eq_zero_or_eq_zero_of_mul_eq_zero h).resolve_left ha)
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ : α
src✝ : Nontrivial α := inferInstance
a b c : α
hb : b ≠ 0
h : a * b = c * b
⊢ a = c
[PROOFSTEP]
rw [← sub_eq_zero, ← sub_mul] at h
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ : α
src✝ : Nontrivial α := inferInstance
a b c : α
hb : b ≠ 0
h✝ : a * b = c * b
h : (a - c) * b = 0
⊢ a = c
[PROOFSTEP]
exact sub_eq_zero.1 ((eq_zero_or_eq_zero_of_mul_eq_zero h).resolve_right hb)
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
⊢ a * b < 0 ↔ 0 < a ∧ b < 0 ∨ a < 0 ∧ 0 < b
[PROOFSTEP]
rw [← neg_pos, neg_mul_eq_mul_neg, mul_pos_iff, neg_pos, neg_lt_zero]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ a b c : α
⊢ 0 ≤ a * b ∨ 0 ≤ b * c ∨ 0 ≤ c * a
[PROOFSTEP]
iterate 3 rw [mul_nonneg_iff]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ a b c : α
⊢ 0 ≤ a * b ∨ 0 ≤ b * c ∨ 0 ≤ c * a
[PROOFSTEP]
rw [mul_nonneg_iff]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ a b c : α
⊢ (0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0) ∨ 0 ≤ b * c ∨ 0 ≤ c * a
[PROOFSTEP]
rw [mul_nonneg_iff]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ a b c : α
⊢ (0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0) ∨ (0 ≤ b ∧ 0 ≤ c ∨ b ≤ 0 ∧ c ≤ 0) ∨ 0 ≤ c * a
[PROOFSTEP]
rw [mul_nonneg_iff]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ a b c : α
⊢ (0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0) ∨ (0 ≤ b ∧ 0 ≤ c ∨ b ≤ 0 ∧ c ≤ 0) ∨ 0 ≤ c ∧ 0 ≤ a ∨ c ≤ 0 ∧ a ≤ 0
[PROOFSTEP]
have or_a := le_total 0 a
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ a b c : α
or_a : 0 ≤ a ∨ a ≤ 0
⊢ (0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0) ∨ (0 ≤ b ∧ 0 ≤ c ∨ b ≤ 0 ∧ c ≤ 0) ∨ 0 ≤ c ∧ 0 ≤ a ∨ c ≤ 0 ∧ a ≤ 0
[PROOFSTEP]
have or_b := le_total 0 b
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ a b c : α
or_a : 0 ≤ a ∨ a ≤ 0
or_b : 0 ≤ b ∨ b ≤ 0
⊢ (0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0) ∨ (0 ≤ b ∧ 0 ≤ c ∨ b ≤ 0 ∧ c ≤ 0) ∨ 0 ≤ c ∧ 0 ≤ a ∨ c ≤ 0 ∧ a ≤ 0
[PROOFSTEP]
have or_c := le_total 0 c
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b✝ c✝ a b c : α
or_a : 0 ≤ a ∨ a ≤ 0
or_b : 0 ≤ b ∨ b ≤ 0
or_c : 0 ≤ c ∨ c ≤ 0
⊢ (0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0) ∨ (0 ≤ b ∧ 0 ≤ c ∨ b ≤ 0 ∧ c ≤ 0) ∨ 0 ≤ c ∧ 0 ≤ a ∨ c ≤ 0 ∧ a ≤ 0
[PROOFSTEP]
exact
Or.elim or_c
(fun (h0 : 0 ≤ c) =>
Or.elim or_b
(fun (h1 : 0 ≤ b) =>
Or.elim or_a (fun (h2 : 0 ≤ a) => Or.inl (Or.inl ⟨h2, h1⟩))
(fun (_ : a ≤ 0) => Or.inr (Or.inl (Or.inl ⟨h1, h0⟩))))
(fun (h1 : b ≤ 0) =>
Or.elim or_a (fun (h3 : 0 ≤ a) => Or.inr (Or.inr (Or.inl ⟨h0, h3⟩)))
(fun (h3 : a ≤ 0) => Or.inl (Or.inr ⟨h3, h1⟩))))
(fun (h0 : c ≤ 0) =>
Or.elim or_b
(fun (h4 : 0 ≤ b) =>
Or.elim or_a (fun (h5 : 0 ≤ a) => Or.inl (Or.inl ⟨h5, h4⟩))
(fun (h5 : a ≤ 0) => Or.inr (Or.inr (Or.inr ⟨h0, h5⟩))))
(fun (h4 : b ≤ 0) =>
Or.elim or_a (fun (_ : 0 ≤ a) => Or.inr (Or.inl (Or.inr ⟨h4, h0⟩)))
(fun (h6 : a ≤ 0) => Or.inl (Or.inr ⟨h6, h4⟩))))
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
⊢ a * b ≤ 0 ↔ 0 ≤ a ∧ b ≤ 0 ∨ a ≤ 0 ∧ 0 ≤ b
[PROOFSTEP]
rw [← neg_nonneg, neg_mul_eq_mul_neg, mul_nonneg_iff, neg_nonneg, neg_nonpos]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
⊢ -a ≤ a ↔ 0 ≤ a
[PROOFSTEP]
simp [neg_le_iff_add_nonneg, ← two_mul, mul_nonneg_iff, zero_le_one, (zero_lt_two' α).not_le]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
⊢ -a < a ↔ 0 < a
[PROOFSTEP]
simp [neg_lt_iff_pos_add, ← two_mul, mul_pos_iff, zero_lt_one, (zero_lt_two' α).not_lt]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
⊢ a ≤ -a ↔ - -a ≤ -a
[PROOFSTEP]
rw [neg_neg]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
⊢ a < -a ↔ - -a < -a
[PROOFSTEP]
rw [neg_neg]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
h : c * a < c * b
hc : c ≤ 0
⊢ -c * b < -c * a
[PROOFSTEP]
rwa [neg_mul, neg_mul, neg_lt_neg_iff]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
h : a * c < b * c
hc : c ≤ 0
⊢ b * -c < a * -c
[PROOFSTEP]
rwa [mul_neg, mul_neg, neg_lt_neg_iff]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b c a : α
⊢ 0 < a * a ↔ a ≠ 0
[PROOFSTEP]
constructor
[GOAL]
case mp
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b c a : α
⊢ 0 < a * a → a ≠ 0
[PROOFSTEP]
rintro h rfl
[GOAL]
case mp
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
h : 0 < 0 * 0
⊢ False
[PROOFSTEP]
rw [mul_zero] at h
[GOAL]
case mp
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c : α
h : 0 < 0
⊢ False
[PROOFSTEP]
exact h.false
[GOAL]
case mpr
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b c a : α
⊢ a ≠ 0 → 0 < a * a
[PROOFSTEP]
intro h
[GOAL]
case mpr
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b c a : α
h : a ≠ 0
⊢ 0 < a * a
[PROOFSTEP]
cases' h.lt_or_lt with h h
[GOAL]
case mpr.inl
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b c a : α
h✝ : a ≠ 0
h : a < 0
⊢ 0 < a * a
case mpr.inr
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a✝ b c a : α
h✝ : a ≠ 0
h : 0 < a
⊢ 0 < a * a
[PROOFSTEP]
exacts [mul_pos_of_neg_of_neg h h, mul_pos h h]
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c x y : α
⊢ x * x + y * y = 0 ↔ x = 0 ∧ y = 0
[PROOFSTEP]
rw [add_eq_zero_iff', mul_self_eq_zero, mul_self_eq_zero]
[GOAL]
case ha
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c x y : α
⊢ 0 ≤ x * x
[PROOFSTEP]
apply mul_self_nonneg
[GOAL]
case hb
α : Type u
β : Type u_1
inst✝ : LinearOrderedRing α
a b c x y : α
⊢ 0 ≤ y * y
[PROOFSTEP]
apply mul_self_nonneg
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedCommRing α
a b✝ c✝ d b c : α
ha : 0 ≤ a
hd : 0 ≤ d
ba : b * a ≤ max d b * max c a
cd : c * d ≤ max a c * max b d
⊢ a * b ≤ max a c * max d b
[PROOFSTEP]
simpa [mul_comm, max_comm] using ba
[GOAL]
α : Type u
β : Type u_1
inst✝ : LinearOrderedCommRing α
a b✝ c✝ d b c : α
ha : 0 ≤ a
hd : 0 ≤ d
ba : b * a ≤ max d b * max c a
cd : c * d ≤ max a c * max b d
⊢ d * c ≤ max a c * max d b
[PROOFSTEP]
simpa [mul_comm, max_comm] using cd
|
import .struc
import logic.equiv.defs
import .defs
open propagate_struc profinite
set_option class.instance_max_depth 100
section
open circuit
def xor_map : (boolp.prod boolp).map boolp :=
{ to_fun := λ x, bxor x.1 x.2,
preimage := λ C, C.bind (λ _, (var (sum.inl ())).xor (var (sum.inr ()))),
continuous' := begin
simp [circuit.to_set, eval_bind],
intros x C,
refl
end }
def and_map : (boolp.prod boolp).map boolp :=
{ to_fun := λ x, band x.1 x.2,
preimage := λ C, C.bind (λ _, (var (sum.inl ())).and (var (sum.inr ()))),
continuous' := begin
simp [circuit.to_set, eval_bind],
intros x C,
refl
end }
def or_map : (boolp.prod boolp).map boolp :=
{ to_fun := λ x, bor x.1 x.2,
preimage := λ C, C.bind (λ _, (var (sum.inl ())).or (var (sum.inr ()))),
continuous' := begin
simp [circuit.to_set, eval_bind],
intros x C,
refl
end }
def not_map : boolp.map boolp :=
{ to_fun := bnot,
preimage := λ C, C.bind (λ _, (var ()).not),
continuous' := begin
simp [circuit.to_set, eval_bind],
intros x C,
refl
end }
def bitwise_map2 (op : circuit (fin 2)) :
(boolp.prod boolp).map boolp :=
{ to_fun := λ x, op.eval (λ i, list.nth_le [x.1, x.2] i i.prop),
preimage := λ C, C.bind (λ _, op.map (λ i, list.nth_le [sum.inl (), sum.inr ()] i i.prop)),
continuous' := begin
simp [circuit.to_set, eval_bind, eval_map],
intros x C,
rw [iff_iff_eq],
congr' 2,
ext i,
cases i,
dsimp [boolp, profinite.prod, coe_sort, has_coe_to_sort.coe],
congr' 1,
ext i,
cases i with i hi,
cases i,
simp,
cases i,
simp,
simp [nat.succ_lt_succ_iff] at hi,
contradiction
end }
def bitwise_map3 (op : circuit (fin 3)) :
(boolp.prod (boolp.prod boolp)).map boolp :=
{ to_fun := λ x, op.eval (λ i, list.nth_le [x.1, x.2.1, x.2.2] i i.prop),
preimage := λ C, C.bind (λ x, op.map (λ i, list.nth_le [sum.inl (), sum.inr (sum.inl ()), sum.inr (sum.inr ())] i i.prop)),
continuous' := begin
simp [circuit.to_set, eval_bind, eval_map],
intros x C,
rw [iff_iff_eq],
congr' 2,
ext i,
cases i,
dsimp [boolp, profinite.prod, coe_sort, has_coe_to_sort.coe],
congr' 1,
ext i,
cases i with i hi,
cases i,
simp,
cases i,
simp,
cases i,
simp,
simp [nat.succ_lt_succ_iff] at hi,
contradiction
end }
def const {X Y : profinite} (y : Y) : map X Y :=
{ to_fun := λ _, y,
preimage := λ C, if y ∈ C.to_set then true else false,
continuous' := λ x C, begin
split_ifs;
simp [circuit.to_set, *] at *,
end }
def xor_struc : propagate_struc (boolp.prod boolp) unitp :=
{ init := (),
transition := const (),
output := sndm.comp xor_map }
def and_struc : propagate_struc (boolp.prod boolp) unitp :=
{ init := (),
transition := const (),
output := sndm.comp and_map }
def or_struc : propagate_struc (boolp.prod boolp) unitp :=
{ init := (),
transition := const (),
output := sndm.comp or_map }
def not_struc : propagate_struc boolp unitp :=
{ init := (),
transition := const (),
output := sndm.comp not_map }
def ls_struc (b : bool) : propagate_struc boolp boolp :=
{ init := b,
transition := sndm,
output := fstm }
def add_struc : propagate_struc (boolp.prod boolp) boolp :=
{ init := ff,
transition := bitwise_map3 (or (and (var 0) (var 1)) (or (and (var 0) (var 2)) (and (var 1) (var 2)))),
output := bitwise_map3 (xor (xor (var 0) (var 1)) (var 2)) }
def sub_struc : propagate_struc (boolp.prod boolp) boolp :=
{ init := ff,
transition := bitwise_map3 (or (and (not (var 1)) (var 2)) (and (not (xor (var 1) (var 2))) (var 0))),
output := bitwise_map3 (xor (xor (var 0) (var 1)) (var 2)) }
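-- add_struc and sub_struc are a serial full adder and subtractor: the state bit is
-- the carry (resp. borrow), the transition is the majority (resp. borrow-propagation)
-- circuit of the three bits, and the output is their xor.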
def neg_struc : propagate_struc boolp boolp :=
{ init := tt,
transition := bitwise_map2 (and (not (var 1)) (var 0)),
output := bitwise_map2 (xor (not (var 1)) (var 0)) }
def incr_struc : propagate_struc boolp boolp :=
{ init := tt,
transition := and_map,
output := xor_map }
def decr_struc : propagate_struc boolp boolp :=
{ init := tt,
transition := bitwise_map2 (and (not (var 1)) (var 0)),
output := xor_map }
def rearrange_prod₁ {W X Y Z : profinite} :
((W.prod X).prod (Y.prod Z)).map ((W.prod Y).prod (X.prod Z)) :=
reindex begin
dsimp [profinite.prod],
refine (equiv.sum_assoc _ _ _).symm.trans _,
refine equiv.trans _ (equiv.sum_assoc _ _ _),
refine equiv.sum_congr _ (equiv.refl _),
refine equiv.trans (equiv.sum_assoc _ _ _) _,
refine equiv.trans _ (equiv.sum_assoc _ _ _).symm,
refine equiv.sum_congr (equiv.refl _) _,
refine equiv.sum_comm _ _
end
def prod_assoc {X Y Z : profinite} :
((X.prod Y).prod Z).map (X.prod (Y.prod Z)) :=
reindex (equiv.sum_assoc _ _ _).symm
def bin_comp {input state₁ state₂ state₃ : profinite} (p : propagate_struc (boolp.prod boolp) state₁)
(q : propagate_struc input state₂) (r : propagate_struc input state₃) :
propagate_struc input (state₁.prod (state₂.prod state₃)) :=
{ init := (p.init, q.init, r.init),
transition := begin
have f₁ := prod_assoc.comp ((prod_mapm (map.id _) ((prod_mapm (map.id _) (diag)).comp
(rearrange_prod₁.comp (prod_mapm q.output r.output)))).comp p.transition),
have f₂ := (prod_mapm (sndm.comp fstm) (map.id _)).comp q.transition,
have f₃ := (prod_mapm (sndm.comp sndm) (map.id _)).comp r.transition,
exact prod_mk f₁ (prod_mk f₂ f₃)
end,
output := begin
refine map.comp _ p.output,
refine prod_assoc.comp _,
refine prod_mapm (map.id _) _,
have := rearrange_prod₁.comp (prod_mapm q.output r.output),
refine map.comp _ this,
refine prod_mapm (map.id _) diag,
end }
@[simp] lemma bin_comp_init {input state₁ state₂ state₃ : profinite}
(p : propagate_struc (boolp.prod boolp) state₁) (q : propagate_struc input state₂)
(r : propagate_struc input state₃) :
(bin_comp p q r).init = (p.init, q.init, r.init) := rfl
@[simp] lemma bin_comp_transition {input state₁ state₂ state₃ : profinite}
(p : propagate_struc (boolp.prod boolp) state₁) (q : propagate_struc input state₂)
(r : propagate_struc input state₃) :
coe_fn (bin_comp p q r).transition = (λ x : (state₁.prod (state₂.prod state₃)).prod input,
(p.transition (x.1.1, q.output (x.1.2.1, x.2), r.output (x.1.2.2, x.2)),
q.transition (x.1.2.1, x.2),
r.transition (x.1.2.2, x.2))) :=
begin
funext i,
rcases i with ⟨⟨i, j, k⟩, x⟩,
dsimp [nth_output, bin_comp, prod_assoc, map.comp, prod_mapm, boolp, function.comp,
map.id, coe_fn, has_coe_to_fun.coe, prod_mk, fstm, sndm, diag, rearrange_prod₁, reindex,
equiv.sum_assoc, equiv.trans, equiv.refl, equiv.sum_congr, equiv.symm, equiv.sum_comm,
profinite.prod, prod_mk_reindex],
simp [inv_proj]
end
@[simp] lemma bin_comp_output {input state₁ state₂ state₃ : profinite}
(p : propagate_struc (boolp.prod boolp) state₁) (q : propagate_struc input state₂)
(r : propagate_struc input state₃) :
coe_fn (bin_comp p q r).output = (λ x : (state₁.prod (state₂.prod state₃)).prod input,
p.output (x.1.1, q.output (x.1.2.1, x.2), r.output (x.1.2.2, x.2))) :=
begin
funext i,
rcases i with ⟨⟨i, j, k⟩, x⟩,
dsimp [nth_output, bin_comp, prod_assoc, map.comp, prod_mapm, boolp, function.comp,
map.id, coe_fn, has_coe_to_fun.coe, prod_mk, fstm, sndm, diag, rearrange_prod₁, reindex,
equiv.sum_assoc, equiv.trans, equiv.refl, equiv.sum_congr, equiv.symm, equiv.sum_comm,
profinite.prod, prod_mk_reindex],
simp [inv_proj]
end
lemma nth_state_bin_comp {input state₁ state₂ state₃ : profinite} (p : propagate_struc (boolp.prod boolp) state₁)
(q : propagate_struc input state₂) (r : propagate_struc input state₃) (x : ℕ → input) (n : ℕ) :
(bin_comp p q r).nth_state x n = (p.nth_state (λ i, (q.nth_output x i, r.nth_output x i)) n,
q.nth_state x n, r.nth_state x n) ∧
(bin_comp p q r).nth_output x n = p.nth_output (λ i, (q.nth_output x i, r.nth_output x i)) n :=
begin
induction n with n ih,
{ simp [nth_state] },
{ simp * }
end
def una_comp {input state₁ state₂ : profinite} (p : propagate_struc boolp state₁)
(q : propagate_struc input state₂) : propagate_struc input (state₁.prod state₂) :=
{ init := (p.init, q.init),
transition := begin
have := p.transition,
have := q.output,
have := q.transition,
refine prod_mk (prod_assoc.comp _) _,
{ refine (prod_mapm (map.id _) q.output).comp p.transition },
{ refine (prod_mapm sndm (map.id _)).comp q.transition }
end,
output := begin
have := p.output,
have := q.output,
have := prod_assoc.comp ((prod_mapm _ q.output).comp p.output),
exact this,
exact map.id _
end }
def propagate_struc.proj (n : ℕ) : propagate_struc twoadic unitp :=
{ init := (),
transition := const (),
output := sndm.comp (projm _ n) }
def propagate_struc.zero : propagate_struc twoadic unitp :=
{ init := (),
transition := const (),
output := const ff }
def propagate_struc.neg_one : propagate_struc twoadic unitp :=
{ init := (),
transition := const (),
output := const tt }
def propagate_struc.one : propagate_struc twoadic boolp :=
{ init := tt,
transition :=
{ to_fun := λ _, ff,
preimage := λ C,
if h : ff ∈ C.to_set
then true
else false,
continuous' := begin
intros,split_ifs;
dsimp [circuit.to_set, set.mem_def, set_of] at *;
simp * at *
end },
output := fstm }
def of_term : term → Σ (state : profinite), propagate_struc twoadic state
| (term.var n) := ⟨unitp, propagate_struc.proj n⟩
| (term.zero) := ⟨unitp, propagate_struc.zero⟩
| (term.one) := ⟨_, propagate_struc.one⟩
| (term.and t₁ t₂) :=
let p₁ := of_term t₁,
p₂ := of_term t₂ in
⟨_, bin_comp and_struc p₁.2 p₂.2⟩
| (term.or t₁ t₂) :=
let p₁ := of_term t₁,
p₂ := of_term t₂ in
⟨_, bin_comp or_struc p₁.2 p₂.2⟩
| (term.neg t) :=
let p := of_term t in
⟨_, una_comp neg_struc p.2⟩
| (term.neg_one) := ⟨_, propagate_struc.neg_one⟩
| (term.add t₁ t₂) :=
let p₁ := of_term t₁,
p₂ := of_term t₂ in
⟨_, bin_comp add_struc p₁.2 p₂.2⟩
| (term.xor t₁ t₂) :=
let p₁ := of_term t₁,
p₂ := of_term t₂ in
⟨_, bin_comp xor_struc p₁.2 p₂.2⟩
| (term.not t) :=
let p := of_term t in
⟨_, una_comp not_struc p.2⟩
| (term.ls t) :=
let p := of_term t in
⟨_, una_comp (ls_struc ff) p.2⟩
| (term.sub t₁ t₂) :=
let p₁ := of_term t₁,
p₂ := of_term t₂ in
⟨_, bin_comp sub_struc p₁.2 p₂.2⟩
| (term.incr t) :=
let p := of_term t in
⟨_, una_comp incr_struc p.2⟩
| (term.decr t) :=
let p := of_term t in
⟨_, una_comp decr_struc p.2⟩
instance : Π (t : term), has_repr (of_term t).1.ι
| (term.var n) := by dsimp [of_term]; apply_instance
| (term.zero) := by dsimp [of_term]; apply_instance
| (term.one) := by dsimp [of_term]; apply_instance
| (term.add t₁ t₂) := by letI := ι.has_repr t₁; letI := ι.has_repr t₂;
dsimp [of_term]; apply_instance
| (term.and t₁ t₂) := by letI := ι.has_repr t₁; letI := ι.has_repr t₂;
dsimp [of_term]; apply_instance
| (term.or t₁ t₂) := by letI := ι.has_repr t₁; letI := ι.has_repr t₂;
dsimp [of_term]; apply_instance
| (term.neg t) := by letI := ι.has_repr t; dsimp [of_term]; apply_instance
| (term.neg_one) := by dsimp [of_term]; apply_instance
| (term.xor t₁ t₂) := by letI := ι.has_repr t₁; letI := ι.has_repr t₂;
dsimp [of_term]; apply_instance
| (term.not t) := by letI := ι.has_repr t; dsimp [of_term]; apply_instance
| (term.ls t) := by letI := ι.has_repr t; dsimp [of_term]; apply_instance
| (term.sub t₁ t₂) := by letI := ι.has_repr t₁; letI := ι.has_repr t₂;
dsimp [of_term]; apply_instance
| (term.incr t) := by letI := ι.has_repr t; dsimp [of_term]; apply_instance
| (term.decr t) := by letI := ι.has_repr t; dsimp [of_term]; apply_instance
def check_eq (t₁ t₂ : term) (n : ℕ) : result :=
decide_if_zeros (of_term (t₁.xor t₂)).2 n
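-- `check_eq t₁ t₂ n` decides whether `t₁` and `t₂` agree as functions on
-- 2-adic inputs by testing whether `t₁.xor t₂` always outputs zero; `n` is
-- presumably the bound passed to the underlying decision procedure.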
end
open term
set_option profiler true
def x : term := term.var 0
def y : term := term.var 1
def z : term := term.var 2
def a : term := term.var 3
def b : term := term.var 4
def c : term := term.var 5
def d : term := term.var 6
#eval check_eq (x + -x) 0 2
#eval check_eq (x - y) (x + -y) 2
#eval check_eq (x + 1) x.incr 2
#eval check_eq (x - 1) x.decr 2
#eval check_eq (x.xor x) term.zero 1
#eval check_eq (x + y) (y + x) 1
#eval check_eq ((x + y) + z) (x + (y + z)) 2
#eval check_eq (not (xor x y)) (and x y - or x y - 1) 2
-- #eval (bitwise_struc bxor).nth_output (λ _, (tt, tt)) 0
open term
|
State Before: α : Type u_1
inst✝² : Lattice α
inst✝¹ : OrderBot α
inst✝ : IsModularLattice α
a b c : α
h : Disjoint b c
hsup : Disjoint a (b ⊔ c)
⊢ Disjoint (a ⊔ b) c State After: α : Type u_1
inst✝² : Lattice α
inst✝¹ : OrderBot α
inst✝ : IsModularLattice α
a b c : α
h : Disjoint b c
hsup : Disjoint a (b ⊔ c)
⊢ Disjoint c (b ⊔ a) Tactic: rw [disjoint_comm, sup_comm] State Before: α : Type u_1
inst✝² : Lattice α
inst✝¹ : OrderBot α
inst✝ : IsModularLattice α
a b c : α
h : Disjoint b c
hsup : Disjoint a (b ⊔ c)
⊢ Disjoint c (b ⊔ a) State After: α : Type u_1
inst✝² : Lattice α
inst✝¹ : OrderBot α
inst✝ : IsModularLattice α
a b c : α
h : Disjoint b c
hsup : Disjoint a (b ⊔ c)
⊢ Disjoint (c ⊔ b) a Tactic: apply Disjoint.disjoint_sup_right_of_disjoint_sup_left h.symm State Before: α : Type u_1
inst✝² : Lattice α
inst✝¹ : OrderBot α
inst✝ : IsModularLattice α
a b c : α
h : Disjoint b c
hsup : Disjoint a (b ⊔ c)
⊢ Disjoint (c ⊔ b) a State After: no goals Tactic: rwa [sup_comm, disjoint_comm] at hsup
|
from __future__ import annotations
import numpy as np
from ..core import Solver
from ..nbcompat import numba
from ..nbcompat.zeros import j_newton, newton_hd
from ..util import classproperty
class Multistep(Solver, abstract=True):
GROUP = "Multistep"
FIXED_STEP = True
FIRST_STEPPER_CLS = "RungeKutta45"
# class attributes
A: np.ndarray # (n,)
B: np.ndarray # (n,)
Bn: float
order: float
ORDER: int
@classproperty
def ORDER(cls):
return cls.A.size
@classproperty
def IMPLICIT(cls):
return cls.Bn != 0  # implicit methods have a nonzero coefficient on f(t_new, y_new)
@classproperty
def LEN_HISTORY(cls):
return max(cls.ORDER, 2)
def __init__(self, *args, first_stepper_cls="auto", **kwargs):
super().__init__(*args, **kwargs)
if first_stepper_cls is None or self.ORDER == 1:
# We push 1 less because one was done at Solver.s
for _ in range(self.LEN_HISTORY - 1):
self.cache.push(self.t, self.y, self.f)
return
if first_stepper_cls == "auto":
first_stepper_cls = self.FIRST_STEPPER_CLS
if isinstance(first_stepper_cls, str):
import nbkode
first_stepper_cls = getattr(nbkode, first_stepper_cls)
if first_stepper_cls.FIXED_STEP:
# For fixed step solver we do N steps with the step size.
solver = first_stepper_cls(self.rhs, self.t, self.y, h=self.h)
for _ in range(self.ORDER - 1):
solver.step()
self.cache.push(solver.t, solver.y, solver.f)
else:
# For variable step solver we run N times until h, 2h, 3h .. (ORDER - 1)h
solver = first_stepper_cls(self.rhs, self.t, self.y)
ts, ys = solver.run(np.arange(1, self.ORDER) * self.h)
for t, y in zip(ts, ys):
self.cache.push(t, y, self.rhs(t, y))
@staticmethod
def _fixed_step():
raise NotImplementedError
@classmethod
def _step_builder(cls):
fixed_step = cls._fixed_step
@numba.njit
def _step(rhs, cache, h):
t, y = fixed_step(rhs, cache, h)
cache.push(t, y, rhs(t, y))
return _step
@property
def _step_args(self):
return self.rhs, self.cache, self.h
class ExplicitMultistep(Multistep, abstract=True):
Bn = 0
@classmethod
def _fixed_step_builder(cls):
A, B = cls.A, cls.B
if A.size < cls.LEN_HISTORY or B.size < cls.LEN_HISTORY:
deltaA = cls.LEN_HISTORY - A.size
deltaB = cls.LEN_HISTORY - B.size
@numba.njit
def _fixed_step(rhs, cache, h):
t_new = cache.t + h
y_new = h * B @ cache.fs[deltaB:] - A @ cache.ys[deltaA:]
return t_new, y_new
else:
@numba.njit
def _fixed_step(rhs, cache, h):
t_new = cache.t + h
y_new = h * B @ cache.fs - A @ cache.ys
return t_new, y_new
return _fixed_step
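# A minimal sketch (not part of this module) of how a concrete explicit
# method plugs into the hierarchy above. The coefficient layout is
# inferred from _fixed_step: cache.ys / cache.fs hold the history
# oldest-first and y_new = h * B @ cache.fs - A @ cache.ys, so the
# two-step Adams-Bashforth rule y_{n+1} = y_n + h*(3/2 f_n - 1/2 f_{n-1})
# would read:
class SketchAdamsBashforth2(ExplicitMultistep):
    A = np.array([0.0, -1.0])  # -A @ ys selects +y_n (newest value)
    B = np.array([-0.5, 1.5])  # weights for f_{n-1}, f_n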
class ImplicitMultistep(Multistep, abstract=True):
@classmethod
def _fixed_step_builder(cls):
A, B, Bn = cls.A, cls.B, cls.Bn
@numba.njit(inline="always")
def implicit_root(y, rhs, t, h_Bn, K):
return y - h_Bn * rhs(t, y) - K
if A.size < cls.LEN_HISTORY or B.size < cls.LEN_HISTORY:
deltaA = cls.LEN_HISTORY - A.size
deltaB = cls.LEN_HISTORY - B.size
@numba.njit
def _fixed_step(rhs, cache, h):
t_new = cache.t + h
K = h * B @ cache.fs[deltaB:] - A @ cache.ys[deltaA:]
if cache.y.size == 1:
y_new = j_newton(
implicit_root,
cache.y,
args=(rhs, t_new, h * Bn, K),
)
else:
y_new = newton_hd(
implicit_root,
cache.y,
args=(rhs, t_new, h * Bn, K),
)
return t_new, y_new
else:
@numba.njit
def _fixed_step(rhs, cache, h):
t_new = cache.t + h
K = h * B @ cache.fs - A @ cache.ys
if cache.y.size == 1:
y_new = j_newton(
implicit_root,
cache.y,
args=(rhs, t_new, h * Bn, K),
)
else:
y_new = newton_hd(
implicit_root,
cache.y,
args=(rhs, t_new, h * Bn, K),
)
return t_new, y_new
return _fixed_step
|
||| A heterogeneous list. This was introduced in
||| chapter *Functor and Friends* and is also used in
||| later chapters
module Data.HList
import Data.Fin
%default total
public export
data HList : (ts : List Type) -> Type where
Nil : HList Nil
(::) : (v : t) -> (vs : HList ts) -> HList (t :: ts)
public export
head : HList (t :: ts) -> t
head (v :: _) = v
public export
tail : HList (t :: ts) -> HList ts
tail (_ :: t) = t
public export
(++) : HList xs -> HList ys -> HList (xs ++ ys)
[] ++ ws = ws
(v :: vs) ++ ws = v :: (vs ++ ws)
public export
indexList : (as : List a) -> Fin (length as) -> a
indexList (x :: _) FZ = x
indexList (_ :: xs) (FS y) = indexList xs y
indexList [] x impossible
public export
index : (ix : Fin (length ts)) -> HList ts -> indexList ts ix
index FZ (v :: _) = v
index (FS x) (_ :: vs) = index x vs
index ix [] impossible
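-- A small usage example (a sketch; the names below are illustrative and
-- not part of the original module):
exHList : HList [Bool, Nat, String]
exHList = [True, 12, "Idris"]
exSecond : Nat
exSecond = index (FS FZ) exHList -- yields 12, at the type computed by `indexList`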
|
% This is part of the TFTB Tutorial.
% Copyright (C) 1996 CNRS (France) and Rice University (US).
% See the file tutorial.tex for copying conditions.
This chapter presents some useful definitions that constitute the
background of time-frequency analysis (most of the information presented in
this tutorial is extracted from \cite{FLA93}). After a brief review of
time-domain and frequency-domain representations, we introduce the concepts
of time and frequency localizations, time-bandwidth product and the
constraint associated to this product (the Heisenberg-Gabor
inequality). Then, the instantaneous frequency and the group delay are
presented as a first solution to the problem of time localization of the
spectrum. We carry on by defining non-stationarity from its opposite,
stationarity, and show how to synthesize such non-stationary signals with
the toolbox. Finally, we show that in the case of multi-component signals,
these mono-dimensional functions (instantaneous frequency and group delay)
are not sufficient to represent these signals ; a two-dimensional
description (function of time {\em and} frequency) is necessary.
\section{Time representation and frequency representation}
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The time representation is usually the first (and the most natural)
description of a signal we consider, since almost all physical signals are
obtained by receivers recording variations with time.
The frequency representation, obtained by the {\it Fourier transform}
\index{Fourier transform}
\[X(\nu) = \int_{-\infty}^{+\infty} x(t)\ e^{-j2\pi \nu t}\ dt,\]
is also a very powerful way to describe a signal, mainly because the
relevance of the concept of frequency is shared by many domains (physics,
astronomy, economics, biology \ldots) in which periodic events occur.
But if we look more carefully at the spectrum $X(\nu)$, it can be viewed
as the coefficient function obtained by expanding the signal $x(t)$ into
the family of infinite waves, $\exp\{j2\pi \nu t\}$, which are completely
unlocalized in time. Thus, the spectrum essentially tells us which
frequencies are contained in the signal, as well as their corresponding
amplitudes and phases, but does not tell us at which times these
frequencies occur.
\section{Localization and the Heisenberg-Gabor\\ principle}
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\markright{Localization and the Heisenberg-Gabor principle}
A simple way to characterize a signal simultaneously in time and in
frequency is to consider its mean localizations and dispersions in each of
these representations. This can be obtained by considering $|x(t)|^2$ and
$|X(\nu)|^2$ as probability distributions, and looking at their mean values
and standard deviations : \index{average time}\index{average
frequency}\index{time spreading}\index{frequency spreading}
%\begin{xalignat}{2}
$$
\begin{array}{rcll}
t_m &=& \frac{1}{E_x}\ \int_{-\infty}^{+\infty} t\ |x(t)|^2\ dt &
\mbox{\it average time}\\
\nu_m &=& \frac{1}{E_x}\ \int_{-\infty}^{+\infty} \nu\ |X(\nu)|^2\ d\nu &
\mbox{\it average frequency}\\
T^2 &=& \frac{4\pi}{E_x}\ \int_{-\infty}^{+\infty} (t-t_m)^2\ |x(t)|^2\ dt
& \mbox{\it time spreading}\\
B^2 &=& \frac{4\pi}{E_x}\ \int_{-\infty}^{+\infty} (\nu-\nu_m)^2\
|X(\nu)|^2\ d\nu & \mbox{\it frequency spreading}
\end{array}
$$
%\end{eqnarray*}
%\end{xalignat}
where $E_x$ is the \index{energy} {\it energy} of the signal, assumed to be
finite (bounded) :
\[E_x = \int_{-\infty}^{+\infty} |x(t)|^2\ dt < + \infty.\]
Then a signal can be characterized in the time-frequency plane by its mean
position $(t_m, \nu_m)$ and a domain of main energy localization whose area
is proportional to the {\it time-bandwidth product} $T\times B$.\\
\index{time-bandwidth product}
\subsection{Example 1}
%'''''''''''''''''''''
\label{ex1}
These time and frequency localizations can be evaluated thanks to the
M-files \index{\ttfamily loctime}{\ttfamily loctime.m} and \index{\ttfamily
locfreq}{\ttfamily locfreq.m} of the Toolbox. The first one gives the
average time center ($t_m$) and the duration ($T$) of a signal, and the
second one the average normalized frequency ($\nu_m$) and the normalized
bandwidth ($B$). For example, for a linear chirp with a gaussian amplitude
modulation, we obtain (see fig. \ref{Ns2fig1})\,:
\begin{verbatim}
>> sig=fmlin(256).*amgauss(256);
>> [tm,T]=loctime(sig) ---> tm=128 T=32
>> [num,B]=locfreq(sig) ---> num=0.249 B=0.0701
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=8cm
\centerline{\epsfbox{figure/ns2fig1.eps}}
\caption{\label{Ns2fig1}Linear chirp with a gaussian amplitude modulation}
\end{figure}
One interesting property of this product $T\times B$ is that it is lower
bounded :
\[T \times B \geq 1.\]
\index{Heisenberg-Gabor inequality} This constraint, known as the {\it
Heisenberg-Gabor inequality}, illustrates the fact that a signal cannot
have simultaneously an arbitrarily small support in time and in
frequency. This property is a consequence of the definition of the Fourier
transform. The lower bound $T\times B = 1$ is reached for gaussian
functions :
\[x(t) = C \exp{[-\alpha(t - t_m)^2 + j2\pi \nu_m(t-t_m)]}\]
%with $C \in \mathbb{R}$, $\alpha \in \mathbb{R}_{+}$. Therefore, the
with $C \in \Rset$, $\alpha \in \Rset_{+}$. Therefore, the
gaussian signals are those which minimize the time-bandwidth product
according to the Heisenberg-Gabor inequality.\\
\subsection{Example 2}
%'''''''''''''''''''''
To check the Heisenberg-Gabor inequality numerically, we consider a
gaussian signal and calculate its time-bandwidth product (see
fig. \ref{Ns2fig2})\,:
\begin{verbatim}
>> sig=amgauss(256);
>> [tm,T]=loctime(sig);
>> [fm,B]=locfreq(sig);
>> [T,B,T*B] ---> T=32 B=0.0312 T*B=1
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=8cm
\centerline{\epsfbox{figure/ns2fig2.eps}}
\caption{\label{Ns2fig2}gaussian signal : lower bound of the
Heisenberg-Gabor inequality}
\end{figure}
Hence, the time-bandwidth product obtained when using the file
\index{\ttfamily amgauss}{\ttfamily amgauss.m} is minimal.
\section{Instantaneous frequency}
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\label{anasig}
\index{instantaneous frequency}
\index{analytic signal}
\index{Hilbert transform}
Another way to describe a signal simultaneously in time and in frequency
is to consider its {\it instantaneous frequency}. In order to introduce such a
function, we must first define the concept of {\it analytic signal}.
For any real valued signal $x(t)$, we associate a complex valued signal
$x_a(t)$ defined as
\[x_a(t) = x(t) + j HT(x(t))\]
where $HT(x)$ is the {\it Hilbert transform} of $x$ ($x_a$ can be obtained
using the M-file {\ttfamily hilbert.m} of the Signal Processing
Toolbox). $x_a(t)$ is called the analytic signal associated to $x(t)$. This
definition has a simple interpretation in the frequency domain since $X_a$
is a single-sided Fourier transform where the negative frequency values
have been removed, the strictly positive ones have been doubled, and the DC
component is kept unchanged :
\begin{eqnarray*}
X_a(\nu) = 0 \ \ \ \ \ \ &\mbox{if}& \nu < 0 \\
X_a(\nu) = X(0)\ \ &\mbox{if}& \nu = 0 \\
X_a(\nu) = 2X(\nu) &\mbox{if}& \nu > 0
\end{eqnarray*}
($X$ is the Fourier transform of $x$, and $X_a$ the Fourier transform of
$x_a$). Thus, the analytic signal can be obtained from the real signal by
forcing its spectrum to zero for the negative frequencies, which does not
alter the information content since for a real signal, $X(-\nu)=X^*(\nu)$.
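For instance, the analytic signal associated to a real chirp can be
obtained as follows (just an illustration of the call mentioned above)\,:
\begin{verbatim}
>> x=real(fmlin(128)); xa=hilbert(x);
\end{verbatim}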
\index{instantaneous amplitude}
From this signal, it is then possible to define in a unique way the
concepts of {\it instantaneous amplitude} and {\it instantaneous frequency} by :
\begin{eqnarray*}
a(t) &=& |x_a(t)| \ \ \ \ \ \ \ \ \ \ \ \mbox{\it instantaneous amplitude} \\
f(t) &=& \frac{1}{2\pi} \frac{d\arg{x_a(t)}}{dt}\ \mbox{\it instantaneous
frequency}
\end{eqnarray*}
An estimation of the instantaneous frequency is given by the M-file
\index{\ttfamily instfreq}{\ttfamily instfreq.m} of the Time-Frequency
toolbox :\\
{\bf Example} (see fig. \ref{Ns3fig1})
\begin{verbatim}
>> sig=fmlin(256); t=(3:256);
>> ifr=instfreq(sig); plotifl(t,ifr);
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=6cm
\centerline{\epsfbox{figure/ns3fig1.eps}}
\caption{\label{Ns3fig1}Estimation of the instantaneous frequency of a
linear chirp}
\end{figure}
As we can see from this plot, the instantaneous frequency successfully
tracks the evolution in time of the frequency content of this signal.
\section{Group delay}
%~~~~~~~~~~~~~~~~~~~~
\index{group delay}
The instantaneous frequency characterizes a local frequency behavior as a
function of time. In a dual way, the local time behavior as a function of
frequency is described by the {\it group delay} :
\[t_x(\nu) = -\frac{1}{2\pi} \frac{d\arg{X_a(\nu)}}{d \nu}.\]
This quantity measures the average arrival time of the frequency $\nu$. The
M-file \index{\ttfamily sgrpdlay}{\ttfamily sgrpdlay.m} of the
Time-Frequency Toolbox gives an estimation of the group delay of a signal
(do not mistake it for the file {\ttfamily grpdelay.m} of the signal
processing toolbox which gives the group delay of a digital filter). For
example, with signal {\ttfamily sig} of the previous example, we obtain
(see fig. \ref{Ns4fig1})\,:
\begin{verbatim}
>> sig=fmlin(256); fnorm=0:.05:.5;
>> gd=sgrpdlay(sig,fnorm); plot(gd,fnorm);
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=6cm
\centerline{\epsfbox{figure/ns4fig1.eps}}
\caption{\label{Ns4fig1}Estimation of the group delay of the previous chirp}
\end{figure}
Note that, in general, the instantaneous frequency and the group
delay define two different curves in the time-frequency plane. They are
approximately identical only when the time-bandwidth product $T\times B$
is large. To illustrate this point, let us consider a simple example. We
calculate the instantaneous frequency and group delay of two signals, the
first one having a large $T\times B$ product, and the second one a small
$T\times B$ product (see fig. \ref{Ns4fig2})\,:
\begin{verbatim}
>> t=2:255;
>> sig1=amgauss(256,128,90).*fmlin(256,0,0.5);
>> [tm,T1]=loctime(sig1); [fm,B1]=locfreq(sig1);
>> T1*B1 ---> T1*B1=15.9138
>> ifr1=instfreq(sig1,t); f1=linspace(0,0.5-1/256,256);
>> gd1=sgrpdlay(sig1,f1); plot(t,ifr1,'*',gd1,f1,'-')
>> sig2=amgauss(256,128,30).*fmlin(256,0.2,0.4);
>> [tm,T2]=loctime(sig2); [fm,B2]=locfreq(sig2);
>> T2*B2 ---> T2*B2=1.224
>> ifr2=instfreq(sig2,t); f2=linspace(0.2,0.4,256);
>> gd2=sgrpdlay(sig2,f2); plot(t,ifr2,'*',gd2,f2,'-')
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=8cm
\centerline{\epsfbox{figure/ns4fig2.eps}}
\caption{\label{Ns4fig2}Estimation of the instantaneous frequency (stars)
and group delay (line) of two different chirps with different amplitude
modulations. The first plot corresponds to a large $T\times B$ product
while the second corresponds to a small one}
\end{figure}
On the first plot, the two curves are almost superimposed (i.e. the group
delay is approximately the inverse function of the instantaneous frequency),
whereas on the second plot, the two curves are clearly different.
\section{About stationarity}
%~~~~~~~~~~~~~~~~~~~~~~~~~~~
\index{stationarity}
Before talking about non-stationarity, which is a 'non-property', we must
define what we call {\it stationarity}.
A deterministic signal is said to be {\it stationary} if it can be
written as a discrete sum of sinusoids :
\begin{eqnarray*}
x(t)&=&\sum_{k \in \Nset} A_k \cos{[2\pi \nu_k t + \Phi_k]} \ \ \ \
\mbox{ for a real signal} \\
x(t)&=&\sum_{k \in \Nset} A_k \exp{[j(2\pi \nu_k t + \Phi_k)]}
\mbox{for a complex signal}
\end{eqnarray*}
i.e. as a sum of elements which have constant instantaneous amplitude and
instantaneous frequency.
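For instance, such a stationary signal (here the sum of two constant
frequency modulations generated by {\ttfamily fmconst.m}) can be
synthesized with the toolbox\,:
\begin{verbatim}
>> sig=fmconst(128,0.1)+0.5*fmconst(128,0.3);
\end{verbatim}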
In the random case, a signal $x(t)$ is said to be {\it wide-sense
stationary} (or stationary up to the second order) if its expectation is
independent of time and its autocorrelation function $E[x(t_1)x^*(t_2)]$
depends only on the time difference $t_2-t_1$. We can then show that the
associated analytic signal has constant instantaneous amplitude and
frequency expectations, which can be connected to the deterministic case.
\index{non-stationarity}
So a signal is said to be {\it non-stationary} if one of these fundamental
assumptions is no longer valid. For example, a finite duration signal, and
in particular a {\it transient signal} (for which the length is short
compared to the observation duration), is non-stationary.
\section{How to synthesize a mono-component non-stationary signal}
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
One part of the Time-Frequency Toolbox is dedicated to the generation of
non-stationary signals. In that part, three groups of M-files are available\,:
\begin{enumerate}
\item The first one makes it possible to synthesize different amplitude
modulations. These M-files begin with the prefix '{\ttfamily am}'. For
example, {\ttfamily amrect.m} computes a rectangular amplitude modulation,
{\ttfamily amgauss.m} a gaussian amplitude modulation \ldots
\item The second one proposes different frequency modulations. These
M-files begin with '{\ttfamily fm}'. For example, {\ttfamily fmconst.m} is
a constant frequency modulation, {\ttfamily fmhyp.m} a hyperbolic frequency
modulation \ldots
\item The third one is a set of pre-defined signals. Some of them begin
with '{\ttfamily ana}' because these signals are analytic (for example
({\ttfamily anastep, anabpsk, anasing} \ldots), others have special names
({\ttfamily doppler, atoms} \ldots).
\end{enumerate}
The first two groups of files can be combined to produce a large class of
non-stationary signals, by multiplying an amplitude modulation with a
frequency modulation.\\
{\bf Examples}
We can multiply the linear frequency modulation of Example 1 (see page
\pageref{ex1}) by a gaussian amplitude modulation (see
fig. \ref{Ns6fig1})\,:
\begin{verbatim}
>> fm1=fmlin(256,0,0.5);
>> am1=amgauss(256);
>> sig1=am1.*fm1; plot(real(sig1));
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=6cm
\centerline{\epsfbox{figure/ns6fig1.eps}}
\caption{\label{Ns6fig1}Mono-component non-stationary signal with a linear
frequency modulation and a gaussian amplitude modulation}
\end{figure}
By default, the signal is centered on the middle (256/2=128), and its
spread is $T=32$. If you want to center it at another position {\ttfamily
t0}, just replace {\ttfamily am1} by {\ttfamily amgauss(256,t0)}. A second
example can be to multiply a pure frequency (constant frequency modulation)
by a one-sided exponential window starting at {\ttfamily t=100} (see
fig. \ref{Ns6fig2})\,:
\begin{verbatim}
>> fm2=fmconst(256,0.2);
>> am2=amexpo1s(256,100);
>> sig2=am2.*fm2; plot(real(sig2));
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=6cm
\centerline{\epsfbox{figure/ns6fig2.eps}}
\caption{\label{Ns6fig2}Mono-component non-stationary signal with a
constant frequency modulation and a one-sided exponential amplitude
modulation}
\end{figure}
As a third example of mono-component non-stationary signal, we can consider
the M-file \index{\ttfamily doppler}{\ttfamily doppler.m} : this function
generates a model of the signal received by a fixed observer from a
moving target emitting a pure frequency (see fig. \ref{Ns6fig3}).
\begin{verbatim}
>> [fm3,am3]=doppler(256,200,4000/60,10,50);
>> sig3=am3.*fm3; plot(real(sig3));
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=6cm
\centerline{\epsfbox{figure/ns6fig3.eps}}
\caption{\label{Ns6fig3}Doppler signal}
\end{figure}
This example corresponds to a target (a car for instance) moving in a
straight line at a speed of 50\,m/s, and passing 10\,m from the observer (the
radar\,!). The rotating frequency of the engine is 4000\,revolutions per
minute, and the sampling frequency of the radar is 200\,Hz.\\
In order to have a more realistic model of physical signals, we
may need to add some complex noise on these signals. To do so, two M-files
\index{\ttfamily noisecg}({\ttfamily noisecg} and \index{\ttfamily
noisecu}{\ttfamily noisecu}) of the Time-Frequency Toolbox are proposed :
{\ttfamily noisecg.m} generates a complex white or colored gaussian noise,
and {\ttfamily noisecu.m}, a complex white uniform noise. For example, if
we add complex colored gaussian noise to the signal {\ttfamily sig1} with a
signal to noise ratio of -10\,dB (see fig. \ref{Ns6fig4})
\begin{verbatim}
>> noise=noisecg(256,.8);
>> sign=sigmerge(sig1,noise,-10); plot(real(sign));
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=6cm
\centerline{\epsfbox{figure/ns6fig4.eps}}
\caption{\label{Ns6fig4}Gaussian transient signal ({\ttfamily sig1})
embedded in a -10\,dB colored gaussian noise}
\end{figure}
the deterministic signal {\ttfamily sig1} is now almost indistinguishable
from the noise.
\section{What about multi-component non-stationary signals ?}
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The notion of instantaneous frequency implicitly assumes that, at each
time instant, there exists only a single frequency component. A dual
restriction applies to the group delay : the implicit assumption is that a
given frequency is concentrated around a single time instant. Thus, if
these assumptions are no longer valid, which is the case for most of the
multi-component signals, the result obtained using the instantaneous
frequency or the group delay is meaningless.\\
{\bf Example}
For example, let us consider the superposition of two linear frequency
modulations :
\begin{verbatim}
>> N=128; x1=fmlin(N,0,0.2); x2=fmlin(N,0.3,0.5);
>> x=x1+x2;
\end{verbatim}
At each time instant $t$, an ideal time-frequency representation should
represent two different frequencies with the same amplitude. The results
obtained using the instantaneous frequency and the group delay are of
course completely different, and therefore irrelevant (see
fig. \ref{Ns7fig1})\,:
\begin{verbatim}
>> ifr=instfreq(x); subplot(211); plot(ifr);
>> fn=0:0.01:0.5; gd=sgrpdlay(x,fn);
>> subplot(212); plot(gd,fn);
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=6cm
\centerline{\epsfbox{figure/ns7fig1.eps}}
\caption{\label{Ns7fig1}Estimation of the instantaneous frequency (first
plot) and group-delay (second plot) of a multi-component signal}
\end{figure}
So these one-dimensional representations, instantaneous frequency and group
delay, are not sufficient to represent all the non-stationary signals. A
further step has to be made towards two-dimensional mixed representations,
jointly in time and in frequency. Even if no gain of information can be
expected since it is all contained in the time or in the frequency
representation, we can obtain a better structuring of this information, and
an improvement in the intelligibility of the representation.
To have an idea of what can be made with a time-frequency decomposition,
let us anticipate the following and have a look at the result obtained on
this signal with the Short Time Fourier Transform (see
fig. \ref{Ns7fig2})\,:
\begin{verbatim}
>> tfrstft(x);
\end{verbatim}
\begin{figure}[htb]
\epsfxsize=10cm
\epsfysize=8cm
\centerline{\epsfbox{figure/ns7fig2.eps}}
\caption{\label{Ns7fig2}Squared modulus of the short-time Fourier transform
of the previous multi-component non-stationary signal}
\end{figure}
Here two ``time-frequency components'' can be clearly seen, located around
the locus of the two frequency modulations.
|
import Lean.Elab.Tactic
set_option autoImplicit false
noncomputable section Theory -- :)
open Classical
def Set (α : Type _) : Type _ :=
α → Prop
def Set.range' (n : Nat) : Set Nat :=
fun i => i ≤ n
def Nat.divides (a b : Nat) : Prop :=
∃ k, a * k = b
def Nat.prime (n : Nat) : Prop :=
n ≠ 1 ∧ ∀ a b, n.divides (a * b) → n.divides a ∨ n.divides b
def List.noDups {α : Type _} : List α → Prop
| [] => True
| (x :: xs) => x ∉ xs ∧ xs.noDups
def List.toSet {α : Type _} (l : List α) : Set α :=
fun x => x ∈ l
def List.sum : List Nat → Nat
| [] => 0
| (x :: xs) => x + xs.sum
def Set.finsum {α : Type _} (s : Set α) (f : α → Nat) : Nat :=
if h : ∃ (l : List α), l.noDups ∧ l.toSet = s
then ((choose h).map f).sum
else 0
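-- For instance, `(Set.range' 3).finsum id` denotes `0 + 1 + 2 + 3 = 6`,
-- but the definition is noncomputable: it classically chooses a
-- duplicate-free list enumerating the set (the sum is independent of the
-- choice, since list sums are permutation-invariant). The rest of this
-- file is about refining such terms to computable ones.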
def cite {α : Type _} (p : Prop) (x y : α) : α :=
if p then x else y
@[inherit_doc ite] syntax (name := termCIfThenElse)
ppRealGroup(ppRealFill(ppIndent("cif " term " then") ppSpace term)
ppDedent(ppSpace) ppRealFill("else " term)) : term
macro_rules
| `(cif $c then $t else $e) => do
let mvar ← Lean.withRef c `(?m)
`(let_mvar% ?m := $c; wait_if_type_mvar% ?m; cite $mvar $t $e)
def Nat.sumPrimes (n : Nat) : Nat :=
(Set.range' n).finsum fun i => cif i.prime then i else 0
end Theory
section Computation
def Req {α : Type _} (x y : α) :=
x = y
def Riff (p : Prop) (q : Bool) := p ↔ (q = true)
def Rfun {α α' β β'} (R : α → β → Prop) (R' : α' → β' → Prop) : (α → α') → (β → β') → Prop :=
fun f g => ∀ ⦃a b⦄, R a b → R' (f a) (g b)
infixr:50 " ⇨ " => Rfun
def Nat.dividesImpl (a b : Nat) : Bool :=
if a == 0
then b == 0
else b == a * (b / a)
def Nat.primeImpl (n : Nat) : Bool :=
n > 1 ∧ ! n.any fun i => i > 1 ∧ i.dividesImpl n
axiom Nat.primeImplCorrect (n : Nat) :
Riff n.prime n.primeImpl
axiom RprimeImpl : (Req ⇨ Riff) Nat.prime Nat.primeImpl
-- #eval ((List.range 100).filter Nat.primeImpl).length
--def Set.range'Impl₂ (n : Nat) : Impl (Set.range' n) (Nat → Bool) := fun i => i ≤ n
def RSetAsList {α : Type _} (x : Set α) (y : List α) : Prop :=
y.noDups ∧ y.toSet = x
def RSetAsFunBool {α : Type _} (x : Set α) (y : α → Bool) :=
∀ a, x a ↔ y a
def Rdepfun {α β : Type _} {α' : α → Type _} {β' : β → Type _}
(R : α → β → Prop) (R' : (a : α) → (b : β) → α' a → β' b → Prop) : ((a : α) → α' a) → ((b : β) → β' b) → Prop :=
fun f g => ∀ ⦃a b⦄, R a b → R' a b (f a) (g b)
infixr:50 " ⇨ᵈ " => Rdepfun
theorem Rap {α α' β β'} {R : α → β → Prop} {R' : α' → β' → Prop}
{f : α → α'} (g : β → β') {x : α} (y : β) :
(R ⇨ R') f g → R x y → R' (f x) (g y) :=
fun h₁ h₂ => h₁ h₂
theorem RAP {Α Α' Β Β'} {r : Α → Β → Prop} {r' : Α' → Β' → Prop}
{F : Α → Α'} (G : Β → Β') {X : Α} (Y : Β) :
(r ⇨ r') F G → r X Y → r' (F X) (G Y) :=
fun H₁ H₂ => H₁ H₂
axiom Set.range'Impl₁ (n : Nat) : RSetAsList (Set.range' n) (List.range (n+1))
-- Use this in conjunction with ap rule for ⇒
-- axiom Set.finsumImpl : (RSetAsList ⇒ (Req ⇒ Req) ⇒ Req) (Set.finsum) (λ l g => l.map g |>.sum)
-- to derive the following:
axiom Set.finsumImpl {α : Type _} (s : Set α) (l : List α) (f : α → Nat) (g : α → Nat) :
RSetAsList s l →
(∀ j k, Req j k → Req (f j) (g k)) → -- (Req ⇒ Req) f g
Req (s.finsum f) (l.map g |>.sum)
theorem Set.finsumImpl₀ {α : Type _} :
(RSetAsList ⇨ (Req ⇨ Req) ⇨ Req) (@Set.finsum (α := α)) (λ l g => l.map g |>.sum) :=
by
intro s s' hs f f' hf
apply Set.finsumImpl <;> assumption
theorem if_classical {p : Prop} {hp : Decidable p} [hp' : Decidable p] {α : Type _} (x y : α) :
@ite α p hp x y = @ite α p hp' x y :=
by
have : hp = hp' := Subsingleton.elim _ _
rw [this]
axiom if_classical' {p : Prop} (p' : Prop) {hp : Decidable p} (hp' : Decidable p') {α : Type _} (x y : α) :
(p ↔ p') →
Req (if p then x else y) (if p' then x else y)
-- TODO: Generalize x, y to x', y' on RHS
-- axiom _ : (Riff ⇒ Req ⇒ Req ⇒ Req) ite (λ p' x y, if p' then x else y)
-- axiom _ {R} : (Riff ⇒ R ⇒ R ⇒ R) ite (λ p' x y, if p' then x else y)
axiom if_classical'' {p : Prop} (p' : Bool) {hp : Decidable p} {α : Type _} (x y : α) :
Riff p p' →
Req (if p then x else y) (if p' then x else y)
def id2 {α β} : (α → β) → α → β := id
def Rtriv (α β : Type _) (x : α) (y : β) : Prop := True
axiom Rcite {α β} {R : α → β → Prop} :
(Riff ⇨ R ⇨ R ⇨ R) cite cond
def BLAH {α β} (f : (α → β) → Sort _) (x : α → β) : f (λ i => x i) → f x := id
def BLAH2 {α β} (f : β → Sort _) (y : α → β) (z : α) : f (y z) → f (y z) := id
def CHANGE (α) : α → α := id
--axiom if_classical₀ {α β} {R : α → β → Prop} :
-- (Riff ⇨ R ⇨ R ⇨ R) (open Classical in fun p x y => if p then x else y) (fun b x y => if b then x else y)
#print if_classical''
def Req.result {α : Type _} {x y : α} (_ : Req x y) : α := y
section
open Lean.Meta Lean.Elab.Tactic
elab "assign " n:term " => " e:term : tactic =>
Lean.Elab.Tactic.withMainContext do
match (← elabTerm n none) with
| .mvar m => m.withContext do
m.assign (← elabTerm e none)
return ()
| _ => return ()
end
def Nat.sumPrimesImpl₁ (n : Nat) : Nat :=
by
have : Req (Nat.sumPrimes n) ?y1 :=
by
unfold Nat.sumPrimes
-- TODO: megahack.
-- Version that worked: explicitly write the correct formula in place of ?RESULT.
-- Versions that didn't work: all others.
refine' Rap (R := Req ⇨ Req) ?fx (?RESULT) ?rfx ?ry
case rfx =>
refine' Rap (R := RSetAsList) ?_ ?_ ?rf ?rx
case rf => refine' Set.finsumImpl₀
case rx => refine' Set.range'Impl₁ _
case ry =>
-- refine_to_fun RESULT
-- assign ?RESULT => (fun j => (?RESULT' : ?_ → ?_) ?_)
let M1 : Nat → Nat → Nat := fun j => cond (primeImpl j) j
let M2 : Nat → Nat := fun j => j
intro j j' hj
dsimp only
refine CHANGE (Req (cite (prime j) j 0) (M1 j' (M2 j'))) ?_
set_option pp.all true in
refine' Rap (R := Req) (R' := Req) _ _ ?rg ?rz₁
case rg =>
refine' Rap (R := Req) (R' := Req ⇨ Req) ?g₁ ?z₂ ?rg₁ ?rz₂
case rg₁ =>
refine' Rap ?_ ?_ ?rg₂ ?rz₃
case rg₂ => exact Rcite
case rz₃ =>
refine' RprimeImpl ?_
assumption
case rz₂ =>
assumption
case rz₁ =>
rfl
-- refine Set.finsumImpl _ ?_ _ ?_ ?rs ?rf
-- case rs =>
-- refine Set.range'Impl₁ _
-- case rf =>
-- intro j k h
-- subst h
-- refine if_classical'' ?_ _ _ ?rp
-- case rp =>
-- refine (Nat.primeImplCorrect j)
exact ?y1
-- -- exact this.result
#eval Nat.sumPrimesImpl₁ 5
#print Nat.sumPrimesImpl₁
#exit
def Nat.sumPrimesImpl' (n : Nat) : Nat :=
by
have : Req (Nat.sumPrimes n) ?y :=
by
unfold sumPrimes
refine Set.finsumImpl _ ?_ _ ?_ ?rs ?rf
case rs =>
refine Set.range'Impl₁ _
case rf =>
intro j k h
subst h
refine if_classical'' ?_ _ _ ?rp
case rp =>
refine (Nat.primeImplCorrect j)
exact ?y
-- exact this.result
#print Nat.sumPrimesImpl'
#eval Nat.sumPrimesImpl' 5000
def Nat.sumPrimesImpl'' (n : Nat) : { y : Nat // Req (n.sumPrimes) y } :=
by
refine ⟨?_, ?r⟩
case r =>
unfold sumPrimes
refine Set.finsumImpl _ ?_ _ ?_ ?rs ?rf
case rs =>
refine Set.range'Impl₁ _
case rf =>
intro j k h
subst h
refine if_classical'' ?_ _ _ ?rp
case rp =>
refine (Nat.primeImplCorrect j)
abbrev Impl {α : Type _} (_a : α) : Type _ := α
def Impl.done {α : Type _} (a : α) : Impl a := a
def Nat.sumPrimesFun (n : Nat) : Nat :=
by
have t : Impl (Nat.sumPrimesImpl'' n).val :=
by
dsimp only [Nat.sumPrimesImpl'']
apply Impl.done
exact t
def Nat.sumPrimesFun' (n : Nat) : Nat :=
by
have : (Nat.sumPrimesImpl'' n).val = ?y :=
by
dsimp only [Nat.sumPrimesImpl'']
exact rfl
clear this
exact ?y
theorem works (n : Nat) : Req n.sumPrimes n.sumPrimesFun' :=
(Nat.sumPrimesImpl'' n).property
#print Nat.sumPrimesFun'
#eval Nat.sumPrimesFun 500
end Computation
|
!!
!! A _BoxArray_ is an array of boxes.
!!
module boxarray_module
use bl_types
use box_module
use list_box_module
implicit none
type boxarray
private
integer :: dim = 0
integer :: nboxes = 0
type(box), pointer :: bxs(:) => Null()
end type boxarray
interface dataptr
module procedure boxarray_dataptr
end interface
interface get_dim
module procedure boxarray_dim
end interface
interface built_q
module procedure boxarray_built_q
end interface
interface copy
module procedure boxarray_build_copy
end interface
interface build
module procedure boxarray_build_v
module procedure boxarray_build_l
module procedure boxarray_build_bx
end interface
interface destroy
module procedure boxarray_destroy
end interface
interface nboxes
module procedure boxarray_nboxes
end interface
interface volume
module procedure boxarray_volume
end interface
interface get_box
module procedure boxarray_get_box
end interface
interface boxarray_maxsize
module procedure boxarray_maxsize_i
module procedure boxarray_maxsize_v
end interface
interface boxarray_grow
module procedure boxarray_grow_n
end interface
interface bbox
module procedure boxarray_bbox
end interface
private :: boxarray_maxsize_l
contains
function boxarray_dataptr(ba) result(r)
type(boxarray), intent(in) :: ba
type(box), pointer :: r(:)
r => ba%bxs
end function boxarray_dataptr
pure function boxarray_built_q(ba) result(r)
logical :: r
type(boxarray), intent(in) :: ba
r = ba%dim /= 0
end function boxarray_built_q
pure function boxarray_dim(ba) result(r)
type(boxarray), intent(in) :: ba
integer :: r
r = ba%dim
end function boxarray_dim
pure function boxarray_get_box(ba, i) result(r)
type(boxarray), intent(in) :: ba
integer, intent(in) :: i
type(box) :: r
r = ba%bxs(i)
end function boxarray_get_box
! keep
subroutine boxarray_build_copy(ba, ba1)
use bl_error_module
type(boxarray), intent(inout) :: ba
type(boxarray), intent(in) :: ba1
if ( built_q(ba) ) call bl_error("BOXARRAY_BUILD_COPY: already built")
if ( .not. built_q(ba1) ) return
ba%nboxes = size(ba1%bxs)
allocate(ba%bxs(size(ba1%bxs)))
ba%bxs = ba1%bxs
ba%dim = ba1%dim
call boxarray_verify_dim(ba)
end subroutine boxarray_build_copy
subroutine boxarray_build_v(ba, bxs, sort)
use bl_error_module
type(boxarray), intent(inout) :: ba
type(box), intent(in), dimension(:) :: bxs
logical, intent(in), optional :: sort
logical :: lsort
lsort = .false. ; if (present(sort)) lsort = sort
if ( built_q(ba) ) call bl_error("BOXARRAY_BUILD_V: already built")
ba%nboxes = size(bxs)
allocate(ba%bxs(size(bxs)))
ba%bxs = bxs
if ( ba%nboxes > 0 ) then
ba%dim = ba%bxs(1)%dim
end if
call boxarray_verify_dim(ba)
if (lsort) call boxarray_sort(ba) !! make sure all grids are sorted
end subroutine boxarray_build_v
subroutine boxarray_build_bx(ba, bx)
use bl_error_module
type(boxarray), intent(inout) :: ba
type(box), intent(in) :: bx
if ( built_q(ba) ) call bl_error("BOXARRAY_BUILD_BX: already built")
ba%nboxes = 1
allocate(ba%bxs(1))
ba%bxs(1) = bx
ba%dim = bx%dim
call boxarray_verify_dim(ba)
end subroutine boxarray_build_bx
subroutine boxarray_build_l(ba, bl, sort)
use bl_error_module
type(boxarray), intent(inout) :: ba
type(list_box), intent(in) :: bl
logical, intent(in), optional :: sort
type(list_box_node), pointer :: bln
logical :: lsort
integer :: i
!
! Default is to sort.
!
lsort = .true. ; if ( present(sort) ) lsort = sort
if ( built_q(ba) ) call bl_error("BOXARRAY_BUILD_L: already built")
ba%nboxes = size(bl)
allocate(ba%bxs(ba%nboxes))
bln => begin(bl)
i = 1
do while (associated(bln))
ba%bxs(i) = value(bln)
i = i + 1
bln=>next(bln)
end do
if ( ba%nboxes > 0 ) then
ba%dim = ba%bxs(1)%dim
end if
call boxarray_verify_dim(ba)
if ( lsort ) call boxarray_sort(ba)
end subroutine boxarray_build_l
subroutine boxarray_destroy(ba)
type(boxarray), intent(inout) :: ba
if ( associated(ba%bxs) ) then
deallocate(ba%bxs)
ba%bxs => Null()
end if
ba%dim = 0
ba%nboxes = 0
end subroutine boxarray_destroy
subroutine boxarray_sort(ba)
use sort_box_module
type(boxarray), intent(inout) :: ba
call box_sort(ba%bxs)
end subroutine boxarray_sort
subroutine boxarray_verify_dim(ba, stat)
use bl_error_module
type(boxarray), intent(in) :: ba
integer, intent(out), optional :: stat
integer :: i, dm
if ( present(stat) ) stat = 0
if ( ba%nboxes < 1 ) return
dm = ba%dim
if ( dm == 0 ) then
dm = ba%bxs(1)%dim
end if
if ( dm == 0 ) then
call bl_error("BOXARRAY_VERIFY_DIM: dim is zero!")
end if
do i = 1, ba%nboxes
if ( ba%dim /= ba%bxs(i)%dim ) then
if ( present(stat) ) then
stat = 1
return
else
call bl_error("BOXARRAY_VERIFY_DIM: " // &
"ba%dim not equal to some boxes dim: ", ba%dim)
end if
end if
end do
end subroutine boxarray_verify_dim
subroutine boxarray_grow_n(ba, n)
type(boxarray), intent(inout) :: ba
integer, intent(in) :: n
integer :: i
do i = 1, ba%nboxes
ba%bxs(i) = grow(ba%bxs(i), n)
end do
end subroutine boxarray_grow_n
subroutine boxarray_nodalize(ba, nodal)
type(boxarray), intent(inout) :: ba
logical, intent(in), optional :: nodal(:)
integer :: i
do i = 1, ba%nboxes
ba%bxs(i) = box_nodalize(ba%bxs(i), nodal)
end do
end subroutine boxarray_nodalize
subroutine boxarray_box_boundary_n(bao, bx, n)
type(boxarray), intent(out) :: bao
type(box), intent(in) :: bx
integer, intent(in) :: n
type(boxarray) :: baa
call boxarray_build_bx(baa, bx)
call boxarray_boundary_n(bao, baa, n)
call boxarray_destroy(baa)
end subroutine boxarray_box_boundary_n
subroutine boxarray_boundary_n(bao, ba, n)
type(boxarray), intent(out) :: bao
type(boxarray), intent(in) :: ba
integer, intent(in) :: n
call boxarray_build_copy(bao, ba)
call boxarray_grow(bao, n)
call boxarray_diff(bao, ba)
end subroutine boxarray_boundary_n
pure function boxarray_nboxes(ba) result(r)
type(boxarray), intent(in) :: ba
integer :: r
r = ba%nboxes
end function boxarray_nboxes
function boxarray_volume(ba) result(r)
type(boxarray), intent(in) :: ba
integer(kind=ll_t) :: r
integer :: i
r = 0_ll_t
do i = 1, ba%nboxes
r = r + box_volume(ba%bxs(i))
end do
end function boxarray_volume
pure function boxarray_bbox(ba) result(r)
type(boxarray), intent(in) :: ba
type(box) :: r
integer :: i
r = nobox(ba%dim)
do i = 1, ba%nboxes
r = bbox(r, ba%bxs(i))
end do
end function boxarray_bbox
subroutine boxarray_diff(bao, ba)
type(boxarray), intent(inout) :: bao
type(boxarray), intent(in) :: ba
type(list_box) :: bl, bl1, bl2
integer :: i
call build(bl1, ba%bxs)
do i = 1, bao%nboxes
bl2 = boxlist_boxlist_diff(bao%bxs(i), bl1)
call splice(bl, bl2)
end do
call boxarray_destroy(bao)
call boxarray_build_l(bao, bl)
call destroy(bl)
call destroy(bl1)
end subroutine boxarray_diff
subroutine boxarray_maxsize_i(bxa, chunk)
type(boxarray), intent(inout) :: bxa
integer, intent(in) :: chunk
integer :: vchunk(bxa%dim)
vchunk = chunk
call boxarray_maxsize_v(bxa, vchunk)
end subroutine boxarray_maxsize_i
subroutine boxarray_maxsize_v(bxa, chunk)
type(boxarray), intent(inout) :: bxa
integer, intent(in), dimension(:) :: chunk
type(list_box) :: bl
bl = boxarray_maxsize_l(bxa, chunk)
call boxarray_destroy(bxa)
call boxarray_build_l(bxa, bl)
call destroy(bl)
end subroutine boxarray_maxsize_v
function boxarray_maxsize_l(bxa, chunk) result(r)
type(list_box) :: r
type(boxarray), intent(in) :: bxa
integer, intent(in), dimension(:) :: chunk
integer :: i,k
type(list_box_node), pointer :: li
integer :: len(bxa%dim)
integer :: nl, bs, rt, nblk, sz, ex, ks, ps
type(box) :: bxr, bxl
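!
! Copy the input boxes into the list, then chop any box whose extent
! in some direction exceeds CHUNK, choosing cut points that preserve
! the largest power of two common to the extent and the chunk size.
!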
do i = 1, bxa%nboxes
call push_back(r, bxa%bxs(i))
end do
li => begin(r)
do while ( associated(li) )
len = extent(value(li))
do i = 1, bxa%dim
if ( len(i) > chunk(i) ) then
rt = 1
bs = chunk(i)
nl = len(i)
do while ( mod(bs,2) == 0 .AND. mod(nl,2) == 0)
rt = rt * 2
bs = bs/2
nl = nl/2
end do
nblk = nl/bs
if ( mod(nl,bs) /= 0 ) nblk = nblk + 1
sz = nl/nblk
ex = mod(nl,nblk)
do k = 0, nblk-2
if ( k < ex ) then
ks = (sz+1)*rt
else
ks = sz*rt
end if
ps = upb(value(li), i) - ks + 1
call box_chop(value(li), bxr, bxl, i, ps)
call set(li, bxr)
call push_back(r, bxl)
end do
end if
end do
li => next(li)
end do
end function boxarray_maxsize_l
end module boxarray_module
|
/*
* JSONConverterInterpreter_impl.hpp
*
*
* @date 20-10-2018
* @author Teddy DIDE
* @version 1.00
*/
#include <boost/algorithm/string/replace.hpp>
#include "String.hpp"
#include "Array.hpp"
#include "Structure.hpp"
#include "Numeric.hpp"
#define A(str) encode<EncodingT,ansi>(str)
#define C(str) encode<ansi,EncodingT>(str)
NAMESPACE_BEGIN(interp)
template <class EncodingT>
typename EncodingT::string_t JSONConverterInterpreter<EncodingT>::escapeChar(const typename EncodingT::string_t& str)
{
typename EncodingT::string_t res = str;
boost::algorithm::replace_all(res, "\\", "\\\\");
boost::algorithm::replace_all(res, "\"", "\\\"");
boost::algorithm::replace_all(res, "\r", "\\r");
boost::algorithm::replace_all(res, "\n", "\\n");
boost::algorithm::replace_all(res, "\t", "\\t");
return res;
}
template <class EncodingT>
typename EncodingT::string_t JSONConverterInterpreter<EncodingT>::unEscapeChar(const typename EncodingT::string_t& str)
{
typename EncodingT::string_t res = str;
boost::algorithm::replace_all(res, "\\\"", "\"");
boost::algorithm::replace_all(res, "\\r", "\r");
boost::algorithm::replace_all(res, "\\n", "\n");
boost::algorithm::replace_all(res, "\\t", "\t");
boost::algorithm::replace_all(res, "\\\\", "\\");
return res;
}
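// Note: escapeChar must replace backslashes first, and unEscapeChar must
// restore them last; any other ordering would corrupt sequences such as "\\n".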
template <class EncodingT>
typename EncodingT::string_t JSONConverterInterpreter<EncodingT>::shrinkText(const typename EncodingT::string_t& str)
{
// Use a half-open range [i, j) so that empty and single-character
// inputs are handled without unsigned underflow.
size_t i = 0, j = str.size();
while (i < j && (str[i] == ' ' || str[i] == '\t' || str[i] == '\r' || str[i] == '\n')) ++i;
while (j > i && (str[j - 1] == ' ' || str[j - 1] == '\t' || str[j - 1] == '\r' || str[j - 1] == '\n')) --j;
typename EncodingT::string_t res;
if (i < j)
{
res = str.substr(i, j - i);
}
return res;
}
template <class EncodingT>
size_t JSONConverterInterpreter<EncodingT>::findChar(const typename EncodingT::string_t& str, const typename EncodingT::char_t& ch, size_t start)
{
bool inString = false;
bool escaping = false;
size_t inArray = 0U;
size_t inStructure = 0U;
size_t res = start;
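// Scan for `ch` at top level only: occurrences inside a quoted string
// (tracking backslash escapes) or inside nested [...] / {...} groups
// are skipped.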
while ((res < str.size()) && (inString || (inArray > 0U) || (inStructure > 0U) || (str[res] != ch)))
{
inString = (inString != ((str[res] == '"') && !escaping));
escaping = (str[res] == '\\') && !escaping;
if (!inString && (str[res] == '['))
{
++inArray;
}
else if (!inString && (str[res] == ']'))
{
--inArray;
}
else if (!inString && (str[res] == '{'))
{
++inStructure;
}
else if (!inString && (str[res] == '}'))
{
--inStructure;
}
++res;
}
return res;
}
template <class EncodingT>
typename EncodingT::string_t JSONConverterInterpreter<EncodingT>::toNativeText(const boost::shared_ptr< Base<EncodingT> >& object)
{
typename EncodingT::string_t result;
boost::shared_ptr< Numeric<EncodingT> > num;
boost::shared_ptr< String<EncodingT> > str;
boost::shared_ptr< Array<EncodingT> > arr;
boost::shared_ptr< Structure<EncodingT> > st;
if ((num = dynamic_pointer_cast< Numeric<EncodingT> >(object)) != nullptr)
{
result = num->toString();
}
else if ((str = dynamic_pointer_cast< String<EncodingT> >(object)) != nullptr)
{
result = UCS("\"") + escapeChar(str->value()) + UCS("\"");
}
else if ((arr = dynamic_pointer_cast< Array<EncodingT> >(object)) != nullptr)
{
size_t lg = arr->length();
result = UCS("[");
for (size_t i = 0; i < lg; ++i)
{
result += toNativeText(arr->valueAt(i));
if (i != lg-1)
{
result += UCS(",");
}
}
result += UCS("]");
}
else if ((st = dynamic_pointer_cast< Structure<EncodingT> >(object)) != nullptr)
{
size_t lg = st->getFieldsCount();
result = UCS("{");
for (size_t i = 0; i < lg; ++i)
{
auto pairIterator = st->getField(i);
result += UCS("\"") + pairIterator->first + UCS("\":") + toNativeText(pairIterator->second);
if (i != lg-1)
{
result += UCS(",");
}
}
result += UCS("}");
}
else
{
Category * logger = &Category::getInstance(LOGNAME);
logger->errorStream() << "Numeric, String, Array or Structure expected, got " << A(object->getClassName());
}
return result;
}
template <class EncodingT>
boost::shared_ptr< Base<EncodingT> > JSONConverterInterpreter<EncodingT>::fromNativeText(const typename EncodingT::string_t& nativeText)
{
boost::shared_ptr< Base<EncodingT> > obj(new Base<EncodingT>());
long long llvalue;
double dvalue;
if ((nativeText.size() >= 2U) && (nativeText.front() == '{') && (nativeText.back() == '}'))
{
Structure<EncodingT>* st = new Structure<EncodingT>();
typename EncodingT::string_t content = shrinkText(nativeText.substr(1U, nativeText.size() - 2U));
if (!content.empty())
{
size_t parsed = 0U;
while (parsed < content.size())
{
size_t i = findChar(content, ':', parsed);
if (i < content.size())
{
size_t j = findChar(content, ',', i);
typename EncodingT::string_t fieldstr = shrinkText(content.substr(parsed, i - parsed));
if (fieldstr.size() >= 2U)
{
typename EncodingT::string_t fieldName = fieldstr.substr(1U, fieldstr.size() - 2U);
boost::shared_ptr< Base<EncodingT> > fieldValue = fromNativeText(content.substr(i + 1U, j - (i + 1U)));
st->insertField(fieldName, fieldValue);
}
else
{
parsed = i;
Category * logger = &Category::getInstance(LOGNAME);
logger->errorStream() << "Unrecognized JSON text : " << A(fieldstr);
}
if (j < content.size())
{
parsed = j + 1U;
}
else
{
parsed = j;
}
}
else
{
parsed = i;
Category * logger = &Category::getInstance(LOGNAME);
logger->errorStream() << "Unrecognized JSON text : " << A(content);
}
}
}
obj.reset(st);
}
else if ((nativeText.size() >= 2U) && (nativeText.front() == '[') && (nativeText.back() == ']'))
{
Array<EncodingT>* arr = new Array<EncodingT>();
typename EncodingT::string_t content = shrinkText(nativeText.substr(1U, nativeText.size()-2U));
if (!content.empty())
{
size_t parsed = 0U;
while (parsed < content.size())
{
size_t i = findChar(content, ',', parsed);
boost::shared_ptr< Base<EncodingT> > fieldValue = fromNativeText(content.substr(parsed, i - parsed));
arr->addValue(fieldValue);
if (i < content.size())
{
parsed = i + 1U;
}
else
{
parsed = i;
}
}
}
obj.reset(arr);
}
else if ((nativeText.size() >= 2U) && (nativeText.front() == '"') && (nativeText.back() == '"'))
{
obj.reset(new String<EncodingT>(unEscapeChar(nativeText.substr(1U, nativeText.size()-2U))));
}
else if (Convert<long long>::try_parse(nativeText, llvalue))
{
obj.reset(new Numeric<EncodingT>(llvalue));
}
else if (Convert<double>::try_parse(nativeText, dvalue))
{
obj.reset(new Numeric<EncodingT>(dvalue));
}
else
{
Category * logger = &Category::getInstance(LOGNAME);
logger->errorStream() << "Unrecognized JSON text : " << A(nativeText);
}
return obj;
}
template <class EncodingT>
JSONConverterInterpreter<EncodingT>::JSONConverterInterpreter()
{
}
template <class EncodingT>
typename EncodingT::string_t JSONConverterInterpreter<EncodingT>::toString() const
{
return EncodingT::EMPTY;
}
template <class EncodingT>
boost::shared_ptr< Base<EncodingT> > JSONConverterInterpreter<EncodingT>::clone() const
{
return boost::shared_ptr< Base<EncodingT> >(new JSONConverterInterpreter<EncodingT>());
}
template <class EncodingT>
typename EncodingT::string_t JSONConverterInterpreter<EncodingT>::getClassName() const
{
return UCS("JSONConverter");
}
template <class EncodingT>
boost::shared_ptr< Base<EncodingT> > JSONConverterInterpreter<EncodingT>::invoke(const typename EncodingT::string_t& method, std::vector< boost::shared_ptr< Base<EncodingT> > >& params)
{
boost::shared_ptr< Base<EncodingT> > obj(new Base<EncodingT>());
ParameterArray args, ret;
if (check_parameters_array(params, args))
{
if (tryInvoke(this, UCS("JSONConverter"), method, args, ret) ||
tryInvoke(this, UCS("Base"), method, args, ret))
{
find_parameter(ret, FACTORY_RETURN_PARAMETER, obj);
for (size_t i = 0; i < params.size(); ++i)
{
find_parameter(ret, i, params[i]);
}
}
else
{
Category* logger = &Category::getInstance(LOGNAME);
logger->errorStream() << "Unexpected call in JSONConverter, no method \"" << A(method) << "\" exists.";
}
}
return obj;
}
template <class EncodingT>
boost::shared_ptr< Base<EncodingT> > JSONConverterInterpreter<EncodingT>::toText(const boost::shared_ptr< Base<EncodingT> >& object) const
{
return boost::make_shared< String<EncodingT> >(toNativeText(object));
}
template <class EncodingT>
boost::shared_ptr< Base<EncodingT> > JSONConverterInterpreter<EncodingT>::fromText(const boost::shared_ptr< Base<EncodingT> >& text) const
{
boost::shared_ptr< Base<EncodingT> > obj(new Base<EncodingT>());
boost::shared_ptr< String<EncodingT> > str = dynamic_pointer_cast< String<EncodingT> >(text);
if (str != nullptr)
{
typename EncodingT::string_t nativeText = shrinkText(str->value());
obj = fromNativeText(nativeText);
}
else
{
Category * logger = &Category::getInstance(LOGNAME);
logger->errorStream() << "String expected, got " << A(text->getClassName());
}
return obj;
}
NAMESPACE_END
#undef A
#undef C
|
[STATEMENT]
lemma emp_UNpart01:
assumes "\<And> n. n < length cl \<Longrightarrow> {} \<notin> P n"
shows "{} \<notin> UNpart01 cl dl P - {BrnFT cl dl}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {} \<notin> UNpart01 cl dl P - {BrnFT cl dl}
[PROOF STEP]
using assms emp_UNpart1
[PROOF STATE]
proof (prove)
using this:
?n5 < length cl \<Longrightarrow> {} \<notin> P ?n5
(\<And>n. n < length ?cl \<Longrightarrow> {} \<notin> ?P n) \<Longrightarrow> {} \<notin> UNpart1 ?cl ?dl ?P
goal (1 subgoal):
1. {} \<notin> UNpart01 cl dl P - {BrnFT cl dl}
[PROOF STEP]
unfolding UNpart01_def
[PROOF STATE]
proof (prove)
using this:
?n5 < length cl \<Longrightarrow> {} \<notin> P ?n5
(\<And>n. n < length ?cl \<Longrightarrow> {} \<notin> ?P n) \<Longrightarrow> {} \<notin> UNpart1 ?cl ?dl ?P
goal (1 subgoal):
1. {} \<notin> {BrnFT cl dl} \<union> UNpart1 cl dl P - {BrnFT cl dl}
[PROOF STEP]
by auto
|
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
m : M
⊢ ↑Dual.transpose (↑(dualTensorHom R M M) (f ⊗ₜ[R] m)) =
↑(dualTensorHom R (Dual R M) (Dual R M)) (↑(Dual.eval R M) m ⊗ₜ[R] f)
[PROOFSTEP]
ext f' m'
[GOAL]
case h.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
m : M
f' : Dual R M
m' : M
⊢ ↑(↑(↑Dual.transpose (↑(dualTensorHom R M M) (f ⊗ₜ[R] m))) f') m' =
↑(↑(↑(dualTensorHom R (Dual R M) (Dual R M)) (↑(Dual.eval R M) m ⊗ₜ[R] f)) f') m'
[PROOFSTEP]
simp only [Dual.transpose_apply, coe_comp, Function.comp_apply, dualTensorHom_apply, LinearMap.map_smulₛₗ,
RingHom.id_apply, Algebra.id.smul_eq_mul, Dual.eval_apply, LinearMap.smul_apply]
[GOAL]
case h.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
m : M
f' : Dual R M
m' : M
⊢ ↑f m' * ↑f' m = ↑f' m * ↑f m'
[PROOFSTEP]
exact mul_comm _ _
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
p : P
⊢ prodMap (↑(dualTensorHom R M P) (f ⊗ₜ[R] p)) 0 =
↑(dualTensorHom R (M × N) (P × Q)) (comp f (fst R M N) ⊗ₜ[R] ↑(inl R P Q) p)
[PROOFSTEP]
ext
[GOAL]
case hl.h.h₁
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
p : P
x✝ : M
⊢ (↑(comp (prodMap (↑(dualTensorHom R M P) (f ⊗ₜ[R] p)) 0) (inl R M N)) x✝).fst =
(↑(comp (↑(dualTensorHom R (M × N) (P × Q)) (comp f (fst R M N) ⊗ₜ[R] ↑(inl R P Q) p)) (inl R M N)) x✝).fst
[PROOFSTEP]
simp only [coe_comp, coe_inl, Function.comp_apply, prodMap_apply, dualTensorHom_apply, fst_apply, Prod.smul_mk,
LinearMap.zero_apply, smul_zero]
[GOAL]
case hl.h.h₂
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
p : P
x✝ : M
⊢ (↑(comp (prodMap (↑(dualTensorHom R M P) (f ⊗ₜ[R] p)) 0) (inl R M N)) x✝).snd =
(↑(comp (↑(dualTensorHom R (M × N) (P × Q)) (comp f (fst R M N) ⊗ₜ[R] ↑(inl R P Q) p)) (inl R M N)) x✝).snd
[PROOFSTEP]
simp only [coe_comp, coe_inl, Function.comp_apply, prodMap_apply, dualTensorHom_apply, fst_apply, Prod.smul_mk,
LinearMap.zero_apply, smul_zero]
[GOAL]
case hr.h.h₁
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
p : P
x✝ : N
⊢ (↑(comp (prodMap (↑(dualTensorHom R M P) (f ⊗ₜ[R] p)) 0) (inr R M N)) x✝).fst =
(↑(comp (↑(dualTensorHom R (M × N) (P × Q)) (comp f (fst R M N) ⊗ₜ[R] ↑(inl R P Q) p)) (inr R M N)) x✝).fst
[PROOFSTEP]
simp only [coe_comp, coe_inl, Function.comp_apply, prodMap_apply, dualTensorHom_apply, fst_apply, Prod.smul_mk,
LinearMap.zero_apply, smul_zero]
[GOAL]
case hr.h.h₂
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
p : P
x✝ : N
⊢ (↑(comp (prodMap (↑(dualTensorHom R M P) (f ⊗ₜ[R] p)) 0) (inr R M N)) x✝).snd =
(↑(comp (↑(dualTensorHom R (M × N) (P × Q)) (comp f (fst R M N) ⊗ₜ[R] ↑(inl R P Q) p)) (inr R M N)) x✝).snd
[PROOFSTEP]
simp only [coe_comp, coe_inl, Function.comp_apply, prodMap_apply, dualTensorHom_apply, fst_apply, Prod.smul_mk,
LinearMap.zero_apply, smul_zero]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
g : Dual R N
q : Q
⊢ prodMap 0 (↑(dualTensorHom R N Q) (g ⊗ₜ[R] q)) =
↑(dualTensorHom R (M × N) (P × Q)) (comp g (snd R M N) ⊗ₜ[R] ↑(inr R P Q) q)
[PROOFSTEP]
ext
[GOAL]
case hl.h.h₁
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
g : Dual R N
q : Q
x✝ : M
⊢ (↑(comp (prodMap 0 (↑(dualTensorHom R N Q) (g ⊗ₜ[R] q))) (inl R M N)) x✝).fst =
(↑(comp (↑(dualTensorHom R (M × N) (P × Q)) (comp g (snd R M N) ⊗ₜ[R] ↑(inr R P Q) q)) (inl R M N)) x✝).fst
[PROOFSTEP]
simp only [coe_comp, coe_inr, Function.comp_apply, prodMap_apply, dualTensorHom_apply, snd_apply, Prod.smul_mk,
LinearMap.zero_apply, smul_zero]
[GOAL]
case hl.h.h₂
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
g : Dual R N
q : Q
x✝ : M
⊢ (↑(comp (prodMap 0 (↑(dualTensorHom R N Q) (g ⊗ₜ[R] q))) (inl R M N)) x✝).snd =
(↑(comp (↑(dualTensorHom R (M × N) (P × Q)) (comp g (snd R M N) ⊗ₜ[R] ↑(inr R P Q) q)) (inl R M N)) x✝).snd
[PROOFSTEP]
simp only [coe_comp, coe_inr, Function.comp_apply, prodMap_apply, dualTensorHom_apply, snd_apply, Prod.smul_mk,
LinearMap.zero_apply, smul_zero]
[GOAL]
case hr.h.h₁
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
g : Dual R N
q : Q
x✝ : N
⊢ (↑(comp (prodMap 0 (↑(dualTensorHom R N Q) (g ⊗ₜ[R] q))) (inr R M N)) x✝).fst =
(↑(comp (↑(dualTensorHom R (M × N) (P × Q)) (comp g (snd R M N) ⊗ₜ[R] ↑(inr R P Q) q)) (inr R M N)) x✝).fst
[PROOFSTEP]
simp only [coe_comp, coe_inr, Function.comp_apply, prodMap_apply, dualTensorHom_apply, snd_apply, Prod.smul_mk,
LinearMap.zero_apply, smul_zero]
[GOAL]
case hr.h.h₂
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
g : Dual R N
q : Q
x✝ : N
⊢ (↑(comp (prodMap 0 (↑(dualTensorHom R N Q) (g ⊗ₜ[R] q))) (inr R M N)) x✝).snd =
(↑(comp (↑(dualTensorHom R (M × N) (P × Q)) (comp g (snd R M N) ⊗ₜ[R] ↑(inr R P Q) q)) (inr R M N)) x✝).snd
[PROOFSTEP]
simp only [coe_comp, coe_inr, Function.comp_apply, prodMap_apply, dualTensorHom_apply, snd_apply, Prod.smul_mk,
LinearMap.zero_apply, smul_zero]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
p : P
g : Dual R N
q : Q
⊢ TensorProduct.map (↑(dualTensorHom R M P) (f ⊗ₜ[R] p)) (↑(dualTensorHom R N Q) (g ⊗ₜ[R] q)) =
↑(dualTensorHom R (M ⊗[R] N) (P ⊗[R] Q)) (↑(dualDistrib R M N) (f ⊗ₜ[R] g) ⊗ₜ[R] p ⊗ₜ[R] q)
[PROOFSTEP]
ext m n
[GOAL]
case H.h.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
p : P
g : Dual R N
q : Q
m : M
n : N
⊢ ↑(↑(compr₂ (TensorProduct.mk R M N)
(TensorProduct.map (↑(dualTensorHom R M P) (f ⊗ₜ[R] p)) (↑(dualTensorHom R N Q) (g ⊗ₜ[R] q))))
m)
n =
↑(↑(compr₂ (TensorProduct.mk R M N)
(↑(dualTensorHom R (M ⊗[R] N) (P ⊗[R] Q)) (↑(dualDistrib R M N) (f ⊗ₜ[R] g) ⊗ₜ[R] p ⊗ₜ[R] q)))
m)
n
[PROOFSTEP]
simp only [compr₂_apply, mk_apply, map_tmul, dualTensorHom_apply, dualDistrib_apply, ← smul_tmul_smul]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
n : N
g : Dual R N
p : P
⊢ comp (↑(dualTensorHom R N P) (g ⊗ₜ[R] p)) (↑(dualTensorHom R M N) (f ⊗ₜ[R] n)) =
↑g n • ↑(dualTensorHom R M P) (f ⊗ₜ[R] p)
[PROOFSTEP]
ext m
[GOAL]
case h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
n : N
g : Dual R N
p : P
m : M
⊢ ↑(comp (↑(dualTensorHom R N P) (g ⊗ₜ[R] p)) (↑(dualTensorHom R M N) (f ⊗ₜ[R] n))) m =
↑(↑g n • ↑(dualTensorHom R M P) (f ⊗ₜ[R] p)) m
[PROOFSTEP]
simp only [coe_comp, Function.comp_apply, dualTensorHom_apply, LinearMap.map_smul, RingHom.id_apply,
LinearMap.smul_apply]
[GOAL]
case h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommSemiring R
inst✝⁹ : AddCommMonoid M
inst✝⁸ : AddCommMonoid N
inst✝⁷ : AddCommMonoid P
inst✝⁶ : AddCommMonoid Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
n : N
g : Dual R N
p : P
m : M
⊢ ↑f m • ↑g n • p = ↑g n • ↑f m • p
[PROOFSTEP]
rw [smul_comm]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁴ : CommSemiring R
inst✝¹³ : AddCommMonoid M
inst✝¹² : AddCommMonoid N
inst✝¹¹ : AddCommMonoid P
inst✝¹⁰ : AddCommMonoid Q
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module R P
inst✝⁶ : Module R Q
inst✝⁵ : DecidableEq ι
inst✝⁴ : Fintype ι
b : Basis ι R M
m : Type u_1
n : Type u_2
inst✝³ : Fintype m
inst✝² : Fintype n
inst✝¹ : DecidableEq m
inst✝ : DecidableEq n
bM : Basis m R M
bN : Basis n R N
j : m
i : n
⊢ ↑(toMatrix bM bN) (↑(dualTensorHom R M N) (Basis.coord bM j ⊗ₜ[R] ↑bN i)) = stdBasisMatrix i j 1
[PROOFSTEP]
ext i' j'
[GOAL]
case a.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁴ : CommSemiring R
inst✝¹³ : AddCommMonoid M
inst✝¹² : AddCommMonoid N
inst✝¹¹ : AddCommMonoid P
inst✝¹⁰ : AddCommMonoid Q
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module R P
inst✝⁶ : Module R Q
inst✝⁵ : DecidableEq ι
inst✝⁴ : Fintype ι
b : Basis ι R M
m : Type u_1
n : Type u_2
inst✝³ : Fintype m
inst✝² : Fintype n
inst✝¹ : DecidableEq m
inst✝ : DecidableEq n
bM : Basis m R M
bN : Basis n R N
j : m
i i' : n
j' : m
⊢ ↑(toMatrix bM bN) (↑(dualTensorHom R M N) (Basis.coord bM j ⊗ₜ[R] ↑bN i)) i' j' = stdBasisMatrix i j 1 i' j'
[PROOFSTEP]
by_cases hij : i = i' ∧ j = j'
[GOAL]
case pos
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁴ : CommSemiring R
inst✝¹³ : AddCommMonoid M
inst✝¹² : AddCommMonoid N
inst✝¹¹ : AddCommMonoid P
inst✝¹⁰ : AddCommMonoid Q
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module R P
inst✝⁶ : Module R Q
inst✝⁵ : DecidableEq ι
inst✝⁴ : Fintype ι
b : Basis ι R M
m : Type u_1
n : Type u_2
inst✝³ : Fintype m
inst✝² : Fintype n
inst✝¹ : DecidableEq m
inst✝ : DecidableEq n
bM : Basis m R M
bN : Basis n R N
j : m
i i' : n
j' : m
hij : i = i' ∧ j = j'
⊢ ↑(toMatrix bM bN) (↑(dualTensorHom R M N) (Basis.coord bM j ⊗ₜ[R] ↑bN i)) i' j' = stdBasisMatrix i j 1 i' j'
[PROOFSTEP]
simp [LinearMap.toMatrix_apply, Finsupp.single_eq_pi_single, hij]
[GOAL]
case neg
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁴ : CommSemiring R
inst✝¹³ : AddCommMonoid M
inst✝¹² : AddCommMonoid N
inst✝¹¹ : AddCommMonoid P
inst✝¹⁰ : AddCommMonoid Q
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module R P
inst✝⁶ : Module R Q
inst✝⁵ : DecidableEq ι
inst✝⁴ : Fintype ι
b : Basis ι R M
m : Type u_1
n : Type u_2
inst✝³ : Fintype m
inst✝² : Fintype n
inst✝¹ : DecidableEq m
inst✝ : DecidableEq n
bM : Basis m R M
bN : Basis n R N
j : m
i i' : n
j' : m
hij : ¬(i = i' ∧ j = j')
⊢ ↑(toMatrix bM bN) (↑(dualTensorHom R M N) (Basis.coord bM j ⊗ₜ[R] ↑bN i)) i' j' = stdBasisMatrix i j 1 i' j'
[PROOFSTEP]
simp [LinearMap.toMatrix_apply, Finsupp.single_eq_pi_single, hij]
[GOAL]
case neg
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁴ : CommSemiring R
inst✝¹³ : AddCommMonoid M
inst✝¹² : AddCommMonoid N
inst✝¹¹ : AddCommMonoid P
inst✝¹⁰ : AddCommMonoid Q
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module R P
inst✝⁶ : Module R Q
inst✝⁵ : DecidableEq ι
inst✝⁴ : Fintype ι
b : Basis ι R M
m : Type u_1
n : Type u_2
inst✝³ : Fintype m
inst✝² : Fintype n
inst✝¹ : DecidableEq m
inst✝ : DecidableEq n
bM : Basis m R M
bN : Basis n R N
j : m
i i' : n
j' : m
hij : ¬(i = i' ∧ j = j')
⊢ Pi.single i (Pi.single j' 1 j) i' = 0
[PROOFSTEP]
rw [and_iff_not_or_not, Classical.not_not] at hij
[GOAL]
case neg
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁴ : CommSemiring R
inst✝¹³ : AddCommMonoid M
inst✝¹² : AddCommMonoid N
inst✝¹¹ : AddCommMonoid P
inst✝¹⁰ : AddCommMonoid Q
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module R P
inst✝⁶ : Module R Q
inst✝⁵ : DecidableEq ι
inst✝⁴ : Fintype ι
b : Basis ι R M
m : Type u_1
n : Type u_2
inst✝³ : Fintype m
inst✝² : Fintype n
inst✝¹ : DecidableEq m
inst✝ : DecidableEq n
bM : Basis m R M
bN : Basis n R N
j : m
i i' : n
j' : m
hij : ¬i = i' ∨ ¬j = j'
⊢ Pi.single i (Pi.single j' 1 j) i' = 0
[PROOFSTEP]
cases' hij with hij hij
[GOAL]
case neg.inl
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁴ : CommSemiring R
inst✝¹³ : AddCommMonoid M
inst✝¹² : AddCommMonoid N
inst✝¹¹ : AddCommMonoid P
inst✝¹⁰ : AddCommMonoid Q
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module R P
inst✝⁶ : Module R Q
inst✝⁵ : DecidableEq ι
inst✝⁴ : Fintype ι
b : Basis ι R M
m : Type u_1
n : Type u_2
inst✝³ : Fintype m
inst✝² : Fintype n
inst✝¹ : DecidableEq m
inst✝ : DecidableEq n
bM : Basis m R M
bN : Basis n R N
j : m
i i' : n
j' : m
hij : ¬i = i'
⊢ Pi.single i (Pi.single j' 1 j) i' = 0
[PROOFSTEP]
simp [hij]
[GOAL]
case neg.inr
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁴ : CommSemiring R
inst✝¹³ : AddCommMonoid M
inst✝¹² : AddCommMonoid N
inst✝¹¹ : AddCommMonoid P
inst✝¹⁰ : AddCommMonoid Q
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module R P
inst✝⁶ : Module R Q
inst✝⁵ : DecidableEq ι
inst✝⁴ : Fintype ι
b : Basis ι R M
m : Type u_1
n : Type u_2
inst✝³ : Fintype m
inst✝² : Fintype n
inst✝¹ : DecidableEq m
inst✝ : DecidableEq n
bM : Basis m R M
bN : Basis n R N
j : m
i i' : n
j' : m
hij : ¬j = j'
⊢ Pi.single i (Pi.single j' 1 j) i' = 0
[PROOFSTEP]
simp [hij]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommRing R
inst✝⁹ : AddCommGroup M
inst✝⁸ : AddCommGroup N
inst✝⁷ : AddCommGroup P
inst✝⁶ : AddCommGroup Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
⊢ comp (dualTensorHom R M N)
(∑ i : ι, comp (↑(TensorProduct.mk R ((fun x => Dual R M) i) N) (↑(Basis.dualBasis b) i)) (↑applyₗ (↑b i))) =
LinearMap.id
[PROOFSTEP]
ext f m
[GOAL]
case h.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommRing R
inst✝⁹ : AddCommGroup M
inst✝⁸ : AddCommGroup N
inst✝⁷ : AddCommGroup P
inst✝⁶ : AddCommGroup Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : M →ₗ[R] N
m : M
⊢ ↑(↑(comp (dualTensorHom R M N)
(∑ i : ι,
comp (↑(TensorProduct.mk R ((fun x => Dual R M) i) N) (↑(Basis.dualBasis b) i)) (↑applyₗ (↑b i))))
f)
m =
↑(↑LinearMap.id f) m
[PROOFSTEP]
simp only [applyₗ_apply_apply, coeFn_sum, dualTensorHom_apply, mk_apply, id_coe, id.def, Fintype.sum_apply,
Function.comp_apply, Basis.coe_dualBasis, coe_comp, Basis.coord_apply, ← f.map_smul, (dualTensorHom R M N).map_sum, ←
f.map_sum, b.sum_repr]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommRing R
inst✝⁹ : AddCommGroup M
inst✝⁸ : AddCommGroup N
inst✝⁷ : AddCommGroup P
inst✝⁶ : AddCommGroup Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
⊢ comp (∑ i : ι, comp (↑(TensorProduct.mk R ((fun x => Dual R M) i) N) (↑(Basis.dualBasis b) i)) (↑applyₗ (↑b i)))
(dualTensorHom R M N) =
LinearMap.id
[PROOFSTEP]
ext f m
[GOAL]
case H.h.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommRing R
inst✝⁹ : AddCommGroup M
inst✝⁸ : AddCommGroup N
inst✝⁷ : AddCommGroup P
inst✝⁶ : AddCommGroup Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
f : Dual R M
m : N
⊢ ↑(↑(compr₂ (TensorProduct.mk R (Dual R M) N)
(comp
(∑ i : ι,
comp (↑(TensorProduct.mk R ((fun x => Dual R M) i) N) (↑(Basis.dualBasis b) i)) (↑applyₗ (↑b i)))
(dualTensorHom R M N)))
f)
m =
↑(↑(compr₂ (TensorProduct.mk R (Dual R M) N) LinearMap.id) f) m
[PROOFSTEP]
simp only [applyₗ_apply_apply, coeFn_sum, dualTensorHom_apply, mk_apply, id_coe, id.def, Fintype.sum_apply,
Function.comp_apply, Basis.coe_dualBasis, coe_comp, compr₂_apply, tmul_smul, smul_tmul', ← sum_tmul,
Basis.sum_dual_apply_smul_coord]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommRing R
inst✝⁹ : AddCommGroup M
inst✝⁸ : AddCommGroup N
inst✝⁷ : AddCommGroup P
inst✝⁶ : AddCommGroup Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
x : Dual R M ⊗[R] N
⊢ ↑(dualTensorHomEquivOfBasis b) x = ↑(dualTensorHom R M N) x
[PROOFSTEP]
ext
[GOAL]
case h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommRing R
inst✝⁹ : AddCommGroup M
inst✝⁸ : AddCommGroup N
inst✝⁷ : AddCommGroup P
inst✝⁶ : AddCommGroup Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
x : Dual R M ⊗[R] N
x✝ : M
⊢ ↑(↑(dualTensorHomEquivOfBasis b) x) x✝ = ↑(↑(dualTensorHom R M N) x) x✝
[PROOFSTEP]
rfl
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommRing R
inst✝⁹ : AddCommGroup M
inst✝⁸ : AddCommGroup N
inst✝⁷ : AddCommGroup P
inst✝⁶ : AddCommGroup Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
x : Dual R M ⊗[R] N
⊢ ↑(LinearEquiv.symm (dualTensorHomEquivOfBasis b)) (↑(dualTensorHom R M N) x) = x
[PROOFSTEP]
rw [← dualTensorHomEquivOfBasis_apply b, LinearEquiv.symm_apply_apply <| dualTensorHomEquivOfBasis (N := N) b]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹⁰ : CommRing R
inst✝⁹ : AddCommGroup M
inst✝⁸ : AddCommGroup N
inst✝⁷ : AddCommGroup P
inst✝⁶ : AddCommGroup Q
inst✝⁵ : Module R M
inst✝⁴ : Module R N
inst✝³ : Module R P
inst✝² : Module R Q
inst✝¹ : DecidableEq ι
inst✝ : Fintype ι
b : Basis ι R M
x : M →ₗ[R] N
⊢ ↑(dualTensorHom R M N) (↑(LinearEquiv.symm (dualTensorHomEquivOfBasis b)) x) = x
[PROOFSTEP]
rw [← dualTensorHomEquivOfBasis_apply b, LinearEquiv.apply_symm_apply]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
⊢ ↑(lTensorHomEquivHomLTensor R M P Q) = lTensorHomToHomLTensor R M P Q
[PROOFSTEP]
classical
-- Porting note: missing decidable for choosing basis
let e := congr (LinearEquiv.refl R P) (dualTensorHomEquiv R M Q)
have h : Function.Surjective e.toLinearMap := e.surjective
refine' (cancel_right h).1 _
ext f q m
dsimp [lTensorHomEquivHomLTensor]
simp only [lTensorHomEquivHomLTensor, dualTensorHomEquiv, compr₂_apply, mk_apply, coe_comp, LinearEquiv.coe_toLinearMap,
Function.comp_apply, map_tmul, LinearEquiv.coe_coe, dualTensorHomEquivOfBasis_apply, LinearEquiv.trans_apply,
congr_tmul, LinearEquiv.refl_apply, dualTensorHomEquivOfBasis_symm_cancel_left, leftComm_tmul, dualTensorHom_apply,
tmul_smul]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
⊢ ↑(lTensorHomEquivHomLTensor R M P Q) = lTensorHomToHomLTensor R M P Q
[PROOFSTEP]
let e := congr (LinearEquiv.refl R P) (dualTensorHomEquiv R M Q)
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
e : P ⊗[R] Dual R M ⊗[R] Q ≃ₗ[R] P ⊗[R] (M →ₗ[R] Q) :=
TensorProduct.congr (LinearEquiv.refl R P) (dualTensorHomEquiv R M Q)
⊢ ↑(lTensorHomEquivHomLTensor R M P Q) = lTensorHomToHomLTensor R M P Q
[PROOFSTEP]
have h : Function.Surjective e.toLinearMap := e.surjective
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
e : P ⊗[R] Dual R M ⊗[R] Q ≃ₗ[R] P ⊗[R] (M →ₗ[R] Q) :=
TensorProduct.congr (LinearEquiv.refl R P) (dualTensorHomEquiv R M Q)
h : Function.Surjective ↑↑e
⊢ ↑(lTensorHomEquivHomLTensor R M P Q) = lTensorHomToHomLTensor R M P Q
[PROOFSTEP]
refine' (cancel_right h).1 _
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
e : P ⊗[R] Dual R M ⊗[R] Q ≃ₗ[R] P ⊗[R] (M →ₗ[R] Q) :=
TensorProduct.congr (LinearEquiv.refl R P) (dualTensorHomEquiv R M Q)
h : Function.Surjective ↑↑e
⊢ comp ↑(lTensorHomEquivHomLTensor R M P Q) ↑e = comp (lTensorHomToHomLTensor R M P Q) ↑e
[PROOFSTEP]
ext f q m
[GOAL]
case H.h.H.h.h.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
e : P ⊗[R] Dual R M ⊗[R] Q ≃ₗ[R] P ⊗[R] (M →ₗ[R] Q) :=
TensorProduct.congr (LinearEquiv.refl R P) (dualTensorHomEquiv R M Q)
h : Function.Surjective ↑↑e
f : P
q : Dual R M
m : Q
x✝ : M
⊢ ↑(↑(↑(compr₂ (TensorProduct.mk R (Dual R M) Q)
(↑(compr₂ (TensorProduct.mk R P (Dual R M ⊗[R] Q)) (comp ↑(lTensorHomEquivHomLTensor R M P Q) ↑e)) f))
q)
m)
x✝ =
↑(↑(↑(compr₂ (TensorProduct.mk R (Dual R M) Q)
(↑(compr₂ (TensorProduct.mk R P (Dual R M ⊗[R] Q)) (comp (lTensorHomToHomLTensor R M P Q) ↑e)) f))
q)
m)
x✝
[PROOFSTEP]
dsimp [lTensorHomEquivHomLTensor]
[GOAL]
case H.h.H.h.h.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
e : P ⊗[R] Dual R M ⊗[R] Q ≃ₗ[R] P ⊗[R] (M →ₗ[R] Q) :=
TensorProduct.congr (LinearEquiv.refl R P) (dualTensorHomEquiv R M Q)
h : Function.Surjective ↑↑e
f : P
q : Dual R M
m : Q
x✝ : M
⊢ ↑(↑(dualTensorHomEquivOfBasis (Free.chooseBasis R M))
(↑(leftComm R P (Dual R M) Q)
(f ⊗ₜ[R]
↑(LinearEquiv.symm (dualTensorHomEquivOfBasis (Free.chooseBasis R M)))
(↑(dualTensorHomEquivOfBasis (Free.chooseBasis R M)) (q ⊗ₜ[R] m)))))
x✝ =
f ⊗ₜ[R] ↑(↑(dualTensorHomEquivOfBasis (Free.chooseBasis R M)) (q ⊗ₜ[R] m)) x✝
[PROOFSTEP]
simp only [lTensorHomEquivHomLTensor, dualTensorHomEquiv, compr₂_apply, mk_apply, coe_comp, LinearEquiv.coe_toLinearMap,
Function.comp_apply, map_tmul, LinearEquiv.coe_coe, dualTensorHomEquivOfBasis_apply, LinearEquiv.trans_apply,
congr_tmul, LinearEquiv.refl_apply, dualTensorHomEquivOfBasis_symm_cancel_left, leftComm_tmul, dualTensorHom_apply,
tmul_smul]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
⊢ ↑(rTensorHomEquivHomRTensor R M P Q) = rTensorHomToHomRTensor R M P Q
[PROOFSTEP]
classical
-- Porting note: missing decidable for choosing basis
let e := congr (dualTensorHomEquiv R M P) (LinearEquiv.refl R Q)
have h : Function.Surjective e.toLinearMap := e.surjective
refine' (cancel_right h).1 _
ext f p q m
simp only [rTensorHomEquivHomRTensor, dualTensorHomEquiv, compr₂_apply, mk_apply, coe_comp, LinearEquiv.coe_toLinearMap,
Function.comp_apply, map_tmul, LinearEquiv.coe_coe, dualTensorHomEquivOfBasis_apply, LinearEquiv.trans_apply,
congr_tmul, dualTensorHomEquivOfBasis_symm_cancel_left, LinearEquiv.refl_apply, assoc_tmul, dualTensorHom_apply,
rTensorHomToHomRTensor_apply, smul_tmul']
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
⊢ ↑(rTensorHomEquivHomRTensor R M P Q) = rTensorHomToHomRTensor R M P Q
[PROOFSTEP]
let e := congr (dualTensorHomEquiv R M P) (LinearEquiv.refl R Q)
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
e : (Dual R M ⊗[R] P) ⊗[R] Q ≃ₗ[R] (M →ₗ[R] P) ⊗[R] Q :=
TensorProduct.congr (dualTensorHomEquiv R M P) (LinearEquiv.refl R Q)
⊢ ↑(rTensorHomEquivHomRTensor R M P Q) = rTensorHomToHomRTensor R M P Q
[PROOFSTEP]
have h : Function.Surjective e.toLinearMap := e.surjective
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
e : (Dual R M ⊗[R] P) ⊗[R] Q ≃ₗ[R] (M →ₗ[R] P) ⊗[R] Q :=
TensorProduct.congr (dualTensorHomEquiv R M P) (LinearEquiv.refl R Q)
h : Function.Surjective ↑↑e
⊢ ↑(rTensorHomEquivHomRTensor R M P Q) = rTensorHomToHomRTensor R M P Q
[PROOFSTEP]
refine' (cancel_right h).1 _
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
e : (Dual R M ⊗[R] P) ⊗[R] Q ≃ₗ[R] (M →ₗ[R] P) ⊗[R] Q :=
TensorProduct.congr (dualTensorHomEquiv R M P) (LinearEquiv.refl R Q)
h : Function.Surjective ↑↑e
⊢ comp ↑(rTensorHomEquivHomRTensor R M P Q) ↑e = comp (rTensorHomToHomRTensor R M P Q) ↑e
[PROOFSTEP]
ext f p q m
[GOAL]
case H.H.h.h.h.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
e : (Dual R M ⊗[R] P) ⊗[R] Q ≃ₗ[R] (M →ₗ[R] P) ⊗[R] Q :=
TensorProduct.congr (dualTensorHomEquiv R M P) (LinearEquiv.refl R Q)
h : Function.Surjective ↑↑e
f : Dual R M
p : P
q : Q
m : M
⊢ ↑(↑(↑(↑(compr₂ (TensorProduct.mk R (Dual R M) P)
(compr₂ (TensorProduct.mk R (Dual R M ⊗[R] P) Q) (comp ↑(rTensorHomEquivHomRTensor R M P Q) ↑e)))
f)
p)
q)
m =
↑(↑(↑(↑(compr₂ (TensorProduct.mk R (Dual R M) P)
(compr₂ (TensorProduct.mk R (Dual R M ⊗[R] P) Q) (comp (rTensorHomToHomRTensor R M P Q) ↑e)))
f)
p)
q)
m
[PROOFSTEP]
simp only [rTensorHomEquivHomRTensor, dualTensorHomEquiv, compr₂_apply, mk_apply, coe_comp, LinearEquiv.coe_toLinearMap,
Function.comp_apply, map_tmul, LinearEquiv.coe_coe, dualTensorHomEquivOfBasis_apply, LinearEquiv.trans_apply,
congr_tmul, dualTensorHomEquivOfBasis_symm_cancel_left, LinearEquiv.refl_apply, assoc_tmul, dualTensorHom_apply,
rTensorHomToHomRTensor_apply, smul_tmul']
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
x : P ⊗[R] (M →ₗ[R] Q)
⊢ ↑(lTensorHomEquivHomLTensor R M P Q) x = ↑(lTensorHomToHomLTensor R M P Q) x
[PROOFSTEP]
rw [← LinearEquiv.coe_toLinearMap, lTensorHomEquivHomLTensor_toLinearMap]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
x : (M →ₗ[R] P) ⊗[R] Q
⊢ ↑(rTensorHomEquivHomRTensor R M P Q) x = ↑(rTensorHomToHomRTensor R M P Q) x
[PROOFSTEP]
rw [← LinearEquiv.coe_toLinearMap, rTensorHomEquivHomRTensor_toLinearMap]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
⊢ ↑(homTensorHomEquiv R M N P Q) = homTensorHomMap R M N P Q
[PROOFSTEP]
ext m n
[GOAL]
case H.h.h.H.h.h
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
m : M →ₗ[R] P
n : N →ₗ[R] Q
x✝¹ : M
x✝ : N
⊢ ↑(↑(compr₂ (TensorProduct.mk R M N)
(↑(↑(compr₂ (TensorProduct.mk R (M →ₗ[R] P) (N →ₗ[R] Q)) ↑(homTensorHomEquiv R M N P Q)) m) n))
x✝¹)
x✝ =
↑(↑(compr₂ (TensorProduct.mk R M N)
(↑(↑(compr₂ (TensorProduct.mk R (M →ₗ[R] P) (N →ₗ[R] Q)) (homTensorHomMap R M N P Q)) m) n))
x✝¹)
x✝
[PROOFSTEP]
simp only [homTensorHomEquiv, compr₂_apply, mk_apply, LinearEquiv.coe_toLinearMap, LinearEquiv.trans_apply,
lift.equiv_apply, LinearEquiv.arrowCongr_apply, LinearEquiv.refl_symm, LinearEquiv.refl_apply,
rTensorHomEquivHomRTensor_apply, lTensorHomEquivHomLTensor_apply, lTensorHomToHomLTensor_apply,
rTensorHomToHomRTensor_apply, homTensorHomMap_apply, map_tmul]
[GOAL]
ι : Type w
R : Type u
M : Type v₁
N : Type v₂
P : Type v₃
Q : Type v₄
inst✝¹³ : CommRing R
inst✝¹² : AddCommGroup M
inst✝¹¹ : AddCommGroup N
inst✝¹⁰ : AddCommGroup P
inst✝⁹ : AddCommGroup Q
inst✝⁸ : Module R M
inst✝⁷ : Module R N
inst✝⁶ : Module R P
inst✝⁵ : Module R Q
inst✝⁴ : Free R M
inst✝³ : Module.Finite R M
inst✝² : Free R N
inst✝¹ : Module.Finite R N
inst✝ : Nontrivial R
x : (M →ₗ[R] P) ⊗[R] (N →ₗ[R] Q)
⊢ ↑(homTensorHomEquiv R M N P Q) x = ↑(homTensorHomMap R M N P Q) x
[PROOFSTEP]
rw [← LinearEquiv.coe_toLinearMap, homTensorHomEquiv_toLinearMap]
|
lemma basis_dense:
  fixes B :: "'a set set" and f :: "'a set \<Rightarrow> 'a"
  assumes "topological_basis B"
    and choosefrom_basis: "\<And>B'. B' \<noteq> {} \<Longrightarrow> f B' \<in> B'"
  shows "\<forall>X. open X \<longrightarrow> X \<noteq> {} \<longrightarrow> (\<exists>B' \<in> B. f B' \<in> X)"
|
# <p style="text-align: center;"> NISQAI: One-Qubit Quantum Classifier </p>
<p style="text-align: center;"> Ryan LaRose, Yousif Almulla, Nic Ezzell, Joe Iosue, Arkin Tikku </p>
<blockquote cite="">
"Quantum computing needs more quantum software engineers to implement, test, and optimize quantum algorithms."
-- Matthias Troyer.
</blockquote>
<blockquote cite="">
"Experimentation on quantum testbeds is needed." -- John Preskill.
</blockquote>
# <p style="text-align: center;"> What is NISQAI? </p>
Quantum machine learning is an exciting but conjectural field that lacks testing on quantum computers. NISQAI is a library written to facilitate research in artificial intelligence on current quantum computers and NISQ (noisy intermediate-scale quantum) devices.
In this notebook, we demonstrate the steps toward a quantum neural network working as a simple classifier. We then demonstrate how easily the NISQAI library can facilitate this task.
# <p style="text-align: center;"> Requirements for this Notebook </p>
Since you're reading this, you probably have a working installation of [Python](https://www.python.org/) and/or [Jupyter](http://jupyter.org/). In order to run this notebook, the following external packages are necessary. Installation instructions can be found by following the hyperlinks for each.
* [NumPy](http://www.numpy.org/)
* [SciPy](https://www.scipy.org/)
* [pyQuil](https://pyquil.readthedocs.io/en/stable/) 2.1.0
* [Matplotlib](https://matplotlib.org/)
* [QuTiP](http://qutip.org/docs/4.1/index.html) 4.1.0 (only for Bloch sphere visualization features)
```python
# builtins
import time
# standard imports
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
# library imports
from pyquil.quil import Program
from pyquil import api
import pyquil.gates as ops
from qutip import Bloch, ket
# option for notebook plotting
%matplotlib inline
```
# <p style="text-align: center;"> Quantum Neural Networks: A Five Step Process </p>
To implement a neural network for classical data on a quantum computer (i.e., a _quantum neural network_), the following steps need to be performed.
1. Data encoding.
* Encode the classical data into a form suitable for the quantum circuit.
1. State preparation.
* Implement a quantum circuit preparing the state of a particular datum.
1. Unitary evolution.
* Implement a parameterized gate sequence to transform the data.
1. Measurement/non-linearity.
* Measure the state of the quantum data after unitary evolution to revert back to classical information.
* Non-linearity can be introduced at this point via classical processing.
1. Training/cost function.
* Define a cost function and optimize the parameters of the unitary to minimize the cost.
In the rest of the notebook, we'll go over how to do each of these steps in turn for a simple classification problem. At the end of the notebook, we'll show how easily this can be done with the NISQAI library and discuss other features of NISQAI.
# <p style="text-align: center;"> Problem Description </p>
The specific problem we look at in this notebook falls under the category of _supervised machine learning_. Specifically, we ask:
_Given $N$ data points $p = (x, y) \in \mathbb{R}^2$ with labels $z(p) = 1$ if $x \le 0.5$ and $z(p) = 0$ otherwise, learn the decision boundary and classify new points._
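For example, under this rule the point $(0.3, 0.9)$ receives label $z = 1$ (since $x = 0.3 \le 0.5$), while $(0.7, 0.2)$ receives label $z = 0$.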
## <p style="text-align: center;"> 1. Data Encoding </p>
In order to run a quantum neural network on classical data, the classical data needs to be suitably encoded to allow it to be "uploaded" to a quantum computer. We demonstrate a simple encoding scheme below for $(x, y)$ Cartesian coordinates distributed uniformly in a unit square.
### <p style="text-align: center;"> Generating Data </p>
First we generate a random set of classical data. For reproducibility, we'll seed our random number generator.
```python
# set random seed for reproducible results
SEED = 1059123109
np.random.seed(seed=SEED)
```
```python
# generate data points on a regular 20 x 20 grid over [0, 1] x [0, 1]
# (a uniformly random alternative is commented out below)
npoints = 400
data = np.zeros((npoints, 2))
dx = np.linspace(0, 1, 20)
for j in range(20):
data[j * len(dx) : (j + 1) * len(dx), 0] = dx[j]
data[j * len(dx) : (j + 1) * len(dx), 1] = dx
# npoints = 100
# data = np.random.rand(npoints, 2)
def predicate(point):
"""Returns true if the point satisfies the predicate, else false."""
return point[0] <= point[1]
# separate the data with a linear boundary y = x
labels = np.array([1 if predicate(p) else 0 for p in data])
# plot the exact decision boundary y = x
xs = np.linspace(0, 1, 100)
ys = 0.5 * np.ones_like(xs)  # unused; kept from an earlier boundary choice
plt.plot(xs, xs, '--k')
# plot the data with the color key GREEN = 0, BLUE = 1
for i in range(npoints):
if labels[i] == 0:
ckey = 'g'
else:
ckey = 'b'
plt.scatter(data[i, 0], data[i, 1], color=ckey)
# title and axis labels
#plt.title("Random Data Points in Unit Square", fontweight="bold", fontsize=16)
# put on a grid and show the plot
plt.grid()
# plt.savefig("graph.pdf", format="pdf")
plt.show()
```
### <p style="text-align: center;"> Qubit Encoding </p>
Here we implement a method of data encoding that we call "qubit encoding." Qubit encoding works by writing two classical coordinates into a single qubit. How is this done? Note that any qubit state can be written
\begin{equation}
|\psi\rangle = \alpha |0\rangle + \beta |1\rangle
\end{equation}
where $\alpha, \beta \in \mathbb{C}$ satisfy $|\alpha|^2 + |\beta|^2 = 1$. Because of this normalization condition, we may equivalently write
\begin{equation}
|\psi\rangle = \cos(\theta / 2) |0\rangle + e^{i \phi} \sin(\theta / 2)|1\rangle
\end{equation}
We then encode information into the angles $0 \le \theta \le \pi$ and $0 \le \phi \le 2 \pi$.
For the $(x, y)$ coordinates of our points, we will use the encoding
\begin{align}
\theta &= \pi x \\
\phi &= 2 \pi y
\end{align}
This is a simple encoding, but the results of the classification will speak to its effectiveness. Other encodings, for example
\begin{align}
\theta &= \frac{1}{2} \tan^{-1}\left( \frac{y}{x} \right) \\
\phi &= \pi (x^2 + y^2),
\end{align}
can be used for other data sets.
In code, we may make this transformation as follows:
```python
# encode the classical data via a simple linear "qubit encoding"
qdata = np.zeros_like(data)
for (index, point) in enumerate(data):
qdata[index][0] = np.pi * (point[0] + 0.0 * point[1]) / 1.0
qdata[index][1] = 2 * np.pi * (0.0 * point[0] + point[1]) / 1.0
# qdata[index][0] = np.pi * point[0]
# qdata[index][1] = 2 * np.pi * point[1]
# qdata[index][0] = 2 * np.arcsin(point[0])
# qdata[index][1] = 4 * np.arccos(point[1])
```
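For instance, under this encoding the point $(x, y) = (0.5, 0.25)$ maps to $(\theta, \phi) = (\pi / 2, \pi / 2)$, which lies on the equator of the Bloch sphere.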
It's now possible to visualize the data as points on the surface of the Bloch sphere.
```python
# break the data into two lists depending on label and turn into (x, y, z) points
# empty lists to store values
gxs = []
gys = []
gzs = []
bxs = []
bys = []
bzs = []
# loop over all points, convert to cartesian coords, and append to correct list
for (index, point) in enumerate(qdata):
theta = point[0]
phi = point[1]
# convert to cartesian coords for plotting
x = np.sin(theta) * np.cos(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
if labels[index] == 0:
gxs.append(x)
gys.append(y)
gzs.append(z)
else:
bxs.append(x)
bys.append(y)
bzs.append(z)
# format points correctly for bloch sphere
lpoints = [gxs, gys, gzs]
rpoints = [bxs, bys, bzs]
# get a Bloch sphere
bloch = Bloch()
# options for Bloch sphere
bloch.frame_color = "grey"
bloch.sphere_color = "white"
bloch.point_size = [40]
bloch.sphere_alpha = 0.50
bloch.point_color = ["g", "b"]
bloch.point_marker = ["o"]
bloch.view = [-55, 10]
# add the points on the Bloch sphere
bloch.add_points(lpoints)
bloch.add_points(rpoints)
# show the Bloch sphere
#bloch.save("lin-comb-bloch.pdf", format="pdf")
%matplotlib notebook
bloch.show()
```
## <p style="text-align: center;"> 2. State Preparation </p>
Now that we have encoded our data, we still need to prepare it in quantum form. Conventionally, all qubits start in the $|0\rangle$ state. The problem we have to solve is:
Given angles $\theta, \phi$, perform the mapping
\begin{equation}
S(\theta, \phi) |0\rangle \rightarrow |\psi\rangle = \cos(\theta / 2) |0\rangle + e^{i \phi} \sin(\theta / 2)|1\rangle
\end{equation}
We call $S$ a _state preparation unitary_ or _state preparation circuit_.
It is clear from the equation above that the matrix representation for $S$ in the computational basis is
\begin{equation}
S(\theta, \phi) = \left[ \begin{matrix}
\cos(\theta / 2) & e^{-i \phi} \sin(\theta / 2)\\
e^{i \phi} \sin(\theta / 2) & - \cos(\theta / 2) \\
\end{matrix} \right]
\end{equation}
On current NISQ computers, circuits need to be as short-depth as possible, meaning they contain as few gates as possible. The "qubit encoding" performed above allows for a constant-depth state preparation circuit for any given values of $\theta, \phi$.
Why is this? In general we need to perform some arbitrary single-qubit unitary transformation $S$ (up to an irrelevant global phase, an element of $SU(2)$). It is known that any such unitary can be realized with five standard rotations available on current quantum computers. Namely,
\begin{equation}
S = R_z(\gamma_1) R_x(\pi / 2) R_z(\gamma_2) R_x(\pi / 2) R_z(\gamma_3)
\end{equation}
where $0 \le \gamma_i \le 2 \pi$ for $i = 1, 2, 3$. We can define our gate $S$ above in pyquil and compile it to a sequence of five implementable rotations by using the Quil compiler. First we'll write a function to give us our state preparation matrix.
```python
# define the matrix S as a function of theta and phi
def state_prep_mtx(theta, phi):
"""Returns the state preparation matrix according to the qubit encoding above."""
return np.array([[np.cos(theta / 2), np.exp(-1j * phi) * np.sin(theta / 2)],
[np.exp(1j * phi) * np.sin(theta / 2), - np.cos(theta / 2)]])
```
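As a quick sanity check (our addition, not part of the original notebook), we can confirm numerically that `state_prep_mtx` returns a unitary matrix and sends $|0\rangle$ to the intended state:
```python
# sanity check: S should be unitary and S|0> should carry the qubit-encoding amplitudes
theta_test, phi_test = 0.7, 1.3
S_test = state_prep_mtx(theta_test, phi_test)
assert np.allclose(S_test.conj().T @ S_test, np.eye(2))
psi = S_test @ np.array([1.0, 0.0])
assert np.allclose(psi, [np.cos(theta_test / 2),
                         np.exp(1j * phi_test) * np.sin(theta_test / 2)])
```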
Next we'll define this as a pyquil gate. We do this by first making a program and then defining a gate for a particular encoded data point.
```python
# get a pyquil program
qprog = Program()
# pick a particular encoded data point
theta, phi = qdata[0]
# get the state prep matrix for this point
S = state_prep_mtx(theta, phi)
# define a gate in pyquil for the state preparation
qprog.defgate("S", state_prep_mtx(theta, phi))
```
We can now use this gate in a quantum program to input our classical data into the quantum computer. To do this, we simply write:
```python
# prepare the first encoded data point in quantum form
qprog += ("S", 0)
```
To see our program, we can print it out:
```python
print(qprog)
```
If we measure this qubit now, we get either a zero or a one, with probabilities given by the squared magnitudes of its amplitudes. We'll add this measurement to our program and then run it many times to get an estimate of the probability distribution of its outcome.
```python
# add a measurement on the qubit encoding our data
creg = qprog.declare("ro", memory_size=1)
qprog += (ops.MEASURE(0, creg[0]))
# print out the new program
print(qprog)
```
```python
# run the program many times
shots = 1000
qvm = api.QVMConnection()
dist = qvm.run(qprog, trials=shots)
```
```python
def histogram(dist):
"""Makes a histogram of the probability of obtaining 0 and 1."""
# get the zero and one probabilities
prob0 = dist.count([0]) / shots
prob1 = dist.count([1]) / shots
# make a bar plot
plt.bar([0, 1], [prob0, prob1])
# make it look nice
plt.grid()
plt.title("Output Probability Distribution", fontsize=16, fontweight="bold")
plt.xlabel("Measurement Outcomes", fontsize=14)
plt.ylabel("Probability", fontsize=14)
plt.ylim(0, 1)
plt.xticks([0, 1])
# show it
plt.show()
# make a histogram of the output distribution
histogram(dist)
```
We want our output distribution to move towards either zero or one, depending on what label is assigned to that particular data point. To do this, we implement unitary evolution on the circuit, which corresponds to implementing another sequence of gates in our circuit.
## <p style="text-align: center;"> 3. Unitary Evolution </p>
After the classical data has been encoded (step 1) and prepared into the quantum system (step 2), step 3 is to perform unitary evolution on the quantum state representing the data. In the language of classical learning theory, this corresponds to implementing a layer of the neural network.
In the quantum neural network case, we simply need to implement a sequence of parameterized gates.
First we write a function to easily implement such a unitary.
```python
def unitary(angles):
"""Returns a circuit implementing the unitary Rz(theta0) P Rz(theta1) P Rz(theta2)
where theta0 = angles[0], theta1 = angles[1], and theta2 = angles[2], and
P = Rx(pi / 2) is a pi / 2 pulse.
"""
return Program(
ops.RZ(angles[0], 0),
ops.RX(np.pi / 2, 0),
ops.RZ(angles[1], 0),
ops.RX(np.pi / 2, 0),
ops.RZ(angles[2], 0)
)
```
Next we use it to add unitary evolution to our quantum neural network for some given angles.
```python
# test angles
angles = 2 * np.pi * np.random.rand(3)
# pop off measurement
qprog.pop()
# append the unitary circuit to our quantum neural network program
qprog += unitary(angles)
qprog += [ops.MEASURE(0, creg[0])]
```
```python
print(qprog)
```
Now we'll run this new circuit with the unitary evolution implemented.
```python
# run the quantum neural network program
histogram(qvm.run(qprog, trials=shots))
```
Now we want to use this unitary evolution to move our output probability towards the desired label. We'll suppose for a moment that this point is supposed to be mapped to the 0 output. We'll define an objective function ```obj_simple(angles)``` to capture the fraction of 1 outputs, then we'll train over the angles in the unitary to minimize the objective.
```python
def make_program(pangles, uangles):
"""Returns a pyquil program that prepares the state according
to pangles and applies the unitary according to uangles.
"""
# instantiate a program
qprog = Program()
creg = qprog.declare("ro", memory_size=1)
# define a gate in pyquil for the state preparation
qprog.defgate("S", state_prep_mtx(pangles[0], pangles[1]))
# write the program
qprog += [("S", 0),
unitary(uangles),
ops.MEASURE(0, creg[0])]
return qprog
def obj_simple(angles, shots=1000, verbose=False):
"""Returns the number of zero outputs of a single training example."""
# make the program
qprog = make_program([theta, phi], angles)
if verbose:
print(qprog)
dist = qvm.run(qprog, trials=shots)
obj = dist.count([1]) / shots
print("The current value of the objective function is:", obj, end="\r")
return obj
```
Now we'll minimize the objective function `obj_simple` by using the modified `Powell` algorithm built into SciPy. The minimization here should take no more than 30 seconds.
```python
out = minimize(obj_simple, x0=2 * np.pi * np.random.rand(3), method="Powell")
```
Now we can verify that the output distribution skews this point towards the "zero bin."
```python
# do the circuit (neural network) with the optimal parameters
%matplotlib inline
opt_angles = out['x']
qprog = make_program([theta, phi], opt_angles)
# show the output distribution
dist = qvm.run(qprog, trials=shots)
histogram(dist)
```
Indeed it does! Here we demonstrated that we can use our neural network to "steer" a point in a particular direction. The idea of training is to "steer" _all_ points in the desired direction. Before we train our quantum neural network, we first discuss the measurement process and its importance in quantum neural network implementations.
## <p style="text-align: center;"> 4. Measurement </p>
Measurement is used in our circuit above to translate from quantum to classical information. In other quantum neural networks, measurement can be used to introduce non-linearity.
NISQAI treats measurements both as a way to keep circuit depth low (for implementations on current/near-term quantum computers) and as a way to introduce non-linearity between layers of a network. Specifically, measurements lead to (intermediate) classical data. On a classical computer, we can introduce any non-linear activation function we wish. The new classical data from the output of the activation function can then be prepared into quantum form again via steps 1 and 2 (data encoding and state preparation).
This intermediate classical post-processing is a trademark of NISQAI. It allows for:
1. Short-depth circuits suitable for NISQ computers.
1. Non-linearity between layers of a quantum neural network.
1. Deep learning (many layers) with shallow circuits.
In this simple binary classifier example, we'll restrict to just one layer of the neural network for simplicity.
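To make the multi-layer idea concrete, here is a minimal sketch (our own illustration, not NISQAI library code) of how a classical non-linearity could sit between two quantum layers: estimate $P(1)$ from the measurement outcomes, squash it with an activation function, and re-encode the result as preparation angles for the next layer. The scaling constants below are arbitrary choices.
```python
def sigmoid(x):
    """Classical non-linear activation applied between quantum layers."""
    return 1.0 / (1.0 + np.exp(-x))

def next_layer_angles(dist, shots=1000):
    """Map measurement outcomes to preparation angles for the next layer.
    Illustrative only; the activation and re-encoding here are arbitrary."""
    p1 = dist.count([1]) / shots           # estimated probability of outcome 1
    activated = sigmoid(10 * (p1 - 0.5))   # non-linearity on classical data
    return np.pi * activated, 2 * np.pi * activated
```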
## <p style="text-align: center;"> 5. Training </p>
We saw above that it's easy to steer one point towards the output we want to get. But training the quantum neural network involves training over all points in the training set. For this, we need to define a cost function that captures the total loss over the entire training set.
To do this, define the _indicator function_ $I(z_i = \hat{z}_i)$ to be 0 if $z_i = \hat{z}_i$ and 1 otherwise. Here, $z_i$ is the exact label of the $i$th training data $(x_i, y_i)$ and $\hat{z}_i$ is the prediction of this label by our neural network. (In code, we use the variables `labels` for $z_i$ and `predictions` for $\hat{z}_i$.)
To define the cost now, we simply average over all points (say there are $M$ points) in the training data, so that $0 \le C \le 1$, matching the code below:
\begin{equation}
C = \frac{1}{M} \sum_{i = 1}^{M} I(z_i = \hat{z}_i)
\end{equation}
Note that the cost $C$ depends on the unitary evolution $U$, i.e. $C = C(U)$, because the prediction $\hat{z}_i$ depends on $U$.
We now need to define a cost (objective) function ```obj(...)``` that takes into account all data in the training set. This function will encapsulate everything we've done for a single training point, but now for all of them.
```python
# fraction of total data to use as training data
train_frac = 0.7
def obj(uangles):
"""Returns the objective function C defined above over all training data.
Args:
uangles [type: list<float>]
the angles in the unitary evolution.
rtype: float
"""
# grab some training data from the overall data set
tpoints = int(train_frac * len(qdata))
tdata = qdata[:tpoints]
tlabels = labels[:tpoints]
# initialize a variable to store the output predictions of the neural net
predictions = np.zeros_like(tlabels, dtype=int)
# loop over all training data to get the predictions
for i, pangles in enumerate(tdata):
# write the program
qprog = make_program(pangles, uangles)
# run the program
out = qvm.run(qprog, trials=1000)
# get the output probabilities
p0 = out.count([0])
p1 = out.count([1])
# take the prediction to be max(p0, p1)
if p0 >= p1:
predictions[i] = 0
else:
predictions[i] = 1
# compute the difference of the labels and return the cost
cost = sum(abs(predictions - tlabels)) / tpoints
print("The current value of the cost function is:", cost, end="\r")
return cost
```
Now let's compute the objective function for some random test angles.
```python
# get some random angles
angs = 2 * np.pi * np.random.rand(3)
cost = obj(angs)
```
We don't want just any random angles for our unitary evolution, however. We want to compute the optimal angles, which we can do by minimizing the cost function. Again we'll use the modified ```Powell``` algorithm for minimization.
Training the neural network here will take more time than simply optimizing the output of a single point. On our computer, it takes around 15 seconds to evaluate the objective function for one set of angles. The `Powell` optimization algorithm generally takes on the order of 20-30 cost function evaluations; for us, the optimization takes around 15 minutes on average. You can expect this training to take 15-20 minutes.
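If that is too slow on your machine, SciPy's `minimize` accepts an `options` dictionary for the `Powell` method that caps the optimizer's effort (at the cost of a possibly worse optimum). A sketch, with illustrative values:
```python
# cap the number of function evaluations and loosen the tolerances
out = minimize(fun=obj, x0=angs, method="Powell",
               options={"maxfev": 15, "xtol": 1e-2, "ftol": 1e-2})
```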
If you chose to skip the training and don't want to wait, we've saved a set of optimal angles from previous attempts. These angles are for a vertical boundary $x = 0.5$ trained on 70/100 data points. They will only be optimal for the first set of random points generated by the given seed. We reiterate: _Only use these angles if you haven't changed anything in the notebook and only ran each cell once!_
```python
optimal_angles = [7.85082205, 0.01934754, 9.62729993]
```
If you chose to skip the training, you should *not* execute the next two cells. Otherwise, continue through the notebook.
```python
# train the quantum neural network and time how long it takes
start = time.time()
out = minimize(fun=obj, x0=angs, method="Powell")
print("\nTotal training runtime took {} minutes.".format((time.time() - start) / 60))
```
```python
# grab the optimal angles and minimal cost value
optimal_angles = out['x']
fval = out['fun']
# print them out
print(fval)
print(optimal_angles)
```
# <p style="text-align: center;"> Results </p>
Now that we've trained on a subset of our data, we can see how the predictions do on the whole dataset.
First, we'll write a function that performs this task.
```python
def get_all_predictions(angles):
    """Returns a numpy array of all predictions."""
    # initialize a variable to store the output predictions of the neural net
    zhats = np.zeros_like(labels, dtype=int)

    # loop over all data to get predictions
    for i, pangles in enumerate(qdata):
        # write the program
        qprog = make_program(pangles, angles)

        # run the program
        out = qvm.run(qprog, trials=1000)

        # count how often each basis state was measured
        p0 = out.count([0])
        p1 = out.count([1])

        # take the prediction to be the more frequent outcome
        if p0 >= p1:
            zhats[i] = 0
        else:
            zhats[i] = 1
    return zhats
```
Then we'll call the function to get the results.
```python
# compute all the predictions of the quantum neural network
predictions = get_all_predictions(optimal_angles)
```
We have now trained our neural network and classified all data points. Let's display the statistics below to see how our QNN performed.
```python
# compute statistics of the QNN
ntrain = int(train_frac * npoints)
ncorrect = npoints - sum(abs(predictions - labels))
acc = ncorrect / npoints * 100

# print them out
print(" Results of quantum neural network classification ".center(80, "="))
print("Out of {} total data points:".format(npoints))
print("The QNN was trained on {}% of the total data ({} training points).".format(train_frac * 100, ntrain))
print("The QNN classified {} data points correctly ({}% accuracy).".format(ncorrect, acc))
print("".center(80, "="))

# plot the data points, colored by the QNN's predictions
for i in range(npoints):
    if predictions[i] == 0:
        ckey = 'g'
    else:
        ckey = 'b'
    plt.scatter(data[i, 0], data[i, 1], color=ckey)
plt.grid()
plt.show()
```
## <p style="text-align: center;"> Discussion of Results </p>
How did your quantum neural network perform? If you followed through this notebook sequentially, you should have seen 100% accuracy in the predictions of the network. A saved plot of our results obtained by running this notebook is included below:
Here, the exact decision boundary (not the learned one) is shown in the plot, and data points are colored according to the quantum neural network predictions. Green means predicted left of the boundary (the 0 bin), and blue means predicted right of the boundary (the 1 bin). As can be seen in our example results, all data points are correctly classified.
# <p style="text-align: center;"> Example Done with NISQAI </p>
The above example involved five steps to implement a neural network on a quantum computer. While the steps aren't hard, programming each one for every particular example is tedious and time-consuming. More time could be spent testing quantum neural networks if there were a library implementing all of these steps.
This is where NISQAI comes in. NISQAI is to quantum machine learning what TensorFlow/PyTorch is to classical machine learning. Just like OpenFermion provides a library of code to simplify quantum chemistry on quantum computers, NISQAI provides a library of code to simplify neural networks and other machine learning algorithms on quantum computers.
The above classifier example in NISQAI could be done with the following few lines of code. Here, we assume that ```classical_data``` exists in the program.
## Prototype NISQAI Code
```python
# imports
from nisqai import qnn
# get a quantum neural network
network = qnn.qnn(nodes=1, layers=1)
# encode the classical data into quantum form
network.encode(data=classical_data, scheme=qnn.encoding_schemes.SIMPLE_LINEAR)
# add the unitary evolution
network.add_unitary(qnn.ansatze.UNIVERSAL)
# add measurements to the network
network.add_measurements(basis=qnn.bases.Z)
# do the training on 70% of the total data
network.train(optimizer="Powell", fraction=0.7)
# test the neural network on all the data or on new input data
statistics = network.classify()
```
These few lines of code show the power of NISQAI. One of its greatest strengths is the flexibility to research other quantum neural network implementations, not just the one-qubit quantum classifier.
## Features of NISQAI
* Shatters domain barriers.
  * Whether you're a machine learning researcher interested in quantum or a quantum researcher interested in machine learning, NISQAI is built for you.
* Powerful builtins.
  * Numerous prototypes of quantum neural network architectures, including methods for data encoding, state preparation, unitary evolution, measurement, and training.
* Fully modular.
  * For methods like `qnn.add_unitary(...)`, the user can write a function to specify whichever unitary ansatz is desired.
* Loaded with examples.
  * Many instructive code snippets and notebooks to get you started in quantum machine learning.
* Open-source.
  * NISQAI is currently under development with expected release in early 2019. After this, NISQAI will be free and open-source. Always.
* Cross platform.
  * NISQAI works for Windows, Mac, and Linux.
* Python/Matlab interfaces.
  * Two of the most popular languages for quantum computing and machine learning researchers.
# Conclusions
In this example, we've seen that a quantum neural network is able to successfully classify classical data in $\mathbb{R}^2$. The qubit encoding strategy is able to write two bits of classical information into one bit of quantum information. Additionally, the state preparation circuit for the qubit encoding is short-depth, particularly useful for NISQ computers. We've demonstrated how to train a quantum neural network using the modified Powell algorithm on a simple example. We've also discussed how the NISQAI library greatly simplifies quantum neural network implementations.
# Acknowledgements
The development of NISQAI is supported by the [unitary.fund](http://unitary.fund/). We thank [Will Zeng](https://twitter.com/wjzeng) for creating and running this program, as well as [John Hering](https://twitter.com/johnhering), Jeff Cordova, Nima Alidoust, and [PLOS](https://www.plos.org/) for sponsoring it.
|
using TableShowUtils
using Test
using DataValues
using Dates
@testset "TableShowUtils" begin
source = [(a=1,b="A"),(a=2,b="B")]
@test sprint(TableShowUtils.printtable, source, "foo file") == """
2x2 foo file
a │ b
──┼──
1 │ A
2 │ B"""
@test sprint(TableShowUtils.printHTMLtable, source) == """
<table><thead><tr><th>a</th><th>b</th></tr></thead><tbody><tr><td>1</td><td>&quot;A&quot;</td></tr><tr><td>2</td><td>&quot;B&quot;</td></tr></tbody></table>"""
@test sprint((stream) -> TableShowUtils.printtable(stream, source, "foo file", force_unknown_rows = true)) == "?x2 foo file\na │ b\n──┼──\n1 │ A\n2 │ B\n... with more rows"
source_with_many_columns = [(a0=1,b0=1,c0=1,a1=1,b1=1,c1=1,a2=1,b2=1,c2=1,a3=1,b3=1,c3=1,a4=1,b4=1,c4=1,a5=1,b5=1,c5=1,a6=1,b6=1,c6=1,a7=1,b7=1,c7=1,a8=1,b8=1,c8=1,a9=1,b9=1,c9=1,a10=1,b10=1,c10=1)]
@test sprint(TableShowUtils.printtable, source_with_many_columns, "foo file") == "1x33 foo file\na0 │ b0 │ c0 │ a1 │ b1 │ c1 │ a2 │ b2 │ c2 │ a3 │ b3 │ c3 │ a4 │ b4 │ c4 │ a5\n───┼────┼────┼────┼────┼────┼────┼────┼────┼────┼────┼────┼────┼────┼────┼───\n1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 \n... with 17 more columns: b5, c5, a6, b6, c6, a7, b7, c7, a8, b8, c8, a9, b9, c9, a10, b10, c10"
source_with_NA = [(a=1,b="A"),(a=2,b=NA)]
@test sprint(TableShowUtils.printtable, source_with_NA, "foo file") == "2x2 foo file\na │ b \n──┼────\n1 │ A \n2 │ #NA"
@test sprint((stream) -> TableShowUtils.printHTMLtable(stream, source, force_unknown_rows = true)) == "<table><thead><tr><th>a</th><th>b</th></tr></thead><tbody><tr><td>1</td><td>&quot;A&quot;</td></tr><tr><td>2</td><td>&quot;B&quot;</td></tr><tr><td>⋮</td><td>⋮</td></tr></tbody></table><p>... with more rows.</p>"
@test sprint(TableShowUtils.printdataresource, source) == "{\"schema\":{\"fields\":[{\"name\":\"a\",\"type\":\"integer\"},{\"name\":\"b\",\"type\":\"string\"}]},\"data\":[{\"a\":1,\"b\":\"A\"},{\"a\":2,\"b\":\"B\"}]}"
@test sprint(TableShowUtils.printdataresource, source_with_NA) == "{\"schema\":{\"fields\":[{\"name\":\"a\",\"type\":\"string\"},{\"name\":\"b\",\"type\":\"string\"}]},\"data\":[{\"a\":1,\"b\":\"A\"},{\"a\":2,\"b\":null}]}"
@test TableShowUtils.julia_type_to_schema_type(AbstractFloat) == "number"
@test TableShowUtils.julia_type_to_schema_type(Bool) == "boolean"
@test TableShowUtils.julia_type_to_schema_type(Dates.Time) == "time"
@test TableShowUtils.julia_type_to_schema_type(Dates.Date) == "date"
@test TableShowUtils.julia_type_to_schema_type(Dates.DateTime) == "datetime"
@test TableShowUtils.julia_type_to_schema_type(DataValues.DataValue{Integer}) == "integer"
end
|
At Foppapedretti we think it is important to use advanced technology in order to provide our products with the maximum functionality and ease of use.
Since 1946 Foppapedretti has manufactured original and creative items, while paying particular attention to the use of materials and the technology within them.
Decorate your dream-house with us.
"It’s great to be a child"
From the very first day, Foppapedretti is with your child.
|
[GOAL]
I : Type u
f : I → Type v
x y : (i : I) → f i
i : I
inst✝ : (i : I) → Distrib (f i)
⊢ ∀ (a b c : (i : I) → f i), a * (b + c) = a * b + a * c
[PROOFSTEP]
intros
[GOAL]
I : Type u
f : I → Type v
x y : (i : I) → f i
i : I
inst✝ : (i : I) → Distrib (f i)
a✝ b✝ c✝ : (i : I) → f i
⊢ a✝ * (b✝ + c✝) = a✝ * b✝ + a✝ * c✝
[PROOFSTEP]
ext
[GOAL]
case h
I : Type u
f : I → Type v
x y : (i : I) → f i
i : I
inst✝ : (i : I) → Distrib (f i)
a✝ b✝ c✝ : (i : I) → f i
x✝ : I
⊢ (a✝ * (b✝ + c✝)) x✝ = (a✝ * b✝ + a✝ * c✝) x✝
[PROOFSTEP]
exact mul_add _ _ _
[GOAL]
I : Type u
f : I → Type v
x y : (i : I) → f i
i : I
inst✝ : (i : I) → Distrib (f i)
⊢ ∀ (a b c : (i : I) → f i), (a + b) * c = a * c + b * c
[PROOFSTEP]
intros
[GOAL]
I : Type u
f : I → Type v
x y : (i : I) → f i
i : I
inst✝ : (i : I) → Distrib (f i)
a✝ b✝ c✝ : (i : I) → f i
⊢ (a✝ + b✝) * c✝ = a✝ * c✝ + b✝ * c✝
[PROOFSTEP]
ext
[GOAL]
case h
I : Type u
f : I → Type v
x y : (i : I) → f i
i : I
inst✝ : (i : I) → Distrib (f i)
a✝ b✝ c✝ : (i : I) → f i
x✝ : I
⊢ ((a✝ + b✝) * c✝) x✝ = (a✝ * c✝ + b✝ * c✝) x✝
[PROOFSTEP]
exact add_mul _ _ _
|
function [R, tau] = CholeskyMultIdentity(H)
% Implements Cholesky with added multiple of the identity. This attempts to
% to find a scalar tau > 0 such that H + tau * I is sufficiently positive
% definite, where I is the identity matrix.
global numFact % Number of Cholesky factorizations attempted.
% First, we initially try a Cholesky factorization.
[R, fail] = chol(H);
numFact = numFact + 1;
% If it does not fail, we're done.
if fail == 0
tau = 0;
return;
end
% If the initial Cholesky factorization fails, we attempt to find a scalar
% tau > 0 such that H + tau * I is sufficiently positive definite.
beta = 0.001; % Heuristic for increasing tau.
min_H_diag = min(diag(H)); % Smallest diagonal of H.
% If smallest diagonal of H is positive, set tau to 0; otherwise, set to
% nonnegative version of the smallest diagonal plus the beta heuristic.
if min_H_diag > 0
tau = 0;
else
tau = -min_H_diag + beta;
end
I = eye(size(H,1)); % Identity matrix.
% Repeatedly add a tau-multiple of the identity to H until the Cholesky
% factorization succeeds. Upon each failure, double tau.
while 1
[R, fail] = chol(H + tau * I);
numFact = numFact + 1;
if fail == 0
return;
else
% NOTE: In order to decrease number of factorizations, we may want to
% increase tau by a factor of 10 instead of 2.
tau = max(2*tau, beta);
end
end
end
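% Example usage (illustrative sketch, not part of the original file):
%
%   global numFact; numFact = 0;
%   H = [1 2; 2 1];                       % indefinite: eigenvalues 3 and -1
%   [R, tau] = CholeskyMultIdentity(H);   % returns tau > 0 here
%   norm(R'*R - (H + tau*eye(2)))         % residual is near zero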
|
[STATEMENT]
lemma covering_space_locally_connected:
fixes p :: "'a::real_normed_vector \<Rightarrow> 'b::real_normed_vector"
assumes "locally connected C" "covering_space C p S"
shows "locally connected S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. locally connected S
[PROOF STEP]
using assms covering_space_locally_connected_eq
[PROOF STATE]
proof (prove)
using this:
locally connected C
covering_space C p S
covering_space ?C ?p ?S \<Longrightarrow> locally connected ?S = locally connected ?C
goal (1 subgoal):
1. locally connected S
[PROOF STEP]
by blast
|
% v_rotro2eu_tab: Calculate tables needed for v_rotro2eu
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% 52 different rotation matrix patterns of -1,0,+1: %
% 1- 3: identity matrix rows in order: 123, 231, 312 %
% 1- 3: negated identity matrix rows in order: 132, 213, 321 %
% 7-12: As 1-6 but with rows 2,3 negated %
% 13-18: As 1-6 but with rows 1,3 negated %
% 19-24: As 1-6 but with rows 1,2 negated %
% 25-33: +1 in position (i-24) and 0's in remainder of this row and col %
% 34-42: -1 in position (i-33) and 0's in remainder of this row and col %
% 43-51: 0 in position (i-42) %
% 52: no special symmetry %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
mes=[1:3 10:12 7:9 4:6]; % sign reversal look-up table
rtci=[2 3 5 6 8 9; 3 1 6 4 9 7; 1 2 4 5 7 8]';
rtsi=[3 2 6 5 9 8; 1 3 4 6 7 9; 2 1 5 4 8 7]';
rtr=[1 4 7 2 5 8 3 6 9]; % indices to transpose a vectorized 3x3 matrix
w6=ones(6,1); %
th6=3*w6;
x6=[2 1 2 1 2 1]'; % Index for sin components
scai=[0 0 0 1; 0 0 0 2; 0 0 0 3; 1 -1 0 1; 1 -1 0 2; 1 -1 0 3; 0 0 -1 1; 0 0 -1 2; 0 0 -1 3; -1 1 0 1; -1 1 0 2; -1 1 0 3]'; % [sin; -sin; cos; xyz] for fixed rotations
% create patterns of non-zero entries
nzpatt=10*ones(3,3,52); % pattern of -1,0,+1
e3=eye(3);
nzpatt(:,:,1)=e3;
nzpatt(:,:,2)=e3([2 3 1],:);
nzpatt(:,:,3)=e3([3 1 2],:);
nzpatt(:,:,4)=-e3([1 3 2],:);
nzpatt(:,:,5)=-e3([2 1 3],:);
nzpatt(:,:,6)=-e3([3 2 1],:);
for j=1:3
f3=-e3;
f3(j,j)=1;
for i=1:6
nzpatt(:,:,i+6*j)=f3*nzpatt(:,:,i);
end
end
for i=1:9
ir=1+mod(i-1,3);
ic=1+(i-ir)/3;
nzpatt(:,ic,i+24)=0;
nzpatt(ir,:,i+24)=0;
nzpatt(ir,ic,i+24)=1;
nzpatt(:,ic,i+33)=0;
nzpatt(ir,:,i+33)=0;
nzpatt(ir,ic,i+33)=-1;
nzpatt(ir,ic,i+42)=0;
end
nzpattv=reshape(nzpatt,9,52); % vectorize the 3x3 matrices
nzpattc=reshape(sum(nzpatt~=0,1),3,52); % number of non-zero elements in each column
% now create transition map
trmap=zeros(52,12); % result of applying transformation j to pattern i
zel=zeros(4,3,52); % elements to zero: [zero; non-zero; sine-sign; targ-sign],transformation,initial pattern
jm='xyz123456789'; % rotation patterns
for i=1:52
for j=1:12
rijv=reshape(v_rotqr2ro(v_roteu2qr(jm(j),pi/3))*nzpatt(:,:,i),9,1); % vectorized result of applying transformation
rijv(abs(abs(abs(rijv)-0.5)-0.5)>1e-8)=10; % set entries to 10 unless close to -1,0,+1
k=find(all(round(rijv)==nzpattv,1),1); % round to integers and find a match
if isempty(k)
error('cannot find match for (%d,%d)',i,j);
else
trmap(i,j)=k;
end
if j<=3
icol=mod(find([nzpattc(:,i)==1 & nzpattc(:,k)==2;nzpattc(:,i)==2 & nzpattc(:,k)==3],1)-1,3)+1; % find the column to zero an element
if ~isempty(icol)
irow=(1:3)*(~nzpatt(:,icol,i) & nzpatt(:,icol,k)); % find zero that disappears
jrow=6-j-irow; % find other row involved in rotation
zel(:,j,i)=[3*icol-3+[irow; jrow]; mod(j-irow+1,3)-1; sign(nzpatt(jrow,icol,i))];
end
end
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% print zel
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
fid=fopen('zel.txt','w');
fprintf(fid,'zel=reshape([');
for i=1:52
if i>1
fprintf(fid,';\n ');
end
fprintf(fid,' %2d',zel(:,:,i));
end
fprintf(fid,']'',4,3,52);\n');
fclose(fid);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% print trmap
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
fid=fopen('trmap.txt','w');
fprintf(fid,'trmap=[');
for i=1:52
if i>1
fprintf(fid,';\n ');
end
fprintf(fid,' %2d',trmap(i,:));
end
fprintf(fid,'];\n');
fclose(fid);
|
lemma prime_odd_int: "prime p \<Longrightarrow> p > (2::int) \<Longrightarrow> odd p"
|
-- -------------------------------------------------------------- [ Lens.idr ]
-- Description : Idris port of Control.Lens
-- Copyright : (c) Huw Campbell
-- --------------------------------------------------------------------- [ EOH ]
module Control.Lens.First
public export
record First (a : Type) where
  constructor MkFirst
  getFirst : Maybe a
public export
Semigroup (First a) where
  (MkFirst f) <+> r = case f of
    Nothing => r
    Just x => MkFirst f
public export
Monoid (First a) where
  neutral = MkFirst Nothing
|
State Before: α : Type u_1
β : Type ?u.23216
f fa : α → α
fb : β → β
x y : α
m n✝ : ℕ
hx : x ∈ periodicPts f
n : ℕ
⊢ minimalPeriod f ((f^[n]) x) = minimalPeriod f x State After: α : Type u_1
β : Type ?u.23216
f fa : α → α
fb : β → β
x y : α
m n✝ : ℕ
hx : x ∈ periodicPts f
n : ℕ
⊢ IsPeriodicPt f (minimalPeriod f x) ((f^[n]) x)
α : Type u_1
β : Type ?u.23216
f fa : α → α
fb : β → β
x y : α
m n✝ : ℕ
hx : x ∈ periodicPts f
n : ℕ
⊢ (f^[n]) x ∈ periodicPts f Tactic: apply
(IsPeriodicPt.minimalPeriod_le (minimalPeriod_pos_of_mem_periodicPts hx) _).antisymm
((isPeriodicPt_of_mem_periodicPts_of_isPeriodicPt_iterate hx
(isPeriodicPt_minimalPeriod f _)).minimalPeriod_le
(minimalPeriod_pos_of_mem_periodicPts _)) State Before: α : Type u_1
β : Type ?u.23216
f fa : α → α
fb : β → β
x y : α
m n✝ : ℕ
hx : x ∈ periodicPts f
n : ℕ
⊢ IsPeriodicPt f (minimalPeriod f x) ((f^[n]) x) State After: no goals Tactic: exact (isPeriodicPt_minimalPeriod f x).apply_iterate n State Before: α : Type u_1
β : Type ?u.23216
f fa : α → α
fb : β → β
x y : α
m n✝ : ℕ
hx : x ∈ periodicPts f
n : ℕ
⊢ (f^[n]) x ∈ periodicPts f State After: case intro.intro
α : Type u_1
β : Type ?u.23216
f fa : α → α
fb : β → β
x y : α
m✝ n✝ n m : ℕ
hm : m > 0
hx : IsPeriodicPt f m x
⊢ (f^[n]) x ∈ periodicPts f Tactic: rcases hx with ⟨m, hm, hx⟩ State Before: case intro.intro
α : Type u_1
β : Type ?u.23216
f fa : α → α
fb : β → β
x y : α
m✝ n✝ n m : ℕ
hm : m > 0
hx : IsPeriodicPt f m x
⊢ (f^[n]) x ∈ periodicPts f State After: no goals Tactic: exact ⟨m, hm, hx.apply_iterate n⟩
|
{-# OPTIONS --safe #-}
open import Relation.Ternary.Separation
module Relation.Ternary.Separation.Allstar
{i} {I : Set i}
{c} {C : Set c} {{rc : RawSep C}} {u} {{sc : IsUnitalSep rc u}}
where
open import Level
open import Data.Product
open import Data.List hiding (concat)
open import Relation.Unary
{- Inductive separating forall over a list -}
module _ {ℓ} where
data Allstar (P : I → Pred C ℓ) : List I → SPred (ℓ ⊔ c ⊔ i) where
nil : ε[ Allstar P [] ]
cons : ∀ {x xs} → ∀[ P x ✴ Allstar P xs ⇒ Allstar P (x ∷ xs) ]
-- not typed well in non-pattern positions
infixr 5 _:⟨_⟩:_
pattern _:⟨_⟩:_ x p xs = cons (x ×⟨ p ⟩ xs)
singleton : ∀ {P x} → ∀[ P x ⇒ Allstar P [ x ] ]
singleton v = cons (v ×⟨ ⊎-idʳ ⟩ nil)
open import Relation.Ternary.Separation.Construct.List I
open import Data.List.Relation.Ternary.Interleaving.Propositional as I
repartition : ∀ {P} {Σ₁ Σ₂ Σ} →
Σ₁ ⊎ Σ₂ ≣ Σ → ∀[ Allstar P Σ ⇒ Allstar P Σ₁ ✴ Allstar P Σ₂ ]
repartition [] nil = nil ×⟨ ⊎-idˡ ⟩ nil
repartition (consˡ σ) (cons (a ×⟨ σ′ ⟩ qx)) =
let
xs ×⟨ σ′′ ⟩ ys = repartition σ qx
_ , τ₁ , τ₂ = ⊎-unassoc σ′ σ′′
in (cons (a ×⟨ τ₁ ⟩ xs)) ×⟨ τ₂ ⟩ ys
repartition (consʳ σ) (cons (a ×⟨ σ′ ⟩ qx)) =
let
xs ×⟨ σ′′ ⟩ ys = repartition σ qx
_ , τ₁ , τ₂ = ⊎-unassoc σ′ (⊎-comm σ′′)
in xs ×⟨ ⊎-comm τ₂ ⟩ (cons (a ×⟨ τ₁ ⟩ ys))
concat : ∀ {P} {Γ₁ Γ₂} → ∀[ Allstar P Γ₁ ✴ Allstar P Γ₂ ⇒ Allstar P (Γ₁ ++ Γ₂) ]
concat (nil ×⟨ s ⟩ env₂) rewrite ⊎-id⁻ˡ s = env₂
concat (cons (v ×⟨ s ⟩ env₁) ×⟨ s' ⟩ env₂) =
let _ , eq₁ , eq₂ = ⊎-assoc s s' in
cons (v ×⟨ eq₁ ⟩ (concat (env₁ ×⟨ eq₂ ⟩ env₂)))
|
# Logistic classifier
## Concepts
The logistic classifier is a linear classifier: $X$ is the input (for example the pixels of an image), and a linear transformation is applied to it to produce the predictions $y$:
\begin{align}
WX+b = y
\end{align}
- $W \in \mathbb{R}^{n \times m}$ is the weight matrix,
- $X \in \mathbb{R}^{m}$ is an input vector,
- $b \in \mathbb{R}^{n}$ is the bias,
- $y \in \mathbb{R}^{n}$ is the predicted output.
The goal is to find the matrices $W$ and $b$ that produce the best predictions.
The problem is that $y$ is obviously not a probability of belonging to a class: $\sum_{i=1}^{n} y_i \ne 1$. We therefore apply the $softmax$ function to the output elements, on the one hand to normalize the output values, but also to "exaggerate" the gaps between them:
\begin{align}
S(y_i) = \frac{e^{y_i}}{\sum_j e^{y_j}}
\end{align}
The raw scores $y_i$ that feed into the softmax are called **logits**.
We therefore now have probabilities of belonging to each class (or rather an estimate of them, hence the hat on the vector $p$):
\begin{align}
S(WX+b) = \hat{p}
\end{align}
During training, the probability of the desired output is encoded in the following trivial way: we use a vector with a probability of $1$ for the desired class and $0$ for the other classes. This encoding is called **one-hot encoding**. Let $p$ denote this desired probability vector.
The goal is now to measure a "distance" between the desired output probability vector and the actual one. The measure commonly used for this purpose is the **cross-entropy**:
\begin{align}
D(\hat{p},p) = - \sum_i p_i \log (\hat{p}_i)
\end{align}
**Important:** note that it is $\hat{p}$ that appears inside the $\log$. This point matters because $p$ contains $n-1$ entries equal to $0$... and $\log(0) = -\infty$.
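A minimal NumPy sketch of this pipeline (linear scores, softmax, one-hot target, cross-entropy), with made-up numbers and an `eps` guard against $\log(0)$:
```python
import numpy as np

def softmax(y):
    e = np.exp(y - y.max())  # subtract the max for numerical stability
    return e / e.sum()

def cross_entropy(p_hat, p, eps=1e-12):
    return -np.sum(p * np.log(p_hat + eps))  # eps guards against log(0)

W = np.array([[0.2, -0.5], [1.0, 0.3], [-0.4, 0.8]])  # 3 classes, 2 features
b = np.array([0.1, 0.0, -0.1])
X = np.array([1.0, 2.0])

p_hat = softmax(W @ X + b)     # estimated probabilities, sums to 1
p = np.array([0.0, 1.0, 0.0])  # one-hot encoding of the desired class
print(cross_entropy(p_hat, p))
```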
## Training
The first approach is to minimize the average error over all examples:
\begin{align}
\mathcal{L}(W,b) = \frac{1}{n} \sum_i D(S(WX_i+b),P_i)
\end{align}
What we are looking for is $argmin_{W,b} \mathcal{L}(W,b)$.
See: [Exercise 1: 1_notmnist.ipynb](1_notmnist.ipynb)
# Measuring performance
**Training set**: a set of examples used for learning, i.e. to fit the parameters of the classifier. In the case of NNs, we would use the training set to find the "optimal" synaptic weights.
**Validation set**: a set of examples used to tune the hyperparameters of a classifier. In the case of NNs, we would use the validation set to find the "optimal" number of hidden layers or to determine a stopping point for the backpropagation algorithm.
**Test set**: a set of examples used only to evaluate the performance of a fully trained classifier. In the case of NNs, we would use the test set to estimate the error rate after having chosen the final model (NN size and synaptic weights).
Why separate the test and validation sets? To avoid overfitting the model's hyperparameters.
Assorted notes:
- To claim an improvement, at least 30 examples of the validation set must become correctly classified before concluding that the model has improved; that is a 0.1% improvement on a validation set of 30000 examples.
# Gradient descent vs stochastic gradient descent
## Description
\begin{align}
\mathcal{L}(W,b) = \frac{1}{n} \sum_i D(S(WX_i+b),P_i)
\end{align}
We would have to compute $\nabla \mathcal{L}(W,b)$ over all examples... and do so many times. This process is far too expensive; instead of computing $\mathcal{L}$, at each iteration we compute an estimate $\hat{\mathcal{L}}$ of this value on a subset of the training data (between 1 and 1000 examples chosen at random at each iteration). This method is called stochastic gradient descent, or **SGD**.
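A minimal sketch of one SGD step, assuming a hypothetical `grad_fn` that returns the gradients of $\hat{\mathcal{L}}$ on a mini-batch:
```python
import numpy as np

def sgd_step(W, b, X, P, grad_fn, lr=0.01, batch_size=128):
    """Estimate the gradient on a random mini-batch and take one step."""
    idx = np.random.choice(len(X), size=batch_size, replace=False)
    dW, db = grad_fn(W, b, X[idx], P[idx])  # gradient estimate on the batch
    return W - lr * dW, b - lr * db
```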
## Constraints for SGD
For **SGD** to work, we need:
- Inputs that are:
  - zero-mean
  - of small variance
- Weights initialized:
  - randomly
  - with zero mean
  - with small variance
## Tricks for SGD
### Momentum
Use inertia (momentum): rather than updating the weights simply with $\delta_t = - \alpha \nabla \mathcal{L}(W)$, update them as follows: $\delta_t = (1-\beta) \nabla \mathcal{L}(W) + \beta \delta_{t-1}$ with $\beta \in [0,1]$. A value of $\beta = 0.9$ is generally a good choice.
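A minimal sketch of this momentum update (conventions vary; some implementations omit the $(1-\beta)$ factor on the gradient):
```python
import numpy as np

def momentum_step(W, grad, delta_prev, lr=0.01, beta=0.9):
    """One weight update with momentum, following the convention above."""
    delta = (1 - beta) * grad + beta * delta_prev  # delta_t
    return W - lr * delta, delta                   # new weights, new delta_t
```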
### Learning rate decay
The goal is to shrink the step size over the course of the iterations. Many methods exist (a sketch of the first two follows below):
- Exponential decay: it has the mathematical form $lr = lr_0 e^{- kt}$, where $lr_0$ and $k$ are hyperparameters and $t$ is the iteration number.
- Step decay: for example, halve the learning rate every 10 epochs.
- ...
In any case, if training is going badly... start by lowering the learning rate.
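A minimal sketch of the two decay schedules named above, where `lr0` is the initial learning rate:
```python
import numpy as np

def exponential_decay(lr0, k, t):
    """lr = lr0 * exp(-k * t), where t is the iteration number."""
    return lr0 * np.exp(-k * t)

def step_decay(lr0, drop=0.5, epochs_per_drop=10, epoch=0):
    """Multiply the learning rate by `drop` every `epochs_per_drop` epochs."""
    return lr0 * drop ** (epoch // epochs_per_drop)

print(exponential_decay(0.1, 0.01, 100))  # ~0.0368
print(step_decay(0.1, epoch=25))          # 0.025
```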
**ADAGRAD** is a gradient-descent optimization algorithm that avoids having to manage several hyperparameters such as the *initial learning rate*, the *momentum* and the *decay*.
Many [other algorithms](http://ruder.io/optimizing-gradient-descent/index.html#adagrad) of this kind exist.
## Avoiding overfitting
The goal of the following techniques is to minimize overfitting.
### Early termination
This technique consists of stopping training as soon as the accuracy on the validation set stops improving.
### Regularization techniques
Techniques that explicitly add constraints on the weights, for example:
- **L2 regularization**: a new term penalizing overly large weights is added to the cost function:
\begin{align}
\mathcal{L}_{reg} = \mathcal{L} + \beta \frac{1}{2} \| W \|_2^2
\end{align}
$\beta$ is therefore another hyperparameter.
The derivative of the added regularization term is easy to compute: it is simply $\beta W$.
- **Dropout**: the neural network is randomly stripped of part of its neurons during the training phase (their output values are set to 0). This process is repeated at each iteration. A probability of $0.5$ is commonly used for dropout. If dropout does not work on a model, the model does not have enough weights. A sketch of the training-time mask follows below.
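A minimal NumPy sketch of the commonly used "inverted dropout" mask (applied only at training time; at test time the layer is left unchanged):
```python
import numpy as np

def dropout(activations, p=0.5):
    """Zero out each unit with probability p, rescaling the survivors
    so that the expected activation is unchanged."""
    mask = (np.random.rand(*activations.shape) >= p) / (1.0 - p)
    return activations * mask
```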
|
from p3ui import *
import numpy as np
from matplotlib.image import imread
icon_char = b'\xee\x8b\x88'.decode('utf-8')
def load_texture_data(path):
return (imread(path) * 255).astype(np.uint8)
def show_popup(window):
window.add(Popup(content=Text('button clicked!')))
class TabWidgets(ScrollArea):
def __init__(self, user_interface, assets):
super().__init__(
content=Layout(
width=(100 | percent, 0, 0),
direction=Direction.Vertical,
align_items=Alignment.Stretch,
justify_content=Justification.Start,
children=[
Button(
label='Button',
on_click=lambda: user_interface.add(Popup(content=Text('button clicked!')))
),
Layout(direction=Direction.Horizontal,
align_items=Alignment.End,
children=[
Text(f'Text'),
Text(f'Green Text', color='green'),
Text(f'Red Text', color='#ff0000'),
]),
Button(
label=f"{icon_char} Icon Button",
on_click=lambda: print('icon button clicked')
),
Button(
disabled=True,
label=f"{icon_char} Icon Button",
on_click=lambda: print('icon button clicked')
),
Text(f'Some Text', label='Label'),
CheckBox(
label='CheckBox',
on_change=lambda value: print(f'checkbox value: {value}')
),
InputText(label='InputText', hint="enter sth."),
ComboBox(
label='ComboBox',
options=['aaaa', 'bbbb', 'cccc'],
selected_index=1,
on_change=lambda index: print(f'combo selected {index}')
),
Text(f'Text'),
ProgressBar(label='Progress Bar', value=0.4),
ProgressBar(value=0.3),
ComboBox(
options=['aaaa', 'bbbb', 'cccc'],
selected_index=1,
on_change=lambda index: print(f'combo selected {index}')
),
ScrollArea(
width=(200 | px, 1, 0),
height=(200 | px, 1, 0),
content=Image(
texture=Texture(
load_texture_data(assets.joinpath(
"test.png"
).as_posix())
),
on_mouse_enter=lambda e: print('mouse entered image'),
on_mouse_move=lambda e: print(f'{e.source} {e.x} {e.y}'),
on_mouse_leave=lambda e: print('mouse left image'),
)
),
SliderU8(
min=0, max=100, value=20, label='SliderU8',
on_change=lambda value: print(f'SliderU8 value: {value}')
),
SliderU16(min=0, max=100, value=20, label='SliderU16'),
SliderU32(min=0, max=100, value=20, label='SliderU32'),
SliderU64(min=0, max=100, value=20, label='SliderU64'),
SliderS8(min=0, max=100, value=20, label='SliderS8'),
SliderS16(disabled=True, min=0, max=100, value=20, label='SliderS16'),
SliderS32(min=0, max=100, value=20, label='SliderS32'),
SliderS64(min=0, max=100, value=20, label='SliderS64'),
SliderFloat(min=0, max=100, value=20, label='SliderFloat'),
SliderDouble(min=0, max=100, value=20, label='SliderDouble'),
SliderFloat(
min=0, max=100,
value=20,
label='SliderFloat (formatted)', format="value=%.3f"
),
InputU8(
min=0, max=100, value=20, label='InputU8',
on_change=lambda value: print(f'InputU8 value: {value}')
),
InputU16(disabled=True, min=0, max=100, value=20, label='InputU16', step=1),
InputU32(min=0, max=100, value=20, label='InputU32', step=2),
InputU64(min=0, max=100, value=20, label='InputU64'),
InputS16(min=0, max=100, value=20, label='InputS16'),
InputS8(min=0, max=100, value=20, label='InputS8'),
InputS32(min=0, max=100, value=20, label='InputS32'),
InputS64(min=0, max=100, value=20, label='InputS64'),
InputFloat(min=0, max=100, value=20, label='InputFloat'),
InputDouble(min=0, max=100, value=20, label='InputDouble'),
InputFloat(
min=0, max=100, value=20, label='InputFloat (formatted)'
, format="value=%.3f"
)
]
)
)
|
1982
|
[STATEMENT]
lemma PNT4_imp_PNT5:
assumes "\<theta> \<sim>[at_top] (\<lambda>x. x)"
shows "\<psi> \<sim>[at_top] (\<lambda>x. x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<psi> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<psi> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
define r where "r = (\<lambda>x. \<psi> x - \<theta> x)"
[PROOF STATE]
proof (state)
this:
r = (\<lambda>x. \<psi> x - \<theta> x)
goal (1 subgoal):
1. \<psi> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
have "r \<in> O(\<lambda>x. ln x * sqrt x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r \<in> O(\<lambda>x. ln x * sqrt x)
[PROOF STEP]
unfolding r_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. \<psi> x - \<theta> x) \<in> O(\<lambda>x. ln x * sqrt x)
[PROOF STEP]
by (fact \<psi>_minus_\<theta>_bigo)
[PROOF STATE]
proof (state)
this:
r \<in> O(\<lambda>x. ln x * sqrt x)
goal (1 subgoal):
1. \<psi> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
r \<in> O(\<lambda>x. ln x * sqrt x)
goal (1 subgoal):
1. \<psi> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
have "(\<lambda>x::real. ln x * sqrt x) \<in> o(\<lambda>x. x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. ln x * sqrt x) \<in> o(\<lambda>x. x)
[PROOF STEP]
by real_asymp
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln x * sqrt x) \<in> o(\<lambda>x. x)
goal (1 subgoal):
1. \<psi> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
r \<in> o(\<lambda>x. x)
[PROOF STEP]
have r: "r \<in> o(\<lambda>x. x)"
[PROOF STATE]
proof (prove)
using this:
r \<in> o(\<lambda>x. x)
goal (1 subgoal):
1. r \<in> o(\<lambda>x. x)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
r \<in> o(\<lambda>x. x)
goal (1 subgoal):
1. \<psi> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
have "(\<lambda>x. \<theta> x + r x) \<sim>[at_top] (\<lambda>x. x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. \<theta> x + r x) \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
using assms r
[PROOF STATE]
proof (prove)
using this:
\<theta> \<sim>[at_top] (\<lambda>x. x)
r \<in> o(\<lambda>x. x)
goal (1 subgoal):
1. (\<lambda>x. \<theta> x + r x) \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
by (subst asymp_equiv_add_right) auto
[PROOF STATE]
proof (state)
this:
(\<lambda>x. \<theta> x + r x) \<sim>[at_top] (\<lambda>x. x)
goal (1 subgoal):
1. \<psi> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. \<theta> x + r x) \<sim>[at_top] (\<lambda>x. x)
goal (1 subgoal):
1. \<psi> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
by (simp add: r_def)
[PROOF STATE]
proof (state)
this:
\<psi> \<sim>[at_top] (\<lambda>x. x)
goal:
No subgoals!
[PROOF STEP]
qed
|
{-# OPTIONS --cubical --no-import-sorts --safe --guardedness #-}
module Cubical.Codata.Stream.Base where
open import Cubical.Core.Everything
record Stream (A : Type₀) : Type₀ where
  coinductive
  constructor _,_
  field
    head : A
    tail : Stream A
|
module js
import GR
pxwidth = 640
pxheight = 480
@static if VERSION < v"0.7.0-DEV.4762"
macro cfunction(f, rt, tup)
:(Base.cfunction($(esc(f)), $(esc(rt)), Tuple{$(esc(tup))...}))
end
end
id_count = 0
js_running = false
mutable struct JSTermWidget
identifier::Int
width::Int
height::Int
visible::Bool
end
function inject_js()
global comm
comm = nothing
_js_fallback = "https://gr-framework.org/downloads/gr-latest.js"
_gr_js = if isfile(joinpath(ENV["GRDIR"], "lib", "gr.js"))
_gr_js = try
_gr_js = open(joinpath(ENV["GRDIR"], "lib", "gr.js")) do f
_gr_js = read(f, String)
_gr_js
end
catch e
nothing
end
_gr_js
end
_jsterm = """
if (typeof grJSTermRunning === 'undefined' || !grJstermReady) {
BOXZOOM_THRESHOLD = 3; // Minimal size in pixels of the boxzoom-box to trigger a boxzoom-event
BOXZOOM_TRIGGER_THRESHHOLD = 1000; // Time to wait (in ms) before triggering boxzoom event instead
// of panning when pressing the left mouse button without moving the mouse
MAX_KERNEL_CONNECTION_ATTEMPTS = 25; // Maximum number of kernel initialisation attempts
KERNEL_CONNECT_WAIT_TIME = 100; // Time to wait between kernel initialisation attempts
RECONNECT_PLOT_TIMEOUT = 100; // Time to wait between attempts to connect to a plot's canvas
RECONNECT_PLOT_MAX_ATTEMPTS = 50; // Maximum number of canvas reconnection attempts
BOXZOOM_FILL_STYLE = '#FFAAAA'; // Fill style of the boxzoom box
BOXZOOM_STROKE_STYLE = '#FF0000'; // Outline style of the boxzoom box
var gr, comm, idcount = 0, widgets = [], onready = [], jupyterRunning = false;
/**
* Loads a javascript file from `url` and calls `callback` when loading has been finished.
* @param {string} url URL to load script from
* @param {Function} callback Function to call when loading is finished
* @param {number} maxtime Maximum time in ms to wait for script-loading
*/
saveLoad = function(url, callback, maxtime) {
let script = document.createElement('script');
script.onload = function() {
callback();
};
script.onerror = function() {
console.error(url + ' can not be loaded.');
};
script.src = url;
document.head.appendChild(script);
setTimeout(function() {
if (!grJstermReady) {
console.error(url + ' can not be loaded.');
}
}, maxtime);
};
/**
* Sends an object describing a event via jupyter-comm
* @param {Object} data Data describing the event
* @param {string} id Identifikator of the calling plot
*/
sendEvt = function(data, id) {
if (jupyterRunning) {
comm.send({
"type": "evt",
"content": data,
"id": id
});
}
};
/**
* Runs draw-events cached during javascript startup
*/
jsLoaded = function() {
grJstermReady = true;
for (let or of onready) {
or();
}
onready = [];
};
/**
* Sends a create-canvas-event via jupyter-comm
* @param {number} id The plot's id
* @param {number} width The canvas width in pixels
* @param {number} height The canvas height in pixels
*/
createCanvas = function(id, width, height) {
if (jupyterRunning) {
comm.send({
"type": "createCanvas",
"id": id,
"width": width,
"height": height
});
}
};
/**
* Sends a save-data-event via jupyter-comm
* @param {Object} data Data to save
* @param {string} id plot identificator
*/
saveData = function(data, id) {
if (jupyterRunning) {
comm.send({
"type": "save",
"content": {
"id": id,
"data": JSON.stringify(data)
}
});
}
};
/**
* Registration/initialisation of the jupyter-comm
* @param {[type]} kernel Jupyter kernel object
*/
registerComm = function(kernel) {
kernel.comm_manager.register_target('jsterm_comm', function(c) {
c.on_msg(function(msg) {
let data = msg.content.data;
if (data.type == 'evt') {
if (typeof widgets[data.id] !== 'undefined') {
widgets[data.id].msgHandleEvent(data);
}
} else if (msg.content.data.type == 'cmd') {
if (typeof data.id !== 'undefined') {
if (typeof widgets[data.id] !== 'undefined') {
widgets[data.id].msgHandleCommand(data);
}
} else {
for (let key in widgets) {
widgets[key].msgHandleCommand(data);
}
}
} else if (data.type == 'draw') {
draw(msg);
}
});
c.on_close(function() {});
window.addEventListener('beforeunload', function(e) {
c.close();
});
comm = c;
});
};
/**
* Function to call when page has been loaded.
* Determines if running in a jupyter environment.
*/
onLoad = function() {
if (typeof Jupyter !== 'undefined') {
jupyterRunning = true;
initKernel(1);
} else {
drawSavedData();
}
};
/**
* Jupyter specific initialisation.
* Retrying maximum `MAX_KERNEL_CONNECTION_ATTEMPTS` times
* @param {number} attempt number of attempt
*/
initKernel = function(attempt) {
let kernel = Jupyter.notebook.kernel;
if (typeof kernel === 'undefined' || kernel == null) {
if (attempt < MAX_KERNEL_CONNECTION_ATTEMPTS) {
setTimeout(function() {
initKernel(attempt + 1);
}, KERNEL_CONNECT_WAIT_TIME);
}
} else {
registerComm(kernel);
Jupyter.notebook.events.on('kernel_ready.Kernel', function() {
registerComm(kernel);
for (let key in widgets) {
widgets[key].connectCanvas();
}
});
drawSavedData();
}
};
/**
* Handles a draw command.
* @param {[type]} msg The input message containing the draw command
*/
draw = function(msg) {
if (!grJstermReady) {
onready.push(function() {
return draw(msg);
});
} else if (!GR.is_ready) {
GR.ready(function() {
return draw(msg);
});
} else {
if (typeof gr === 'undefined') {
let canvas = document.createElement('canvas');
canvas.id = 'jsterm-hidden-canvas';
canvas.width = 640;
canvas.height = 480;
canvas.style = 'display: none;';
document.body.appendChild(canvas);
gr = new GR('jsterm-hidden-canvas');
gr.registermeta(gr.GR_META_EVENT_SIZE, sizeCallback);
gr.registermeta(gr.GR_META_EVENT_NEW_PLOT, newPlotCallback);
gr.registermeta(gr.GR_META_EVENT_UPDATE_PLOT, updatePlotCallback);
}
let metaargs = gr.newmeta(); // 'arguments' is reserved inside functions, so use a different name
gr.readmeta(metaargs, msg.content.data.json);
gr.mergemeta(metaargs);
}
};
/**
* Draw data that has been saved in the loaded page
*/
drawSavedData = function() {
let data = document.getElementsByClassName("jsterm-data");
for (let i = 0; i < data.length; i++) {
let msg = data[i].innerText;
draw(JSON.parse(msg));
}
};
if (document.readyState!='loading') {
onLoad();
} else if (document.addEventListener) {
document.addEventListener('DOMContentLoaded', onLoad);
} else document.attachEvent('onreadystatechange', function() {
if (document.readyState=='complete') {
onLoad();
}
});
/**
* Callback for gr-meta's size event. Handles event and resizes canvas if required.
*/
sizeCallback = function(evt) {
widgets[evt.plot_id].resize(evt.width, evt.height);
};
/**
* Callback for gr-meta's new plot event. Handles event and creates new canvas.
*/
newPlotCallback = function(evt) {
if (typeof widgets[evt.plot_id] === 'undefined') {
widgets[evt.plot_id] = new JSTermWidget(evt.plot_id);
}
widgets[evt.plot_id].draw();
};
/**
* Callback for gr-meta's update plot event. Handles event and creates canvas id needed.
*/
updatePlotCallback = function(evt) {
if (typeof widgets[evt.plot_id] === 'undefined') {
console.error('Updated plot does not exist, creating new object. (id', evt.plot_id, ')');
widgets[evt.plot_id] = new JSTermWidget(evt.plot_id);
}
widgets[evt.plot_id].draw();
};
/**
* Creates a JSTermWidget-Object describing and managing a canvas
* @param {number} id The widget's numerical identificator (belonging context in `meta.c`)
* @constructor
*/
JSTermWidget = function(id) {
/**
* Initialize the JSTermWidget
*/
this.init = function() {
this.canvas = undefined;
this.overlayCanvas = undefined;
this.div = undefined;
this.id = id; // context id for meta.c (switchmeta)
this.waiting = false;
this.oncanvas = function() {};
// event handling
this.pinching = false;
this.panning = false;
this.prevMousePos = undefined;
this.boxzoom = false;
this.keepAspectRatio = true;
this.boxzoomTriggerTimeout = undefined;
this.boxzoomPoint = [undefined, undefined];
this.pinchDiff = 0;
this.prevTouches = undefined;
this.sendEvents = false;
this.handleEvents = true;
this.width = 640;
this.height = 480;
};
this.init();
/**
* Resizes the JSTermWidget
* @param {number} width new canvas width in pixels
* @param {number} height new canvas height in pixels
*/
this.resize = function(width, height) {
this.width = width;
this.height = height;
if (this.canvas !== undefined) {
this.canvas.width = width;
this.canvas.height = height;
this.overlayCanvas.width = width;
this.overlayCanvas.height = height;
this.div.style = "position: relative; width: " + width + "px; height: " + height + "px;";
}
this.draw();
};
/**
* Send a event fired by widget via jupyter-comm
* @param {Object} data Event description
*/
this.sendEvt = function(data) {
if (this.sendEvents) {
sendEvt(data, this.id);
}
};
/**
* Calculate coordinates on the canvas of the mouseevent.
* @param {Event} event The mouse event to process
* @return {[number, number]} The calculated [x, y]-coordinates
*/
this.getCoords = function(event) {
let rect = this.canvas.getBoundingClientRect();
//TODO mind the canvas-padding if necessary!
return [Math.floor(event.clientX - rect.left), Math.floor(event.clientY - rect.top)];
};
/**
* Send a event to `meta.c`
* @param {number} mouseargs (Emscripten) address of the argumentcontainer describing a event
*/
this.grEventinput = function(mouseargs) {
gr.switchmeta(this.id);
gr.inputmeta(mouseargs);
gr.current_canvas = this.canvas;
gr.current_context = gr.current_canvas.getContext('2d');
gr.select_canvas();
gr.plotmeta();
};
/**
* Handles a wheel event (zoom)
* @param {number} x x-coordinate on the canvas of the mouse
* @param {number} y y-coordinate on the canvas of the mouse
* @param {number} angle_delta angle the wheel has been turned
*/
this.handleWheel = function(x, y, angle_delta) {
if (typeof this.boxzoomTriggerTimeout !== 'undefined') {
clearTimeout(this.boxzoomTriggerTimeout);
}
let mouseargs = gr.newmeta();
gr.meta_args_push(mouseargs, "x", "i", [x]);
gr.meta_args_push(mouseargs, "y", "i", [y]);
gr.meta_args_push(mouseargs, "angle_delta", "d", [angle_delta]);
this.grEventinput(mouseargs);
};
/**
* Handles a wheel event triggered by the mouse
* @param {Event} event The fired mouse event
*/
this.mouseHandleWheel = function (event) {
let coords = this.getCoords(event);
this.sendEvt({
"x": coords[0],
"y": coords[1],
"angle_delta": event.deltaY,
"event": "mousewheel",
});
if (this.handleEvents) {
this.handleWheel(coords[0], coords[1], event.deltaY);
}
event.preventDefault();
};
/**
* Handles a mousedown event
* @param {number} x x-coordinate on the canvas of the mouse
* @param {number} y y-coordinate on the canvas of the mouse
* @param {number} button Integer indicating the button pressed (0: left, 1: middle/wheel, 2: right)
* @param {Boolean} ctrlKey Boolean indicating if the ctrl-key is pressed
*/
this.handleMousedown = function(x, y, button, ctrlKey) {
if (typeof this.boxzoomTriggerTimeout !== 'undefined') {
clearTimeout(this.boxzoomTriggerTimeout);
}
if (button == 0) {
this.overlayCanvas.style.cursor = 'move';
this.panning = true;
this.boxzoom = false;
this.prevMousePos = [x, y];
this.boxzoomTriggerTimeout = setTimeout(function() {this.startBoxzoom(x, y, ctrlKey);}.bind(this), BOXZOOM_TRIGGER_THRESHHOLD);
} else if (button == 2) {
this.startBoxzoom(x, y, ctrlKey);
}
};
/**
* Handles a mousedown event triggered by the mouse
* @param {Event} event The fired mouse event
*/
this.mouseHandleMousedown = function (event) {
let coords = this.getCoords(event);
this.sendEvt({
"x": coords[0],
"y": coords[1],
"button": event.button,
"ctrlKey": event.ctrlKey,
"event": "mousedown",
});
if (this.handleEvents) {
this.handleMousedown(coords[0], coords[1], event.button, event.ctrlKey);
}
event.preventDefault();
};
/**
* Initiate the boxzoom on the canvas.
* @param {number} x x-coordinate of the mouse
* @param {number} y y-coordinate of the mouse
* @param {Boolean} ctrlKey Boolean indicating if the ctrl-key is pressed
*/
this.startBoxzoom = function(x, y, ctrlKey) {
this.panning = false;
this.boxzoom = true;
if (ctrlKey) {
this.keepAspectRatio = false;
}
this.boxzoomPoint = [x, y];
this.overlayCanvas.style.cursor = 'nwse-resize';
};
/**
* Handles a mouseup event
* @param {number} x x-coordinate on the canvas of the mouse
* @param {number} y y-coordinate on the canvas of the mouse
* @param {number} button Integer indicating the button pressed (0: left, 1: middle/wheel, 2: right)
*/
this.handleMouseup = function(x, y, button) {
if (typeof this.boxzoomTriggerTimeout !== 'undefined') {
clearTimeout(this.boxzoomTriggerTimeout);
}
if (this.boxzoom) {
if ((Math.abs(this.boxzoomPoint[0] - x) >= BOXZOOM_THRESHOLD) && (Math.abs(this.boxzoomPoint[1] - y) >= BOXZOOM_THRESHOLD)) {
let mouseargs = gr.newmeta();
let diff = [x - this.boxzoomPoint[0], y - this.boxzoomPoint[1]];
gr.meta_args_push(mouseargs, "x1", "i", [this.boxzoomPoint[0]]);
gr.meta_args_push(mouseargs, "x2", "i", [this.boxzoomPoint[0] + diff[0]]);
gr.meta_args_push(mouseargs, "y1", "i", [this.boxzoomPoint[1]]);
gr.meta_args_push(mouseargs, "y2", "i", [this.boxzoomPoint[1] + diff[1]]);
if (this.keepAspectRatio) {
gr.meta_args_push(mouseargs, "keep_aspect_ratio", "i", [1]);
} else {
gr.meta_args_push(mouseargs, "keep_aspect_ratio", "i", [0]);
}
this.grEventinput(mouseargs);
}
}
this.prevMousePos = undefined;
this.overlayCanvas.style.cursor = 'auto';
this.panning = false;
this.boxzoom = false;
this.boxzoomPoint = [undefined, undefined];
this.keepAspectRatio = true;
let context = this.overlayCanvas.getContext('2d');
context.clearRect(0, 0, this.overlayCanvas.width, this.overlayCanvas.height);
};
/**
* Handles a mouseup event triggered by the mouse
* @param {Event} event The fired mouse event
*/
this.mouseHandleMouseup = function(event) {
let coords = this.getCoords(event);
this.sendEvt({
"x": coords[0],
"y": coords[1],
"button": event.button,
"event": "mouseup",
});
if (this.handleEvents) {
this.handleMouseup(coords[0], coords[1], event.button);
}
event.preventDefault();
};
/**
* Handles a touchstart event triggered by tapping the touchscreen
* @param {Event} event The fired touch event
*/
this.touchHandleTouchstart = function(event) {
if (event.touches.length == 1) {
let coords = this.getCoords(event.touches[0]);
this.handleMousedown(coords[0], coords[1], 0, false);
} else if (event.touches.length == 2) {
this.pinching = true;
this.pinchDiff = Math.abs(event.touches[0].clientX - event.touches[1].clientX) + Math.abs(event.touches[0].clientY - event.touches[1].clientY);
let c1 = this.getCoords(event.touches[0]);
let c2 = this.getCoords(event.touches[1]);
this.prevTouches = [c1, c2];
} else if (event.touches.length == 3) {
let coords1 = this.getCoords(event.touches[0]);
let coords2 = this.getCoords(event.touches[1]);
let coords3 = this.getCoords(event.touches[2]);
let x = (coords1[0] + coords2[0] + coords3[0]) / 3;
let y = (coords1[1] + coords2[1] + coords3[1]) / 3;
this.handleDoubleclick(x, y);
}
event.preventDefault();
};
/**
* Handles a touchend event
* @param {Event} event The fired touch event
*/
this.touchHandleTouchend = function(event) {
this.handleMouseleave();
};
/**
* Handles a touchmove event triggered by moving fingers on the touchscreen
* @param {Event} event The fired touch event
*/
this.touchHandleTouchmove = function(event) {
if (event.touches.length == 1) {
let coords = this.getCoords(event.touches[0]);
this.handleMousemove(coords[0], coords[1]);
} else if (this.pinching && event.touches.length == 2) {
let c1 = this.getCoords(event.touches[0]);
let c2 = this.getCoords(event.touches[1]);
let diff = Math.sqrt(Math.pow(Math.abs(c1[0] - c2[0]), 2) + Math.pow(Math.abs(c1[1] - c2[1]), 2));
if (typeof this.pinchDiff !== 'undefined' && typeof this.prevTouches !== 'undefined') {
let factor = this.pinchDiff / diff;
let mouseargs = gr.newmeta();
gr.meta_args_push(mouseargs, "x", "i", [(c1[0] + c2[0]) / 2]);
gr.meta_args_push(mouseargs, "y", "i", [(c1[1] + c2[1]) / 2]);
gr.meta_args_push(mouseargs, "factor", "d", [factor]);
this.grEventinput(mouseargs);
let panmouseargs = gr.newmeta();
gr.meta_args_push(panmouseargs, "x", "i", [(c1[0] + c2[0]) / 2]);
gr.meta_args_push(panmouseargs, "y", "i", [(c1[1] + c2[1]) / 2]);
gr.meta_args_push(panmouseargs, "xshift", "i", [(c1[0] - this.prevTouches[0][0] + c2[0] - this.prevTouches[1][0]) / 2.0]);
gr.meta_args_push(panmouseargs, "yshift", "i", [(c1[1] - this.prevTouches[0][1] + c2[1] - this.prevTouches[1][1]) / 2.0]);
this.grEventinput(panmouseargs);
}
this.pinchDiff = diff;
this.prevTouches = [c1, c2];
}
event.preventDefault();
};
/**
* Handles a mouseleave event
*/
this.handleMouseleave = function() {
if (typeof this.boxzoomTriggerTimeout !== 'undefined') {
clearTimeout(this.boxzoomTriggerTimeout);
}
this.overlayCanvas.style.cursor = 'auto';
this.panning = false;
this.prevMousePos = undefined;
if (this.boxzoom) {
let context = this.overlayCanvas.getContext('2d');
context.clearRect(0, 0, this.overlayCanvas.width, this.overlayCanvas.height);
}
this.boxzoom = false;
this.boxzoomPoint = [undefined, undefined];
this.keepAspectRatio = true;
};
/**
* Handles a mouseleave event triggered by the mouse
* @param {Event} event The fired mouse event
*/
this.mouseHandleMouseleave = function(event) {
this.pinchDiff = undefined;
this.prevTouches = undefined;
this.sendEvt({
"event": "mouseleave",
});
if (this.handleEvents) {
this.handleMouseleave();
}
};
/**
* Handles a mousemove event
* @param {number} x x-coordinate on the canvas of the mouse
* @param {number} y y-coordinate on the canvas of the mouse
*/
this.handleMousemove = function(x, y) {
if (this.panning) {
if (typeof this.boxzoomTriggerTimeout !== 'undefined') {
clearTimeout(this.boxzoomTriggerTimeout);
}
let mouseargs = gr.newmeta();
gr.meta_args_push(mouseargs, "x", "i", [this.prevMousePos[0]]);
gr.meta_args_push(mouseargs, "y", "i", [this.prevMousePos[1]]);
gr.meta_args_push(mouseargs, "xshift", "i", [x - this.prevMousePos[0]]);
gr.meta_args_push(mouseargs, "yshift", "i", [y - this.prevMousePos[1]]);
this.grEventinput(mouseargs);
this.prevMousePos = [x, y];
} else if (this.boxzoom) {
let context = this.overlayCanvas.getContext('2d');
let diff = [x - this.boxzoomPoint[0], y - this.boxzoomPoint[1]];
gr.switchmeta(this.id);
let box = gr.meta_get_box(this.boxzoomPoint[0], this.boxzoomPoint[1], this.boxzoomPoint[0] + diff[0], this.boxzoomPoint[1] + diff[1], this.keepAspectRatio);
context.clearRect(0, 0, this.overlayCanvas.width, this.overlayCanvas.height);
if (diff[0] * diff[1] >= 0) {
this.overlayCanvas.style.cursor = 'nwse-resize';
} else {
this.overlayCanvas.style.cursor = 'nesw-resize';
}
context.fillStyle = BOXZOOM_FILL_STYLE;
context.strokeStyle = BOXZOOM_STROKE_STYLE;
context.beginPath();
context.rect(box[0], box[1], box[2], box[3]);
context.globalAlpha = 0.2;
context.fill();
context.globalAlpha = 1.0;
context.stroke();
context.closePath();
}
};
/**
* Handles a mousemove event triggered by the mouse
* @param {Event} event The fired mouse event
*/
this.mouseHandleMousemove = function (event) {
let coords = this.getCoords(event);
this.sendEvt({
"x": coords[0],
"y": coords[1],
"event": "mousemove",
});
if (this.handleEvents) {
this.handleMousemove(coords[0], coords[1]);
}
event.preventDefault();
};
/**
* Handles a doubleclick event
* @param {number} x x-coordinate on the canvas of the mouse
* @param {number} y y-coordinate on the canvas of the mouse
*/
this.handleDoubleclick = function(x, y) {
let mouseargs = gr.newmeta();
gr.meta_args_push(mouseargs, "x", "i", [x]);
gr.meta_args_push(mouseargs, "y", "i", [y]);
gr.meta_args_push(mouseargs, "key", "s", "r");
this.grEventinput(mouseargs);
this.boxzoomPoint = [undefined, undefined];
};
/**
* Handles a doubleclick event triggered by the mouse
* @param {Event} event The fired mouse event
*/
this.mouseHandleDoubleclick = function(event) {
let coords = this.getCoords(event);
this.sendEvt({
"x": coords[0],
"y": coords[1],
"event": "doubleclick",
});
if (this.handleEvents) {
this.handleDoubleclick(coords[0], coords[1]);
}
event.preventDefault();
};
/**
* Handles a event triggered by a Jupyter Comm message
* @param {Object} msg The message describing the event
*/
this.msgHandleEvent = function(msg) {
switch(msg.event) {
case "mousewheel":
this.handleWheel(msg.x, msg.y, msg.angle_delta);
break;
case "mousedown":
this.handleMousedown(msg.x, msg.y, msg.button, msg.ctrlKey);
break;
case "mouseup":
this.handleMouseup(msg.x, msg.y, msg.button);
break;
case "mousemove":
this.handleMousemove(msg.x, msg.y);
break;
case "doubleclick":
this.handleDoubleclick(msg.x, msg.y);
break;
case "mouseleave":
this.handleMouseleave();
break;
default:
break;
}
};
/**
* Handles a command received cia jupyter comm
* @param {Object} msg Received msg containing the command
*/
this.msgHandleCommand = function(msg) {
switch(msg.command) {
case 'enable_events':
this.sendEvents = true;
break;
case 'disable_events':
this.sendEvents = false;
break;
case 'enable_jseventhandling':
this.handleEvents = true;
break;
case 'disable_jseventhandling':
this.handleEvents = false;
break;
default:
break;
}
};
/**
* Draw a plot described by a message received via jupyter comm
* @param {Object} msg message containing the draw-command
*/
this.draw = function() {
if (this.waiting) {
this.oncanvas = function() {
return this.draw();
};
} else {
console.log(document.getElementById('jsterm-' + this.id));
if (document.getElementById('jsterm-' + this.id) == null) {
createCanvas(this.id, this.width, this.height);
this.canvas = undefined;
this.waiting = true;
this.oncanvas = function() {
return this.draw();
};
setTimeout(function() {
this.refreshPlot(0);
}.bind(this), RECONNECT_PLOT_TIMEOUT);
} else {
//if (document.getElementById('jsterm-data-' + this.id) == null) {
//saveData(msg, msg.content.data.id);
//}
//Jupyter.notebook.get_selected_cell().metadata.jsterm = msg;
if (document.getElementById('jsterm-' + this.id) !== this.canvas || typeof this.canvas === 'undefined' || typeof this.overlayCanvas === 'undefined') {
this.connectCanvas();
}
gr.switchmeta(this.id);
gr.current_canvas = this.canvas; //TODO is this always set? (check)
gr.current_context = gr.current_canvas.getContext('2d');
gr.select_canvas();
gr.plotmeta();
}
}
};
/**
* Connects a canvas to a JSTermWidget object.
*/
this.connectCanvas = function() {
if (document.getElementById('jsterm-' + this.id) != null) {
this.div = document.getElementById('jsterm-div-' + this.id);
this.canvas = document.getElementById('jsterm-' + this.id);
this.overlayCanvas = document.getElementById('jsterm-overlay-' + this.id);
this.overlayCanvas.addEventListener('DOMNodeRemoved', function() {
createCanvas(this.id, this.width, this.height);
this.canvas = undefined;
this.waiting = true;
this.oncanvas = function() {};
}.bind(this));
this.overlayCanvas.style.cursor = 'auto';
//registering event handler
this.overlayCanvas.addEventListener('wheel', function(evt) { this.mouseHandleWheel(evt); }.bind(this));
this.overlayCanvas.addEventListener('mousedown', function(evt) { this.mouseHandleMousedown(evt); }.bind(this));
this.overlayCanvas.addEventListener('touchstart', function(evt) { this.touchHandleTouchstart(evt); }.bind(this));
this.overlayCanvas.addEventListener('touchmove', function(evt) { this.touchHandleTouchmove(evt); }.bind(this));
this.overlayCanvas.addEventListener('touchend', function(evt) { this.touchHandleTouchend(evt); }.bind(this));
this.overlayCanvas.addEventListener('mousemove', function(evt) { this.mouseHandleMousemove(evt); }.bind(this));
this.overlayCanvas.addEventListener('mouseup', function(evt) { this.mouseHandleMouseup(evt); }.bind(this));
this.overlayCanvas.addEventListener('mouseleave', function(evt) { this.mouseHandleMouseleave(evt); }.bind(this));
this.overlayCanvas.addEventListener('dblclick', function(evt) { this.mouseHandleDoubleclick(evt); }.bind(this));
this.overlayCanvas.addEventListener('contextmenu', function(event) {
event.preventDefault();
return false;
});
}
};
/**
* Check if a deleted canvas has been recreated.
* Calls itself after RECONNECT_PLOT_TIMEOUT ms if no canvas is found
* @param {number} count Number of reconnection attempts so far
*/
this.refreshPlot = function(count) {
if (document.getElementById('jsterm-' + this.id) == null) {
if (count < RECONNECT_PLOT_MAX_ATTEMPTS) {
setTimeout(function() {
this.refreshPlot( count + 1);
}.bind(this), RECONNECT_PLOT_TIMEOUT);
}
} else {
this.waiting = false;
if (typeof this.oncanvas !== 'undefined') {
this.oncanvas();
}
}
};
};
}
var grJSTermRunning = true;"""
if _gr_js === nothing
_gr_js = string("""
JSTerm.saveLoad('""", _js_fallback, """', jsLoaded, 10000);
var grJstermReady = false;
""")
else
_gr_js = string(_gr_js, "var grJstermReady = true;")
end
display(HTML(string("""
<script type="text/javascript">
""", _gr_js, """
""", _jsterm, """
</script>
""")))
end
function JSTermWidget(id::Int, width::Int, height::Int)
global id_count, js_running
if GR.isijulia()
id_count += 1
if !js_running
inject_js()
js_running = true
end
JSTermWidget(id, width, height, false)
else
error("JSTermWidget is only available in IJulia environments")
end
end
function jsterm_display(widget::JSTermWidget)
if GR.isijulia()
display(HTML(string("<div id=\"jsterm-div-", widget.identifier, "\" style=\"position: relative; width: ", widget.width, "px; height: ", widget.height, "px;\"><canvas id=\"jsterm-overlay-", widget.identifier, "\" style=\"position:absolute; top: 0; right: 0; z-index: 1;\" width=\"", widget.width, "\" height=\"", widget.height, "\"></canvas>
<canvas id=\"jsterm-", widget.identifier, "\" style=\"position: absolute; top: 0; right: 0; z-index: 0;\" width=\"", widget.width, "\" height=\"", widget.height, "\"></canvas></div>")))
widget.visible = true
else
error("jsterm_display is only available in IJulia environments")
end
end
comm = nothing
evthandler = Dict()
global_evthandler = nothing
function register_evthandler(f::Function, device, port)
global evthandler
if GR.isijulia()
send_command(Dict("command" => "enable_events"), "cmd", string(device, port))
evthandler[string(device, port)] = f
else
error("register_evthandler is only available in IJulia environments")
end
end
function unregister_evthandler(device, port)
global evthandler
if GR.isijulia()
if global_evthandler === nothing
send_command(Dict("command" => "disable_events"), "cmd", string(device, port))
end
evthandler[string(device, port)] = nothing
else
error("unregister_evthandler is only available in IJulia environments")
end
end
function register_evthandler(f::Function)
global global_evthandler
if GR.isijulia()
send_command(Dict("command" => "enable_events"), "cmd", nothing)
global_evthandler = f
else
error("register_evthandler is only available in IJulia environments")
end
end
function unregister_evthandler()
global global_evthandler, evthandler
if GR.isijulia()
send_command(Dict("command" => "disable_events"), "cmd", nothing)
for key in keys(evthandler)
if evthandler[key] !== nothing
send_command(Dict("command" => "enable_events"), "cmd", key)
end
end
global_evthandler = nothing
else
error("unregister_evthandler is only available in IJulia environments")
end
end
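# Minimal usage sketch (hypothetical device/port values; an identifier is
# whatever `string(device, port)` produces):
#
#     register_evthandler(evt -> println("got event: ", evt), "jsterm", 1)
#     # ... interact with the plot, events arrive in the callback ...
#     unregister_evthandler("jsterm", 1)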
function send_command(msg, msgtype, id=nothing)
global comm
if GR.isijulia()
if comm === nothing
error("JSTerm comm not initialized.")
else
if id !== nothing
Main.IJulia.send_comm(comm, merge(msg, Dict("type" => msgtype, "id" => id)))
else
Main.IJulia.send_comm(comm, merge(msg, Dict("type" => msgtype)))
end
end
else
error("send_command is only available in IJulia environments")
end
end
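# For illustration (hypothetical id): `send_command(Dict("command" =>
# "enable_events"), "cmd", "jsterm1")` sends the merged message
# Dict("command" => "enable_events", "type" => "cmd", "id" => "jsterm1")
# over the comm; when no id is given, the "id" key is simply omitted.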
function send_evt(msg, device, port)
if GR.isijulia()
send_command(msg, "evt", string(device, port))
else
error("send_evt is only available in IJulia environments")
end
end
function send_evt(msg, identifier)
if GR.isijulia()
send_command(msg, "evt", identifier)
else
error("send_evt is only available in IJulia environments")
end
end
function disable_jseventhandling(device, port)
if GR.isijulia()
send_command(Dict("command" => "disable_jseventhandling"), "cmd", string(device, port))
else
error("disable_jseventhandling is only available in IJulia environments")
end
end
function enable_jseventhandling(device, port)
if GR.isijulia()
send_command(Dict("command" => "enable_jseventhandling"), "cmd", string(device, port))
else
error("enable_jseventhandling is only available in IJulia environments")
end
end
function disable_jseventhandling()
if GR.isijulia()
send_command(Dict("command" => "disable_jseventhandling"), "cmd", nothing)
else
error("disable_jseventhandling is only available in IJulia environments")
end
end
function enable_jseventhandling()
if GR.isijulia()
send_command(Dict("command" => "enable_jseventhandling"), "cmd", nothing)
else
error("enable_jseventhandling is only available in IJulia environments")
end
end
function jsterm_send(data::String)
global js_running, draw_condition, comm, PXWIDTH, PXHEIGHT
if GR.isijulia()
if comm === nothing
comm = Main.IJulia.Comm("jsterm_comm")
comm.on_close = function comm_close_callback(msg)
global js_running
js_running = false
end
comm.on_msg = function comm_msg_callback(msg)
data = msg.content["data"]
if haskey(data, "type")
if data["type"] == "createCanvas"
id = data["id"]
if haskey(jswidgets, id)
widget = jswidgets[id]
widget.width = data["width"]
widget.height = data["height"]
else
widget = JSTermWidget(id, data["width"], data["height"])
jswidgets[id] = widget
end
jswidgets[id].visible = false
jsterm_display(jswidgets[id])
elseif data["type"] == "save"
display(HTML(string("<div style=\"display:none;\" id=\"jsterm-data-", data["content"]["id"], "\" class=\"jsterm-data\">", data["content"]["data"], "</div>")))
elseif data["type"] == "evt"
if global_evthandler !== nothing
global_evthandler(data["content"])
end
if haskey(evthandler, data["id"]) && evthandler[data["id"]] !== nothing
evthandler[data["id"]](data["content"])
end
end
end
end
end
Main.IJulia.send_comm(comm, Dict("json" => data, "type"=>"draw"))
else
error("jsterm_send is only available in IJulia environments")
end
end
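# The JS side answers over the same comm (see comm_msg_callback above):
# "createCanvas" (re)creates a widget, "save" persists plot data in a
# hidden div, and "evt" dispatches to the registered event handlers.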
function recv(name::Cstring, id::Int32, msg::Cstring)
# receives string from C and sends it to JS via Comm
global js_running
if !js_running
inject_js()
js_running = true
end
jsterm_send(unsafe_string(msg))
return convert(Int32, 1)
end
function send(name::Cstring, id::Int32)
# Dummy function, not in use
return convert(Cstring, "String")
end
jswidgets = nothing
send_c = nothing
recv_c = nothing
function initjs()
global jswidgets, send_c, recv_c
jswidgets = Dict{Int32, JSTermWidget}()
send_c = @cfunction(send, Cstring, (Cstring, Int32))
recv_c = @cfunction(recv, Int32, (Cstring, Int32, Cstring))
send_c, recv_c
end
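# Note: `initjs` returns C-callable pointers; presumably the GR runtime
# stores them so its C side can push messages into `recv` (an assumption
# based on the @cfunction signatures above).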
end # module
|
Require Import init.
Require Export linear_base.
Require Import linear_subspace.
Require Import set.
Require Import unordered_list.
Definition linear_span U {V} `{Plus V, Zero V, ScalarMult U V}
(S : V → Prop) :=
λ v, ∀ sub : Subspace U V, S ⊆ subspace_set sub → subspace_set sub v.
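(* Thus v lies in the span of S exactly when every subspace whose carrier
   contains S also contains v: the span is the intersection of all
   subspaces containing S. *)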
(* begin hide *)
Section Span.
Context U {V} `{
UP : Plus U,
UZ : Zero U,
UN : Neg U,
UM : Mult U,
UO : One U,
UD : Div U,
@PlusComm U UP,
@PlusLid U UP UZ,
@PlusLinv U UP UZ UN,
@MultAssoc U UM,
@MultLid U UM UO,
@MultLinv U UZ UM UO UD,
VP : Plus V,
VZ : Zero V,
VN : Neg V,
@PlusComm V VP,
@PlusAssoc V VP,
@PlusLid V VP VZ,
@PlusLinv V VP VZ VN,
SM : ScalarMult U V,
@ScalarComp U V UM SM,
@ScalarId U V UO SM,
@ScalarLdist U V VP SM,
@ScalarRdist U V UP VP SM
}.
(* end hide *)
Variable A : V → Prop.
Let S := linear_span U A.
Lemma linear_span_zero : S 0.
Proof.
intros [T T_zero T_plus T_scalar]; cbn.
intros sub.
exact T_zero.
Qed.
Lemma linear_span_plus : ∀ a b, S a → S b → S (a + b).
Proof.
intros a b Sa Sb T sub.
specialize (Sa T sub).
specialize (Sb T sub).
apply subspace_plus; assumption.
Qed.
Lemma linear_span_scalar : ∀ a v, S v → S (a · v).
Proof.
intros a v Sv T sub.
specialize (Sv T sub).
apply subspace_scalar.
exact Sv.
Qed.
Definition linear_span_subspace := make_subspace S
linear_span_zero linear_span_plus linear_span_scalar.
Theorem linear_span_sub : A ⊆ S.
Proof.
intros v Av.
unfold S, linear_span.
intros sub A_sub.
apply A_sub.
exact Av.
Qed.
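(* Together, linear_span_subspace and linear_span_sub say that S is the
   least subspace containing A: it is a subspace, it contains A, and by
   definition it is included in every subspace containing A. *)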
Definition linear_span_quotient := quotient_space linear_span_subspace.
Definition to_quotient v :=
to_equiv (subspace_equiv linear_span_subspace) v.
Definition linear_span_quotient_plus
:= quotient_space_plus linear_span_subspace.
Definition linear_span_quotient_plus_assoc
:= quotient_space_plus_assoc linear_span_subspace.
Definition linear_span_quotient_plus_comm
:= quotient_space_plus_comm linear_span_subspace.
Definition linear_span_quotient_zero
:= quotient_space_zero linear_span_subspace.
Definition linear_span_quotient_plus_lid
:= quotient_space_plus_lid linear_span_subspace.
Definition linear_span_quotient_neg
:= quotient_space_neg linear_span_subspace.
Definition linear_span_quotient_plus_linv
:= quotient_space_plus_linv linear_span_subspace.
Definition linear_span_quotient_scalar_mult
:= quotient_space_scalar_mult linear_span_subspace.
Definition linear_span_quotient_scalar_comp
:= quotient_space_scalar_comp linear_span_subspace.
Definition linear_span_quotient_scalar_id
:= quotient_space_scalar_id linear_span_subspace.
Definition linear_span_quotient_scalar_ldist
:= quotient_space_scalar_ldist linear_span_subspace.
Definition linear_span_quotient_scalar_rdist
:= quotient_space_scalar_rdist linear_span_subspace.
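(* The impredicative definition of the span coincides with the
   constructive one: the span of A is exactly the set of finite linear
   combinations of elements of A. *)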
Theorem span_linear_combination : S = linear_combination_of A.
Proof.
pose (A_sub := make_subspace _ (linear_combination_of_zero A)
(linear_combination_of_plus A) (linear_combination_of_scalar A)).
apply antisym.
- intros v Sv.
unfold S, linear_span in Sv.
apply (Sv A_sub).
cbn.
clear v Sv.
intros v Av.
pose (l := (1, v) ː ulist_end).
assert (linear_combination_set l) as l_comb.
{
unfold linear_combination_set, l.
rewrite ulist_image_add, ulist_unique_add.
rewrite ulist_image_end.
split.
- apply in_ulist_end.
- apply ulist_unique_end.
}
exists [l|l_comb]; cbn.
split.
+ unfold linear_combination, l; cbn.
rewrite ulist_image_add, ulist_sum_add; cbn.
rewrite scalar_id.
rewrite ulist_image_end, ulist_sum_end.
rewrite plus_rid.
reflexivity.
+ unfold l, linear_list_in; cbn.
rewrite ulist_prop_add; cbn.
split; [>exact Av|apply ulist_prop_end].
- intros v [l [v_eq Sv]].
rewrite v_eq; clear v_eq.
apply (subspace_linear_combination linear_span_subspace).
cbn.
unfold linear_list_in in *.
eapply (ulist_prop_sub _ _ _ _ Sv).
Unshelve.
intros x.
apply linear_span_sub.
Qed.
(* begin hide *)
End Span.
(* end hide *)
|
-- Primitive Imperative Language --
module Example.Pil
import Data.List
import Data.List.Elem
import Data.Maybe
%default total
------------------------------------------------------
--- Auxiliary data definitions and their instances ---
------------------------------------------------------
export
data Name = MkName String
%name Name n, m
export
FromString Name where
fromString = MkName
export
Eq Name where
MkName n == MkName m = n == m
--- Static context in terms of which we are formulating an invariant ---
public export
Context : Type
Context = List (Name, Type)
%name Context ctx
-----------------------------------------------
--- List lookup with propositional equality ---
-----------------------------------------------
public export
data Lookup : a -> List (a, b) -> Type where
There : Lookup z xys -> Lookup z $ (x, y)::xys
Here : (y : b) -> Lookup x $ (x, y)::xys
-- !!! Idris searches from the bottom !!!
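-- Proof search tries the constructors listed last first, so `Here` is
-- found before `There` and a name resolves to its most recent (innermost)
-- binding. Illustration (hypothetical context): for
-- ctx = ("x", String) :: ("x", Int) :: rest, a search for
-- `Lookup "x" ctx` finds `Here String`, so `V "x"` has type
-- `Expression ctx String`.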
public export
reveal : Lookup {b} x xys -> b
reveal (Here y) = y
reveal (There subl) = reveal subl
-----------------------------------
--- The main language structure ---
-----------------------------------
public export
data Expression : (ctx : Context) -> (res : Type) -> Type where
-- Constant expression
C : (x : ty) -> Expression ctx ty
-- Value of the variable
V : (n : Name) -> (0 ty : Lookup n ctx) => Expression ctx $ reveal ty
-- Unary operation over the result of another expression
U : (f : a -> b) -> Expression ctx a -> Expression ctx b
-- Binary operation over the results of two another expressions
B : (f : a -> b -> c) -> Expression ctx a -> Expression ctx b -> Expression ctx c
infix 2 #=, ?#=
infixr 1 *>
public export
data Statement : (pre : Context) -> (post : Context) -> Type where
nop : Statement ctx ctx
(.) : (0 ty : Type) -> (n : Name) -> Statement ctx $ (n, ty)::ctx
(#=) : (n : Name) -> (0 ty : Lookup n ctx) => (v : Expression ctx $ reveal ty) -> Statement ctx ctx
for : (init : Statement outer_ctx inside_for) -> (cond : Expression inside_for Bool)
-> (upd : Statement inside_for inside_for) -> (body : Statement inside_for after_body)
-> Statement outer_ctx outer_ctx
if__ : (cond : Expression ctx Bool) -> Statement ctx ctx_then -> Statement ctx ctx_else -> Statement ctx ctx
(*>) : Statement pre mid -> Statement mid post -> Statement pre post
block : Statement outer inside -> Statement outer outer
print : Show ty => Expression ctx ty -> Statement ctx ctx
public export %inline
(>>=) : Statement pre mid -> (Unit -> Statement mid post) -> Statement pre post
a >>= f = a *> f ()
public export %inline
if_ : (cond : Expression ctx Bool) -> Statement ctx ctx_then -> Statement ctx ctx
if_ c t = if__ c t nop
public export %inline
while : Expression ctx Bool -> Statement ctx after_body -> Statement ctx ctx
while cond = for nop cond nop
-- Define with derived type and assign immediately
public export %inline
(?#=) : (n : Name) -> Expression ((n, ty)::ctx) ty -> Statement ctx $ (n, ty)::ctx
n ?#= v = ty. n *> n #= v
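-- Usage sketch (assuming `y : Int` is in scope in `ctx`):
-- `"x" ?#= V "y" + C 1` elaborates to `Int. "x" *> "x" #= V "y" + C 1`,
-- i.e. the declared type of `x` is derived from the assigned expression.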
namespace AlternativeDefineAndAssign
public export %inline
(#=) : (p : (Name, Type)) -> Expression (p::ctx) (snd p) -> Statement ctx $ p::ctx
(n, _) #= v = n ?#= v
public export %inline
(.) : a -> b -> (b, a)
(.) a b = (b, a)
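-- This is what allows writing e.g. `Int. "x" #= C 2` in the examples
-- below: `Int. "x"` builds the pair ("x", Int) via (.), and this (#=)
-- then declares and assigns in a single statement.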
-------------------------
--- Examples of usage ---
-------------------------
--- Functions lifted to the expression level ---
export %inline
(+) : Expression ctx Int -> Expression ctx Int -> Expression ctx Int
(+) = B (+)
export %inline
div : Expression ctx Int -> Expression ctx Int -> Expression ctx Int
div = B div
export %inline
mod : Expression ctx Int -> Expression ctx Int -> Expression ctx Int
mod = B mod
export %inline
(<) : Expression ctx Int -> Expression ctx Int -> Expression ctx Bool
(<) = B (<)
export %inline
(>) : Expression ctx Int -> Expression ctx Int -> Expression ctx Bool
(>) = B (>)
export %inline
(==) : Eq a => Expression ctx a -> Expression ctx a -> Expression ctx Bool
(==) = B (==)
export %inline
(/=) : Eq a => Expression ctx a -> Expression ctx a -> Expression ctx Bool
(/=) = B (/=)
export %inline
(&&) : Expression ctx Bool -> Expression ctx Bool -> Expression ctx Bool
(&&) = B (\a, b => a && b) -- redefined pointwise because Idris' (&&) is lazy in its second argument
export %inline
(++) : Expression ctx String -> Expression ctx String -> Expression ctx String
(++) = B (++)
export %inline
show : Show ty => Expression ctx ty -> Expression ctx String
show = U show
--- Example statements ---
simple_ass : Statement ctx $ ("x", Int)::ctx
simple_ass = do
Int. "x"
"x" #= C 2
lost_block : Statement ctx ctx
lost_block = do
block $ do
Int. "x"
"x" #= C 2
Int. "y" #= V "x"
Int. "z" #= C 3
print $ V "y" + V "z" + V "x"
some_for : Statement ctx ctx
some_for = for (do Int. "x" #= C 0; Int. "y" #= C 0) (V "x" < C 5 && V "y" < C 10) ("x" #= V "x" + C 1) $ do
"y" #= V "y" + V "x" + C 1
--bad_for : Statement ctx ctx
--bad_for = for (do Int. "x" #= C 0; Int. "y" #= C 0)
-- (V "y")
-- ("x" #= V "x" + C 1) $ do
-- "y" #= V "y" `div` V "x" + C 1
euc : {0 ctx : Context} -> let c = ("a", Int)::("b", Int)::ctx in Statement c $ ("res", Int)::c
euc = do
while (V "a" /= C 0 && V "b" /= C 0) $ do
if__ (V "a" > V "b")
("a" #= V "a" `mod` V "b")
("b" #= V "b" `mod` V "a")
Int. "res" #= V "a" + V "b"
name_shadowing : Statement ctx ctx
name_shadowing = block $ do
Int. "x" #= C 0
block $ do
Int. "x" #= C 3
Int. "y" #= V "x" + C 2
String. "x" #= C "foo"
print $ V "x" ++ C "bar" ++ show (V "y")
Int. "z" #= V "x" + C 2
|
include("comlineoption.jl")
include("iteration.jl")
include("saveresult.jl")
using .Comlineoption
using .Iteration
using .Saveresult
function main(args)
opt = Comlineoption.construct(args)
data, solve_tf_param, solve_tf_val, yarray = Iteration.construct(opt)
xarray = Iteration.iteration!(data, solve_tf_param, solve_tf_val, yarray)
Saveresult.saveresult(data, xarray, yarray)
end
@time main(ARGS)
|