[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
h0 : comp p q = 0
⊢ natDegree (comp p q) ≤ natDegree p * natDegree q
[PROOFSTEP]
rw [h0, natDegree_zero]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
h0 : comp p q = 0
⊢ 0 ≤ natDegree p * natDegree q
[PROOFSTEP]
exact Nat.zero_le _
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n✝ : ℕ
inst✝ : Semiring R
p q r : R[X]
h0 : ¬comp p q = 0
n : ℕ
hn : n ∈ support p
⊢ ↑(natDegree (↑C (coeff p n))) + n • ↑(natDegree q) = ↑(n * natDegree q)
[PROOFSTEP]
rw [natDegree_C, Nat.cast_zero, zero_add, nsmul_eq_mul]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n✝ : ℕ
inst✝ : Semiring R
p q r : R[X]
h0 : ¬comp p q = 0
n : ℕ
hn : n ∈ support p
⊢ ↑n * ↑(natDegree q) = ↑(n * natDegree q)
[PROOFSTEP]
simp
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p✝ q r p : R[X]
hp : p ≠ 0
h : IsRoot p a
hlt : 0 ≥ degree p
⊢ False
[PROOFSTEP]
have := eq_C_of_degree_le_zero hlt
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p✝ q r p : R[X]
hp : p ≠ 0
h : IsRoot p a
hlt : 0 ≥ degree p
this : p = ↑C (coeff p 0)
⊢ False
[PROOFSTEP]
rw [IsRoot, this, eval_C] at h
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p✝ q r p : R[X]
hp : p ≠ 0
h : coeff p 0 = 0
hlt : 0 ≥ degree p
this : p = ↑C (coeff p 0)
⊢ False
[PROOFSTEP]
simp only [h, RingHom.map_zero] at this
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p✝ q r p : R[X]
hp : p ≠ 0
h : coeff p 0 = 0
hlt : 0 ≥ degree p
this : p = 0
⊢ False
[PROOFSTEP]
exact hp this
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
⊢ natDegree p ≤ n ↔ ∀ (N : ℕ), n < N → coeff p N = 0
[PROOFSTEP]
simp_rw [natDegree_le_iff_degree_le, degree_le_iff_coeff_zero, Nat.cast_withBot, WithBot.coe_lt_coe]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n✝ : ℕ
inst✝ : Semiring R
p✝ q✝ r : R[X]
n : ℕ
p q : R[X]
qn : natDegree q ≤ n
⊢ natDegree (p + q) ≤ n ↔ natDegree p ≤ n
[PROOFSTEP]
refine' ⟨fun h => _, fun h => natDegree_add_le_of_degree_le h qn⟩
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n✝ : ℕ
inst✝ : Semiring R
p✝ q✝ r : R[X]
n : ℕ
p q : R[X]
qn : natDegree q ≤ n
h : natDegree (p + q) ≤ n
⊢ natDegree p ≤ n
[PROOFSTEP]
refine' natDegree_le_iff_coeff_eq_zero.mpr fun m hm => _
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m✝ n✝ : ℕ
inst✝ : Semiring R
p✝ q✝ r : R[X]
n : ℕ
p q : R[X]
qn : natDegree q ≤ n
h : natDegree (p + q) ≤ n
m : ℕ
hm : n < m
⊢ coeff p m = 0
[PROOFSTEP]
convert natDegree_le_iff_coeff_eq_zero.mp h m hm using 1
[GOAL]
case h.e'_2
R : Type u
S : Type v
ι : Type w
a b : R
m✝ n✝ : ℕ
inst✝ : Semiring R
p✝ q✝ r : R[X]
n : ℕ
p q : R[X]
qn : natDegree q ≤ n
h : natDegree (p + q) ≤ n
m : ℕ
hm : n < m
⊢ coeff p m = coeff (p + q) m
[PROOFSTEP]
rw [coeff_add, natDegree_le_iff_coeff_eq_zero.mp qn _ hm, add_zero]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n✝ : ℕ
inst✝ : Semiring R
p✝ q✝ r : R[X]
n : ℕ
p q : R[X]
pn : natDegree p ≤ n
⊢ natDegree (p + q) ≤ n ↔ natDegree q ≤ n
[PROOFSTEP]
rw [add_comm]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n✝ : ℕ
inst✝ : Semiring R
p✝ q✝ r : R[X]
n : ℕ
p q : R[X]
pn : natDegree p ≤ n
⊢ natDegree (q + p) ≤ n ↔ natDegree q ≤ n
[PROOFSTEP]
exact natDegree_add_le_iff_left _ _ pn
[GOAL]
R : Type u
S : Type v
ι : Type w
a✝ b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
a : R
f : R[X]
⊢ natDegree (↑C a) + natDegree f = 0 + natDegree f
[PROOFSTEP]
rw [natDegree_C a]
[GOAL]
R : Type u
S : Type v
ι : Type w
a✝ b : R
m n : ℕ
inst✝ : Semiring R
p q r f : R[X]
a : R
⊢ natDegree f + natDegree (↑C a) = natDegree f + 0
[PROOFSTEP]
rw [natDegree_C a]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
ai : R
au : ai * a = 1
⊢ natDegree p = natDegree (1 * p)
[PROOFSTEP]
nth_rw 1 [← one_mul p]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
ai : R
au : ai * a = 1
⊢ natDegree (1 * p) = natDegree (↑C ai * (↑C a * p))
[PROOFSTEP]
rw [← C_1, ← au, RingHom.map_mul, ← mul_assoc]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
ai : R
au : a * ai = 1
⊢ natDegree p = natDegree (p * 1)
[PROOFSTEP]
nth_rw 1 [← mul_one p]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
ai : R
au : a * ai = 1
⊢ natDegree (p * 1) = natDegree (p * ↑C a * ↑C ai)
[PROOFSTEP]
rw [← C_1, ← au, RingHom.map_mul, ← mul_assoc]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
h : leadingCoeff p * a ≠ 0
⊢ natDegree (p * ↑C a) = natDegree p
[PROOFSTEP]
refine' eq_natDegree_of_le_mem_support (natDegree_mul_C_le p a) _
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
h : leadingCoeff p * a ≠ 0
⊢ natDegree p ∈ support (p * ↑C a)
[PROOFSTEP]
refine' mem_support_iff.mpr _
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
h : leadingCoeff p * a ≠ 0
⊢ coeff (p * ↑C a) (natDegree p) ≠ 0
[PROOFSTEP]
rwa [coeff_mul_C]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
h : a * leadingCoeff p ≠ 0
⊢ natDegree (↑C a * p) = natDegree p
[PROOFSTEP]
refine' eq_natDegree_of_le_mem_support (natDegree_C_mul_le a p) _
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
h : a * leadingCoeff p ≠ 0
⊢ natDegree p ∈ support (↑C a * p)
[PROOFSTEP]
refine' mem_support_iff.mpr _
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
h : a * leadingCoeff p ≠ 0
⊢ coeff (↑C a * p) (natDegree p) ≠ 0
[PROOFSTEP]
rwa [coeff_C_mul]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r f g : R[X]
⊢ coeff (f * g) (natDegree f + natDegree g) = coeff f (natDegree f) * coeff g (natDegree g)
[PROOFSTEP]
simp only [coeff_natDegree, coeff_mul_degree_add_degree]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
pm : natDegree p ≤ m
qn : natDegree q ≤ n
⊢ coeff (p * q) (m + n) = coeff p m * coeff q n
[PROOFSTEP]
rcases eq_or_lt_of_le pm with (rfl | hm)
[GOAL]
case inl
R : Type u
S : Type v
ι : Type w
a b : R
n : ℕ
inst✝ : Semiring R
p q r : R[X]
qn : natDegree q ≤ n
pm : natDegree p ≤ natDegree p
⊢ coeff (p * q) (natDegree p + n) = coeff p (natDegree p) * coeff q n
[PROOFSTEP]
rcases eq_or_lt_of_le qn with (rfl | hn)
[GOAL]
case inr
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
pm : natDegree p ≤ m
qn : natDegree q ≤ n
hm : natDegree p < m
⊢ coeff (p * q) (m + n) = coeff p m * coeff q n
[PROOFSTEP]
rcases eq_or_lt_of_le qn with (rfl | hn)
[GOAL]
case inl.inl
R : Type u
S : Type v
ι : Type w
a b : R
inst✝ : Semiring R
p q r : R[X]
pm : natDegree p ≤ natDegree p
qn : natDegree q ≤ natDegree q
⊢ coeff (p * q) (natDegree p + natDegree q) = coeff p (natDegree p) * coeff q (natDegree q)
[PROOFSTEP]
exact natDegree_add_coeff_mul _ _
[GOAL]
case inl.inr
R : Type u
S : Type v
ι : Type w
a b : R
n : ℕ
inst✝ : Semiring R
p q r : R[X]
qn : natDegree q ≤ n
pm : natDegree p ≤ natDegree p
hn : natDegree q < n
⊢ coeff (p * q) (natDegree p + n) = coeff p (natDegree p) * coeff q n
[PROOFSTEP]
rw [coeff_eq_zero_of_natDegree_lt hn, mul_zero]
[GOAL]
case inl.inr
R : Type u
S : Type v
ι : Type w
a b : R
n : ℕ
inst✝ : Semiring R
p q r : R[X]
qn : natDegree q ≤ n
pm : natDegree p ≤ natDegree p
hn : natDegree q < n
⊢ coeff (p * q) (natDegree p + n) = 0
[PROOFSTEP]
exact natDegree_lt_coeff_mul (add_lt_add_left hn _)
[GOAL]
case inr.inl
R : Type u
S : Type v
ι : Type w
a b : R
m : ℕ
inst✝ : Semiring R
p q r : R[X]
pm : natDegree p ≤ m
hm : natDegree p < m
qn : natDegree q ≤ natDegree q
⊢ coeff (p * q) (m + natDegree q) = coeff p m * coeff q (natDegree q)
[PROOFSTEP]
rw [coeff_eq_zero_of_natDegree_lt hm, zero_mul]
[GOAL]
case inr.inl
R : Type u
S : Type v
ι : Type w
a b : R
m : ℕ
inst✝ : Semiring R
p q r : R[X]
pm : natDegree p ≤ m
hm : natDegree p < m
qn : natDegree q ≤ natDegree q
⊢ coeff (p * q) (m + natDegree q) = 0
[PROOFSTEP]
exact natDegree_lt_coeff_mul (add_lt_add_right hm _)
[GOAL]
case inr.inr
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
pm : natDegree p ≤ m
qn : natDegree q ≤ n
hm : natDegree p < m
hn : natDegree q < n
⊢ coeff (p * q) (m + n) = coeff p m * coeff q n
[PROOFSTEP]
rw [coeff_eq_zero_of_natDegree_lt hn, mul_zero]
[GOAL]
case inr.inr
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
pm : natDegree p ≤ m
qn : natDegree q ≤ n
hm : natDegree p < m
hn : natDegree q < n
⊢ coeff (p * q) (m + n) = 0
[PROOFSTEP]
exact natDegree_lt_coeff_mul (add_lt_add hm hn)
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
pn : natDegree p ≤ n
⊢ coeff (p ^ m) (m * n) = coeff p n ^ m
[PROOFSTEP]
induction' m with m hm
[GOAL]
case zero
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
pn : natDegree p ≤ n
⊢ coeff (p ^ Nat.zero) (Nat.zero * n) = coeff p n ^ Nat.zero
[PROOFSTEP]
simp
[GOAL]
case succ
R : Type u
S : Type v
ι : Type w
a b : R
m✝ n : ℕ
inst✝ : Semiring R
p q r : R[X]
pn : natDegree p ≤ n
m : ℕ
hm : coeff (p ^ m) (m * n) = coeff p n ^ m
⊢ coeff (p ^ Nat.succ m) (Nat.succ m * n) = coeff p n ^ Nat.succ m
[PROOFSTEP]
rw [pow_succ', pow_succ', ← hm, Nat.succ_mul, coeff_mul_of_natDegree_le _ pn]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m✝ n : ℕ
inst✝ : Semiring R
p q r : R[X]
pn : natDegree p ≤ n
m : ℕ
hm : coeff (p ^ m) (m * n) = coeff p n ^ m
⊢ natDegree (p ^ m) ≤ m * n
[PROOFSTEP]
refine' natDegree_pow_le.trans (le_trans _ (le_refl _))
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m✝ n : ℕ
inst✝ : Semiring R
p q r : R[X]
pn : natDegree p ≤ n
m : ℕ
hm : coeff (p ^ m) (m * n) = coeff p n ^ m
⊢ m * natDegree p ≤ m * n
[PROOFSTEP]
exact mul_le_mul_of_nonneg_left pn m.zero_le
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
o : ℕ
pn : natDegree p ≤ n
mno : m * n ≤ o
⊢ coeff (p ^ m) o = if o = m * n then coeff p n ^ m else 0
[PROOFSTEP]
rcases eq_or_ne o (m * n) with rfl | h
[GOAL]
case inl
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
pn : natDegree p ≤ n
mno : m * n ≤ m * n
⊢ coeff (p ^ m) (m * n) = if m * n = m * n then coeff p n ^ m else 0
[PROOFSTEP]
simpa only [ite_true] using coeff_pow_of_natDegree_le pn
[GOAL]
case inr
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
o : ℕ
pn : natDegree p ≤ n
mno : m * n ≤ o
h : o ≠ m * n
⊢ coeff (p ^ m) o = if o = m * n then coeff p n ^ m else 0
[PROOFSTEP]
simpa only [h, ite_false] using
coeff_eq_zero_of_natDegree_lt $ lt_of_le_of_lt (natDegree_pow_le_of_le m pn) (lt_of_le_of_ne mno h.symm)
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
pn : natDegree p < n
⊢ coeff (p + q) n = coeff q n
[PROOFSTEP]
rw [add_comm]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
pn : natDegree p < n
⊢ coeff (q + p) n = coeff q n
[PROOFSTEP]
exact coeff_add_eq_left_of_lt pn
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on degree ∘ f)
⊢ degree (Finset.sum s f) = sup s fun i => degree (f i)
[PROOFSTEP]
induction' s using Finset.induction_on with x s hx IH
[GOAL]
case empty
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h✝ : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on degree ∘ f)
h : Set.Pairwise {i | i ∈ ∅ ∧ f i ≠ 0} (Ne on degree ∘ f)
⊢ degree (Finset.sum ∅ f) = sup ∅ fun i => degree (f i)
[PROOFSTEP]
simp
[GOAL]
case insert
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
IH : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on degree ∘ f) → degree (Finset.sum s f) = sup s fun i => degree (f i)
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
⊢ degree (Finset.sum (insert x s) f) = sup (insert x s) fun i => degree (f i)
[PROOFSTEP]
simp only [hx, Finset.sum_insert, not_false_iff, Finset.sup_insert]
[GOAL]
case insert
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
IH : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on degree ∘ f) → degree (Finset.sum s f) = sup s fun i => degree (f i)
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
specialize IH (h.mono fun _ => by simp (config := { contextual := true }))
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
IH : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on degree ∘ f) → degree (Finset.sum s f) = sup s fun i => degree (f i)
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
x✝ : S
⊢ x✝ ∈ {i | i ∈ s ∧ f i ≠ 0} → x✝ ∈ {i | i ∈ insert x s ∧ f i ≠ 0}
[PROOFSTEP]
simp (config := { contextual := true })
[GOAL]
case insert
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
rcases lt_trichotomy (degree (f x)) (degree (s.sum f)) with (H | H | H)
[GOAL]
case insert.inl
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
H : degree (f x) < degree (Finset.sum s f)
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
rw [← IH, sup_eq_right.mpr H.le, degree_add_eq_right_of_degree_lt H]
[GOAL]
case insert.inr.inl
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
H : degree (f x) = degree (Finset.sum s f)
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
rcases s.eq_empty_or_nonempty with (rfl | hs)
[GOAL]
case insert.inr.inl.inl
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h✝ : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
hx : ¬x ∈ ∅
h : Set.Pairwise {i | i ∈ insert x ∅ ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum ∅ f) = sup ∅ fun i => degree (f i)
H : degree (f x) = degree (Finset.sum ∅ f)
⊢ degree (f x + Finset.sum ∅ fun x => f x) = degree (f x) ⊔ sup ∅ fun i => degree (f i)
[PROOFSTEP]
simp
[GOAL]
case insert.inr.inl.inr
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
H : degree (f x) = degree (Finset.sum s f)
hs : Finset.Nonempty s
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
obtain ⟨y, hy, hy'⟩ := Finset.exists_mem_eq_sup s hs fun i => degree (f i)
[GOAL]
case insert.inr.inl.inr.intro.intro
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
H : degree (f x) = degree (Finset.sum s f)
hs : Finset.Nonempty s
y : S
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
rw [IH, hy'] at H
[GOAL]
case insert.inr.inl.inr.intro.intro
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
hs : Finset.Nonempty s
y : S
H : degree (f x) = degree (f y)
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
by_cases hx0 : f x = 0
[GOAL]
case pos
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
hs : Finset.Nonempty s
y : S
H : degree (f x) = degree (f y)
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
hx0 : f x = 0
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
simp [hx0, IH]
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
hs : Finset.Nonempty s
y : S
H : degree (f x) = degree (f y)
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
hx0 : ¬f x = 0
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
have hy0 : f y ≠ 0 := by
contrapose! H
simpa [H, degree_eq_bot] using hx0
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
hs : Finset.Nonempty s
y : S
H : degree (f x) = degree (f y)
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
hx0 : ¬f x = 0
⊢ f y ≠ 0
[PROOFSTEP]
contrapose! H
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
hs : Finset.Nonempty s
y : S
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
hx0 : ¬f x = 0
H : f y = 0
⊢ degree (f x) ≠ degree (f y)
[PROOFSTEP]
simpa [H, degree_eq_bot] using hx0
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
hs : Finset.Nonempty s
y : S
H : degree (f x) = degree (f y)
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
hx0 : ¬f x = 0
hy0 : f y ≠ 0
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
refine' absurd H (h _ _ fun H => hx _)
[GOAL]
case neg.refine'_1
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
hs : Finset.Nonempty s
y : S
H : degree (f x) = degree (f y)
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
hx0 : ¬f x = 0
hy0 : f y ≠ 0
⊢ x ∈ {i | i ∈ insert x s ∧ f i ≠ 0}
[PROOFSTEP]
simp [hx0]
[GOAL]
case neg.refine'_2
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
hs : Finset.Nonempty s
y : S
H : degree (f x) = degree (f y)
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
hx0 : ¬f x = 0
hy0 : f y ≠ 0
⊢ y ∈ {i | i ∈ insert x s ∧ f i ≠ 0}
[PROOFSTEP]
simp [hy, hy0]
[GOAL]
case neg.refine'_3
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
hs : Finset.Nonempty s
y : S
H✝ : degree (f x) = degree (f y)
hy : y ∈ s
hy' : (sup s fun i => degree (f i)) = degree (f y)
hx0 : ¬f x = 0
hy0 : f y ≠ 0
H : x = y
⊢ x ∈ s
[PROOFSTEP]
exact H.symm ▸ hy
[GOAL]
case insert.inr.inr
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s✝ : Finset S
h✝ : Set.Pairwise {i | i ∈ s✝ ∧ f i ≠ 0} (Ne on degree ∘ f)
x : S
s : Finset S
hx : ¬x ∈ s
h : Set.Pairwise {i | i ∈ insert x s ∧ f i ≠ 0} (Ne on degree ∘ f)
IH : degree (Finset.sum s f) = sup s fun i => degree (f i)
H : degree (Finset.sum s f) < degree (f x)
⊢ degree (f x + Finset.sum s fun x => f x) = degree (f x) ⊔ sup s fun i => degree (f i)
[PROOFSTEP]
rw [← IH, sup_eq_left.mpr H.le, degree_add_eq_left_of_degree_lt H]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
⊢ natDegree (Finset.sum s f) = sup s fun i => natDegree (f i)
[PROOFSTEP]
by_cases H : ∃ x ∈ s, f x ≠ 0
[GOAL]
case pos
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
H : ∃ x, x ∈ s ∧ f x ≠ 0
⊢ natDegree (Finset.sum s f) = sup s fun i => natDegree (f i)
[PROOFSTEP]
obtain ⟨x, hx, hx'⟩ := H
[GOAL]
case pos.intro.intro
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
⊢ natDegree (Finset.sum s f) = sup s fun i => natDegree (f i)
[PROOFSTEP]
have hs : s.Nonempty := ⟨x, hx⟩
[GOAL]
case pos.intro.intro
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
⊢ natDegree (Finset.sum s f) = sup s fun i => natDegree (f i)
[PROOFSTEP]
refine' natDegree_eq_of_degree_eq_some _
[GOAL]
case pos.intro.intro
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
⊢ degree (Finset.sum s f) = ↑(sup s fun i => natDegree (f i))
[PROOFSTEP]
rw [degree_sum_eq_of_disjoint]
[GOAL]
case pos.intro.intro
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
⊢ (sup s fun i => degree (f i)) = ↑(sup s fun i => natDegree (f i))
[PROOFSTEP]
rw [← Finset.sup'_eq_sup hs, ← Finset.sup'_eq_sup hs, Nat.cast_withBot, Finset.coe_sup' hs, ← Finset.sup'_eq_sup hs]
[GOAL]
case pos.intro.intro
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
⊢ (sup' s hs fun i => degree (f i)) = sup' s hs (WithBot.some ∘ fun i => natDegree (f i))
[PROOFSTEP]
refine' le_antisymm _ _
[GOAL]
case pos.intro.intro.refine'_1
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
⊢ (sup' s hs fun i => degree (f i)) ≤ sup' s hs (WithBot.some ∘ fun i => natDegree (f i))
[PROOFSTEP]
rw [Finset.sup'_le_iff]
[GOAL]
case pos.intro.intro.refine'_1
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
⊢ ∀ (b : S), b ∈ s → degree (f b) ≤ sup' s hs (WithBot.some ∘ fun i => natDegree (f i))
[PROOFSTEP]
intro b hb
[GOAL]
case pos.intro.intro.refine'_1
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
b : S
hb : b ∈ s
⊢ degree (f b) ≤ sup' s hs (WithBot.some ∘ fun i => natDegree (f i))
[PROOFSTEP]
by_cases hb' : f b = 0
[GOAL]
case pos
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
b : S
hb : b ∈ s
hb' : f b = 0
⊢ degree (f b) ≤ sup' s hs (WithBot.some ∘ fun i => natDegree (f i))
[PROOFSTEP]
simpa [hb'] using hs
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
b : S
hb : b ∈ s
hb' : ¬f b = 0
⊢ degree (f b) ≤ sup' s hs (WithBot.some ∘ fun i => natDegree (f i))
[PROOFSTEP]
rw [degree_eq_natDegree hb', Nat.cast_withBot]
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
b : S
hb : b ∈ s
hb' : ¬f b = 0
⊢ ↑(natDegree (f b)) ≤ sup' s hs (WithBot.some ∘ fun i => natDegree (f i))
[PROOFSTEP]
exact Finset.le_sup' (fun i : S => (natDegree (f i) : WithBot ℕ)) hb
[GOAL]
case pos.intro.intro.refine'_2
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
⊢ sup' s hs (WithBot.some ∘ fun i => natDegree (f i)) ≤ sup' s hs fun i => degree (f i)
[PROOFSTEP]
rw [Finset.sup'_le_iff]
[GOAL]
case pos.intro.intro.refine'_2
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
⊢ ∀ (b : S), b ∈ s → (WithBot.some ∘ fun i => natDegree (f i)) b ≤ sup' s hs fun i => degree (f i)
[PROOFSTEP]
intro b hb
[GOAL]
case pos.intro.intro.refine'_2
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
b : S
hb : b ∈ s
⊢ (WithBot.some ∘ fun i => natDegree (f i)) b ≤ sup' s hs fun i => degree (f i)
[PROOFSTEP]
simp only [Finset.le_sup'_iff, exists_prop, Function.comp_apply]
[GOAL]
case pos.intro.intro.refine'_2
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
b : S
hb : b ∈ s
⊢ ∃ b_1, b_1 ∈ s ∧ ↑(natDegree (f b)) ≤ degree (f b_1)
[PROOFSTEP]
by_cases hb' : f b = 0
[GOAL]
case pos
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
b : S
hb : b ∈ s
hb' : f b = 0
⊢ ∃ b_1, b_1 ∈ s ∧ ↑(natDegree (f b)) ≤ degree (f b_1)
[PROOFSTEP]
refine' ⟨x, hx, _⟩
[GOAL]
case pos
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
b : S
hb : b ∈ s
hb' : f b = 0
⊢ ↑(natDegree (f b)) ≤ degree (f x)
[PROOFSTEP]
contrapose! hx'
[GOAL]
case pos
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hs : Finset.Nonempty s
b : S
hb : b ∈ s
hb' : f b = 0
hx' : degree (f x) < ↑(natDegree (f b))
⊢ f x = 0
[PROOFSTEP]
simpa [← Nat.cast_withBot, hb', degree_eq_bot] using hx'
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b✝ : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
b : S
hb : b ∈ s
hb' : ¬f b = 0
⊢ ∃ b_1, b_1 ∈ s ∧ ↑(natDegree (f b)) ≤ degree (f b_1)
[PROOFSTEP]
exact ⟨b, hb, (degree_eq_natDegree hb').ge⟩
[GOAL]
case pos.intro.intro.h
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
x : S
hx : x ∈ s
hx' : f x ≠ 0
hs : Finset.Nonempty s
⊢ Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on degree ∘ f)
[PROOFSTEP]
exact h.imp fun x y hxy hxy' => hxy (natDegree_eq_of_degree_eq hxy')
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
H : ¬∃ x, x ∈ s ∧ f x ≠ 0
⊢ natDegree (Finset.sum s f) = sup s fun i => natDegree (f i)
[PROOFSTEP]
push_neg at H
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
H : ∀ (x : S), x ∈ s → f x = 0
⊢ natDegree (Finset.sum s f) = sup s fun i => natDegree (f i)
[PROOFSTEP]
rw [Finset.sum_eq_zero H, natDegree_zero, eq_comm, show 0 = ⊥ from rfl, Finset.sup_eq_bot_iff]
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
H : ∀ (x : S), x ∈ s → f x = 0
⊢ ∀ (s_1 : S), s_1 ∈ s → natDegree (f s_1) = ⊥
[PROOFSTEP]
intro x hx
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Semiring R
p q r : R[X]
f : S → R[X]
s : Finset S
h : Set.Pairwise {i | i ∈ s ∧ f i ≠ 0} (Ne on natDegree ∘ f)
H : ∀ (x : S), x ∈ s → f x = 0
x : S
hx : x ∈ s
⊢ natDegree (f x) = ⊥
[PROOFSTEP]
simp [H x hx]
[GOAL]
R : Type u
S : Type v
ι : Type w
a✝ b : R
m n : ℕ
inst✝ : Semiring R
p q r a : R[X]
⊢ max (natDegree (bit0 a)) (natDegree 1) ≤ natDegree a
[PROOFSTEP]
simp [natDegree_bit0]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
p✝ q r : R[X]
inst✝ : Semiring S
p : R[X]
hp : p ≠ 0
f : R →+* S
z : S
hz : eval₂ f z p = 0
inj : ∀ (x : R), ↑f x = 0 → x = 0
hlt : 0 ≥ natDegree p
⊢ False
[PROOFSTEP]
have A : p = C (p.coeff 0) := eq_C_of_natDegree_le_zero hlt
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
p✝ q r : R[X]
inst✝ : Semiring S
p : R[X]
hp : p ≠ 0
f : R →+* S
z : S
hz : eval₂ f z p = 0
inj : ∀ (x : R), ↑f x = 0 → x = 0
hlt : 0 ≥ natDegree p
A : p = ↑C (coeff p 0)
⊢ False
[PROOFSTEP]
rw [A, eval₂_C] at hz
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
p✝ q r : R[X]
inst✝ : Semiring S
p : R[X]
hp : p ≠ 0
f : R →+* S
z : S
hz : ↑f (coeff p 0) = 0
inj : ∀ (x : R), ↑f x = 0 → x = 0
hlt : 0 ≥ natDegree p
A : p = ↑C (coeff p 0)
⊢ False
[PROOFSTEP]
simp only [inj (p.coeff 0) hz, RingHom.map_zero] at A
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
p✝ q r : R[X]
inst✝ : Semiring S
p : R[X]
hp : p ≠ 0
f : R →+* S
z : S
hz : ↑f (coeff p 0) = 0
inj : ∀ (x : R), ↑f x = 0 → x = 0
hlt : 0 ≥ natDegree p
A : p = 0
⊢ False
[PROOFSTEP]
exact hp A
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n✝ : ℕ
inst✝¹ : Semiring R
p✝ q r : R[X]
inst✝ : Semiring S
p : R[X]
n : ℕ
⊢ ↑n < degree p ↔ n < natDegree p
[PROOFSTEP]
by_cases h : p = 0
[GOAL]
case pos
R : Type u
S : Type v
ι : Type w
a b : R
m n✝ : ℕ
inst✝¹ : Semiring R
p✝ q r : R[X]
inst✝ : Semiring S
p : R[X]
n : ℕ
h : p = 0
⊢ ↑n < degree p ↔ n < natDegree p
[PROOFSTEP]
simp [h]
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n✝ : ℕ
inst✝¹ : Semiring R
p✝ q r : R[X]
inst✝ : Semiring S
p : R[X]
n : ℕ
h : ¬p = 0
⊢ ↑n < degree p ↔ n < natDegree p
[PROOFSTEP]
simp [degree_eq_natDegree h, Nat.cast_withBot, WithBot.coe_lt_coe]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Ring R
p q : R[X]
⊢ natDegree (p - q) = natDegree (q - p)
[PROOFSTEP]
rw [← natDegree_neg, neg_sub]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Ring R
p q : R[X]
qn : natDegree q ≤ n
⊢ natDegree (p - q) ≤ n ↔ natDegree p ≤ n
[PROOFSTEP]
rw [← natDegree_neg] at qn
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Ring R
p q : R[X]
qn : natDegree (-q) ≤ n
⊢ natDegree (p - q) ≤ n ↔ natDegree p ≤ n
[PROOFSTEP]
rw [sub_eq_add_neg, natDegree_add_le_iff_left _ _ qn]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Ring R
p q : R[X]
pn : natDegree p ≤ n
⊢ natDegree (p - q) ≤ n ↔ natDegree q ≤ n
[PROOFSTEP]
rwa [natDegree_sub, natDegree_sub_le_iff_left]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Ring R
p q : R[X]
dg : natDegree q < n
⊢ coeff (p - q) n = coeff p n
[PROOFSTEP]
rw [← natDegree_neg] at dg
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Ring R
p q : R[X]
dg : natDegree (-q) < n
⊢ coeff (p - q) n = coeff p n
[PROOFSTEP]
rw [sub_eq_add_neg, coeff_add_eq_left_of_lt dg]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝ : Ring R
p q : R[X]
df : natDegree p < n
⊢ coeff (p - q) n = -coeff q n
[PROOFSTEP]
rwa [sub_eq_add_neg, coeff_add_eq_right_of_lt, coeff_neg]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
a0 : a ≠ 0
⊢ degree (p * ↑C a) = degree p
[PROOFSTEP]
rw [degree_mul, degree_C a0, add_zero]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
a0 : a ≠ 0
⊢ degree (↑C a * p) = degree p
[PROOFSTEP]
rw [degree_mul, degree_C a0, zero_add]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
a0 : a ≠ 0
⊢ natDegree (p * ↑C a) = natDegree p
[PROOFSTEP]
simp only [natDegree, degree_mul_C a0]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
a0 : a ≠ 0
⊢ natDegree (↑C a * p) = natDegree p
[PROOFSTEP]
simp only [natDegree, degree_C_mul a0]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
⊢ natDegree (comp p q) = natDegree p * natDegree q
[PROOFSTEP]
by_cases q0 : q.natDegree = 0
[GOAL]
case pos
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
q0 : natDegree q = 0
⊢ natDegree (comp p q) = natDegree p * natDegree q
[PROOFSTEP]
rw [degree_le_zero_iff.mp (natDegree_eq_zero_iff_degree_le_zero.mp q0), comp_C, natDegree_C, natDegree_C, mul_zero]
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
q0 : ¬natDegree q = 0
⊢ natDegree (comp p q) = natDegree p * natDegree q
[PROOFSTEP]
by_cases p0 : p = 0
[GOAL]
case pos
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
q0 : ¬natDegree q = 0
p0 : p = 0
⊢ natDegree (comp p q) = natDegree p * natDegree q
[PROOFSTEP]
simp only [p0, zero_comp, natDegree_zero, zero_mul]
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
q0 : ¬natDegree q = 0
p0 : ¬p = 0
⊢ natDegree (comp p q) = natDegree p * natDegree q
[PROOFSTEP]
refine' le_antisymm natDegree_comp_le (le_natDegree_of_ne_zero _)
[GOAL]
case neg
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
q0 : ¬natDegree q = 0
p0 : ¬p = 0
⊢ coeff (comp p q) (natDegree p * natDegree q) ≠ 0
[PROOFSTEP]
simp only [coeff_comp_degree_mul_degree q0, p0, mul_eq_zero, leadingCoeff_eq_zero, or_self_iff,
ne_zero_of_natDegree_gt (Nat.pos_of_ne_zero q0), pow_ne_zero, Ne.def, not_false_iff]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
k : ℕ
⊢ natDegree ((comp p)^[k] q) = natDegree p ^ k * natDegree q
[PROOFSTEP]
induction' k with k IH
[GOAL]
case zero
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
⊢ natDegree ((comp p)^[Nat.zero] q) = natDegree p ^ Nat.zero * natDegree q
[PROOFSTEP]
simp
[GOAL]
case succ
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
k : ℕ
IH : natDegree ((comp p)^[k] q) = natDegree p ^ k * natDegree q
⊢ natDegree ((comp p)^[Nat.succ k] q) = natDegree p ^ Nat.succ k * natDegree q
[PROOFSTEP]
rw [Function.iterate_succ_apply', natDegree_comp, IH, pow_succ, mul_assoc]
[GOAL]
R : Type u
S : Type v
ι : Type w
a b : R
m n : ℕ
inst✝¹ : Semiring R
inst✝ : NoZeroDivisors R
p q : R[X]
hq : natDegree q ≠ 0
⊢ leadingCoeff (comp p q) = leadingCoeff p * leadingCoeff q ^ natDegree p
[PROOFSTEP]
rw [← coeff_comp_degree_mul_degree hq, ← natDegree_comp, coeff_natDegree]
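-- Reading the rewrites right to left: the RHS becomes `coeff (comp p q) (natDegree p * natDegree q)`
-- via `coeff_comp_degree_mul_degree` (which needs `natDegree q ≠ 0`), the index is folded into
-- `natDegree (comp p q)` via `natDegree_comp` (using `NoZeroDivisors R`), and `coeff_natDegree`
-- identifies that coefficient with `leadingCoeff (comp p q)`, closing the goal.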
|
{-# OPTIONS --safe #-}
module Cubical.Categories.Limits.Initial where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism renaming (Iso to _≅_)
open import Cubical.HITs.PropositionalTruncation.Base
open import Cubical.Data.Sigma
open import Cubical.Categories.Category
open import Cubical.Categories.Functor
open import Cubical.Categories.Adjoint
private
variable
ℓ ℓ' : Level
ℓC ℓC' ℓD ℓD' : Level
module _ (C : Category ℓ ℓ') where
open Category C
isInitial : (x : ob) → Type (ℓ-max ℓ ℓ')
isInitial x = ∀ (y : ob) → isContr (C [ x , y ])
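-- Initiality of x says the hom-type C [ x , y ] is contractible for every y,
-- i.e. there is exactly one arrow out of x up to path equality.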
Initial : Type (ℓ-max ℓ ℓ')
Initial = Σ[ x ∈ ob ] isInitial x
initialOb : Initial → ob
initialOb = fst
initialArrow : (T : Initial) (y : ob) → C [ initialOb T , y ]
initialArrow T y = T .snd y .fst
initialArrowUnique : {T : Initial} {y : ob} (f : C [ initialOb T , y ])
→ initialArrow T y ≡ f
initialArrowUnique {T} {y} f = T .snd y .snd f
initialEndoIsId : (T : Initial) (f : C [ initialOb T , initialOb T ])
→ f ≡ id
initialEndoIsId T f = isContr→isProp (T .snd (initialOb T)) f id
hasInitial : Type (ℓ-max ℓ ℓ')
hasInitial = ∥ Initial ∥₁
-- Initiality of an object is a proposition.
isPropIsInitial : (x : ob) → isProp (isInitial x)
isPropIsInitial _ = isPropΠ λ _ → isPropIsContr
open CatIso
-- Objects that are initial are isomorphic.
initialToIso : (x y : Initial) → CatIso C (initialOb x) (initialOb y)
mor (initialToIso x y) = initialArrow x (initialOb y)
inv (initialToIso x y) = initialArrow y (initialOb x)
sec (initialToIso x y) = initialEndoIsId y _
ret (initialToIso x y) = initialEndoIsId x _
open isUnivalent
-- The type of initial objects of a univalent category is a proposition,
-- i.e. all initial objects are equal.
isPropInitial : (hC : isUnivalent C) → isProp Initial
isPropInitial hC x y =
Σ≡Prop isPropIsInitial (CatIsoToPath hC (initialToIso x y))
module _ {C : Category ℓC ℓC'} {D : Category ℓD ℓD'} (F : Functor C D) where
open Category
open Functor
open NaturalBijection
open _⊣_
open _≅_
preservesInitial : Type (ℓ-max (ℓ-max ℓC ℓC') (ℓ-max ℓD ℓD'))
preservesInitial = ∀ (x : ob C) → isInitial C x → isInitial D (F-ob F x)
isLeftAdjoint→preservesInitial : isLeftAdjoint F → preservesInitial
fst (isLeftAdjoint→preservesInitial (G , F⊣G) x initX y) = _♯ F⊣G (fst (initX (F-ob G y)))
snd (isLeftAdjoint→preservesInitial (G , F⊣G) x initX y) ψ =
_♯ F⊣G (fst (initX (F-ob G y)))
≡⟨ cong (F⊣G ♯) (snd (initX (F-ob G y)) (_♭ F⊣G ψ)) ⟩
_♯ F⊣G (_♭ F⊣G ψ)
≡⟨ leftInv (adjIso F⊣G) ψ ⟩
ψ ∎
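-- In the chain above, initiality in C forces the transpose _♭ F⊣G ψ to equal the
-- canonical arrow out of x; transposing back with _♯ and applying the left inverse
-- of the adjunction bijection recovers ψ.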
|
Formal statement is: lemma (in t3_space) nhds_closed: assumes "x \<in> A" and "open A" shows "\<exists>A'. x \<in> A' \<and> closed A' \<and> A' \<subseteq> A \<and> eventually (\<lambda>y. y \<in> A') (nhds x)" Informal statement is: If $A$ is an open set containing $x$, then there is a closed set $A'$ containing $x$ and contained in $A$ such that $A'$ is in the filter of neighbourhoods of $x$. |
open import Data.Nat using (ℕ; zero; suc; _+_; _∸_; _≥_; _≤_; z≤n; s≤s)
open import Function.Equivalence using (_⇔_)
open import Relation.Binary.PropositionalEquality using (→-to-⟶)
postulate
adjoint : ∀ {x y z} → x + y ≥ z ⇔ x ≥ z ∸ y
unit : ∀ {x y} → x ≥ (x + y) ∸ y
apply : ∀ {x y} → (x ∸ y) + y ≥ x
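-- These postulates express a Galois connection on ℕ: adding y is left adjoint to
-- (monus-)subtracting y, e.g. x + 3 ≥ 7 iff x ≥ 7 ∸ 3 = 4.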
|
@testset "Thinning" begin
# for □ like figure
img = falses(20,20)
for i=7:7+4, j=7:7+4
img[i,j] = img[j,i] = true
end
thin = thinning(img)
@test count(thin .== 1) == 1
@test thin[9,9] == 1
# for + like figure
img = falses(20,20)
for i=8:8+5, j=4:13+4
img[i,j] = true
img[j,i] = true
end
ans = falses(size(img))
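# expected skeleton of the plus, given as column-major linear indices into the 20×20 grid (e.g. 111 = row 11, column 6)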
ans[[111,131,151,171,191,192,193,194,195,211,231,251,271]] .= 1
@test thinning(img) == ans
img = Bool.([1 1 1 1 1 1
0 0 0 0 0 0
1 1 1 1 1 1
1 1 1 1 1 1
0 0 1 1 0 0
0 0 0 0 0 0])
ans = Bool.([1 1 1 1 1 1
0 0 0 0 0 0
0 0 0 0 0 0
1 1 1 1 1 0
0 0 0 0 0 0
0 0 0 0 0 0])
@test thinning(img) == ans
# already thinned
img = Bool.([0 0 0 0 0 0
0 0 0 1 1 0
0 0 1 0 0 0
0 0 1 0 0 0
0 0 0 1 0 0
0 0 0 0 0 0
0 0 0 0 0 0])
@test thinning(img) == img
end
|
! { dg-do compile }
! PR fortran/65173
program p
type t
character(1), allocatable :: n(256) ! { dg-error "must have a deferred shape" }
end type
end
|
#include <mbgl/map/map.hpp>
#include <mbgl/util/image.hpp>
#include <mbgl/util/run_loop.hpp>
#include <mbgl/gl/headless_frontend.hpp>
#include <mbgl/util/default_thread_pool.hpp>
#include <mbgl/storage/default_file_source.hpp>
#include <mbgl/style/style.hpp>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wunused-local-typedefs"
#pragma GCC diagnostic ignored "-Wshadow"
#include <boost/program_options.hpp>
#pragma GCC diagnostic pop
namespace po = boost::program_options;
#include <cstdlib>
#include <iostream>
#include <fstream>
int main(int argc, char *argv[]) {
std::string style_path;
double lat = 0, lon = 0;
double zoom = 0;
double bearing = 0;
double pitch = 0;
double pixelRatio = 1;
uint32_t width = 512;
uint32_t height = 512;
static std::string output = "out.png";
std::string cache_file = "cache.sqlite";
std::string asset_root = ".";
std::string token;
bool debug = false;
po::options_description desc("Allowed options");
desc.add_options()
("style,s", po::value(&style_path)->required()->value_name("json"), "Map stylesheet")
("lon,x", po::value(&lon)->value_name("degrees")->default_value(lon), "Longitude")
("lat,y", po::value(&lat)->value_name("degrees")->default_value(lat), "Latitude in degrees")
("zoom,z", po::value(&zoom)->value_name("number")->default_value(zoom), "Zoom level")
("bearing,b", po::value(&bearing)->value_name("degrees")->default_value(bearing), "Bearing")
("pitch,p", po::value(&pitch)->value_name("degrees")->default_value(pitch), "Pitch")
("width,w", po::value(&width)->value_name("pixels")->default_value(width), "Image width")
("height,h", po::value(&height)->value_name("pixels")->default_value(height), "Image height")
("ratio,r", po::value(&pixelRatio)->value_name("number")->default_value(pixelRatio), "Image scale factor")
("token,t", po::value(&token)->value_name("key")->default_value(token), "Mapbox access token")
("debug", po::bool_switch(&debug)->default_value(debug), "Debug mode")
("output,o", po::value(&output)->value_name("file")->default_value(output), "Output file name")
("cache,d", po::value(&cache_file)->value_name("file")->default_value(cache_file), "Cache database file name")
("assets,d", po::value(&asset_root)->value_name("file")->default_value(asset_root), "Directory to which asset:// URLs will resolve")
;
try {
po::variables_map vm;
po::store(po::parse_command_line(argc, argv, desc), vm);
po::notify(vm);
} catch(std::exception& e) {
std::cout << "Error: " << e.what() << std::endl << desc;
exit(1);
}
using namespace mbgl;
util::RunLoop loop;
DefaultFileSource fileSource(cache_file, asset_root);
// Try to load the token from the environment.
if (!token.size()) {
const char *token_ptr = getenv("MAPBOX_ACCESS_TOKEN");
if (token_ptr) {
token = token_ptr;
}
}
// Set access token if present
if (token.size()) {
fileSource.setAccessToken(std::string(token));
}
ThreadPool threadPool(4);
HeadlessFrontend frontend({ width, height }, pixelRatio, fileSource, threadPool);
Map map(frontend, MapObserver::nullObserver(), frontend.getSize(), pixelRatio, fileSource, threadPool, MapMode::Still);
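// MapMode::Still renders a single complete frame once all resources have loaded,
// which is what we want for one-shot image output.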
if (style_path.find("://") == std::string::npos) {
style_path = std::string("file://") + style_path;
}
map.getStyle().loadURL(style_path);
map.setLatLngZoom({ lat, lon }, zoom);
map.setBearing(bearing);
map.setPitch(pitch);
if (debug) {
map.setDebug(debug ? mbgl::MapDebugOptions::TileBorders | mbgl::MapDebugOptions::ParseStatus : mbgl::MapDebugOptions::NoDebug);
}
try {
std::ofstream out(output, std::ios::binary);
out << encodePNG(frontend.render(map));
out.close();
} catch(std::exception& e) {
std::cout << "Error: " << e.what() << std::endl;
exit(1);
}
return 0;
}
|
(*<*)
(*
* Knowledge-based programs.
* (C)opyright 2011, Peter Gammie, peteg42 at gmail.com.
* License: BSD
*)
theory ClockView
imports
KBPsAlg
Eval
List_local
ODList
Trie2
"Transitive-Closure.Transitive_Closure_List_Impl"
"HOL-Library.Mapping"
begin
(*>*)
subsection\<open>The Clock View\<close>
text\<open>
\label{sec:kbps-theory-clock-view}
The \emph{clock view} records the current time and the observation for
the most recent state:
\<close>
definition (in Environment)
clock_jview :: "('a, 's, nat \<times> 'obs) JointView"
where
"clock_jview \<equiv> \<lambda>a t. (tLength t, envObs a (tLast t))"
(*<*)
context Environment
begin
lemma clock_jview_tInit:
"clock_jview a (tInit s) = (0, envObs a s)"
unfolding clock_jview_def by simp
lemma clock_jview_tStep:
"clock_jview a (t \<leadsto> s) = (Suc (tLength t), envObs a s)"
unfolding clock_jview_def by simp
lemma clock_jview_tStepI[intro]:
"\<lbrakk> tLength t = Suc n; envObs a (tLast t) = obs \<rbrakk>
\<Longrightarrow> clock_jview a t = (Suc n, obs)"
unfolding clock_jview_def by (cases t) simp_all
lemma clock_jview_inv:
"clock_jview a t = (n, obs) \<Longrightarrow> envObs a (tLast t) = obs"
unfolding clock_jview_def by (cases t) simp_all
lemmas clock_jview_simps =
clock_jview_tInit
clock_jview_tStep
clock_jview_inv
lemma clock_jview_eq_inv[iff]:
"clock_jview a t' = clock_jview a t
\<longleftrightarrow> tLength t' = tLength t \<and> envObs a (tLast t') = envObs a (tLast t)"
by (fastforce simp: clock_jview_def)
end(*>*)
text\<open>
This is the least-information synchronous view, given the requirements
of \S\ref{sec:kbps-views}. We show that finite-state implementations
exist for all environments with respect to this view as per
\<^citet>\<open>"Ron:1996"\<close>.
The corresponding incremental view simply increments the counter and
records the new observation.
\<close>
definition (in Environment)
clock_jviewInit :: "'a \<Rightarrow> 'obs \<Rightarrow> nat \<times> 'obs"
where
"clock_jviewInit \<equiv> \<lambda>a obs. (0, obs)"
definition (in Environment)
clock_jviewIncr :: "'a \<Rightarrow> 'obs \<Rightarrow> nat \<times> 'obs \<Rightarrow> nat \<times> 'obs"
where
"clock_jviewIncr \<equiv> \<lambda>a obs' (l, obs). (l + 1, obs')"
text\<open>
It is straightforward to demonstrate the assumptions of the
incremental environment locale (\S\ref{sec:kbps-environments}) with
respect to an arbitrary environment.
\<close>
sublocale Environment
< Clock: IncrEnvironment jkbp envInit envAction envTrans envVal
clock_jview envObs clock_jviewInit clock_jviewIncr
(*<*)
apply (unfold_locales)
apply (simp_all add: clock_jviewInit_def clock_jviewIncr_def clock_jview_def)
done
(*>*)
text\<open>
As we later show, satisfaction of a formula at a trace \<open>t \<in>
Clock.jkbpC\<^bsub>n\<^esub>\<close> is determined by the set of final states of traces in
\<open>Clock.jkbpCn\<close>:
\<close>
context Environment
begin
abbreviation clock_commonAbs :: "'s Trace \<Rightarrow> 's set" where
"clock_commonAbs t \<equiv> tLast ` Clock.jkbpCn (tLength t)"
text\<open>
Intuitively this set contains the states that the agents commonly
consider possible at time @{term "n"}, which is sufficient for
determining knowledge as the clock view ignores paths. Therefore we
can simulate trace @{term "t"} by pairing this abstraction of @{term
"t"} with its final state:
\<close>
type_synonym (in -) 's clock_simWorlds = "'s set \<times> 's"
definition clock_sim :: "'s Trace \<Rightarrow> 's clock_simWorlds" where
"clock_sim \<equiv> \<lambda>t. (clock_commonAbs t, tLast t)"
text\<open>
In the Kripke structure for our simulation, we relate worlds for
@{term "a"} if the sets of commonly-held states coincide, and the
observation of the final states of the traces is the
same. Propositions are evaluated at the final state.
\<close>
definition clock_simRels :: "'a \<Rightarrow> 's clock_simWorlds Relation" where
"clock_simRels \<equiv> \<lambda>a. { ((X, s), (X', s')) |X X' s s'.
X = X' \<and> {s, s'} \<subseteq> X \<and> envObs a s = envObs a s' }"
definition clock_simVal :: "'s clock_simWorlds \<Rightarrow> 'p \<Rightarrow> bool" where
"clock_simVal \<equiv> envVal \<circ> snd"
abbreviation clock_simMC :: "('a, 'p, 's clock_simWorlds) KripkeStructure" where
"clock_simMC \<equiv> mkKripke (clock_sim ` Clock.jkbpC) clock_simRels clock_simVal"
(*<*)
lemma clock_simVal_def2[iff]: "clock_simVal (clock_sim t) = envVal (tLast t)"
unfolding clock_sim_def clock_simVal_def by simp
lemma clock_sim_range:
"sim_range Clock.MC clock_simMC clock_sim"
by (rule sim_rangeI) (simp_all add: clock_sim_def)
lemma clock_simVal:
"sim_val Clock.MC clock_simMC clock_sim"
by (rule sim_valI) (simp add: clock_simVal_def clock_sim_def)
lemma clock_sim_f:
"sim_f Clock.MC clock_simMC clock_sim"
apply (rule sim_fI)
apply (simp add: clock_simRels_def clock_sim_def)
apply (intro conjI)
apply (fastforce intro!: imageI)
apply (fastforce intro!: imageI)
apply (fastforce dest: Clock.mkM_simps(2))
apply (rule_tac x=v in image_eqI)
apply simp_all
done
lemma clock_sim_r:
"sim_r Clock.MC clock_simMC clock_sim"
apply (rule sim_rI)
apply (clarsimp simp: clock_simRels_def clock_sim_def cong del: image_cong_simp)
apply (rule_tac x=xa in exI)
unfolding Clock.mkM_def
apply auto
done
(*>*)
text\<open>
That this is in fact a simulation
(\S\ref{sec:kripke-theory-simulations}) is entirely straightforward.
\<close>
lemma clock_sim:
"sim Clock.MC clock_simMC clock_sim"
(*<*)
using clock_sim_range clock_simVal clock_sim_f clock_sim_r
unfolding sim_def
by blast
(*>*)
end (* context Environment *)
text\<open>
The \<open>SimIncrEnvironment\<close> of
\S\ref{sec:kbps-theory-automata-env-sims} only requires that we
provide it an @{term "Environment"} and a simulation.
\<close>
sublocale Environment
< Clock: SimIncrEnvironment jkbp envInit envAction envTrans envVal
clock_jview envObs clock_jviewInit clock_jviewIncr
clock_sim clock_simRels clock_simVal
(*<*)
by (unfold_locales, simp_all add: clock_sim)
(*>*)
text\<open>
We next consider algorithmic issues.
\<close>
(* **************************************** *)
subsubsection\<open>Representations\<close>
text\<open>
\label{sec:kbps-theory-clock-view-rep}
We now turn to the issue of how to represent equivalence classes of
states. As these are used as map keys, it is easiest to represent them
canonically. A simple approach is to use \emph{ordered distinct lists}
of type @{typ "'a odlist"} for the sets and \emph{tries} for the
maps. Therefore we ask that environment states @{typ "'s"} belong to
the class \<open>linorder\<close> of linearly-ordered types, and moreover
that the set \<open>agents\<close> be effectively presented. We introduce a
new locale capturing these requirements:
\<close>
locale FiniteLinorderEnvironment =
Environment jkbp envInit envAction envTrans envVal envObs
for jkbp :: "('a::{finite, linorder}, 'p, 'aAct) JKBP"
and envInit :: "('s::{finite, linorder}) list"
and envAction :: "'s \<Rightarrow> 'eAct list"
and envTrans :: "'eAct \<Rightarrow> ('a \<Rightarrow> 'aAct) \<Rightarrow> 's \<Rightarrow> 's"
and envVal :: "'s \<Rightarrow> 'p \<Rightarrow> bool"
and envObs :: "'a \<Rightarrow> 's \<Rightarrow> 'obs"
+ fixes agents :: "'a odlist"
assumes agents: "ODList.toSet agents = UNIV"
context FiniteLinorderEnvironment
begin
text\<open>
\label{sec:kbps-theory-clock-view-algops}
For a fixed agent @{term "a"}, we can reduce the number of worlds in
@{term "clock_simMC"} by taking its quotient with respect to the
equivalence relation for @{term "a"}. In other words, we represent a
simulated equivalence class by a pair of the set of all states
reachable at a particular time, and the subset of these that @{term
"a"} considers possible. The worlds in our representational Kripke
structure are therefore a pair of ordered, distinct lists:
\<close>
type_synonym (in -) 's clock_simWorldsRep = "'s odlist \<times> 's odlist"
text\<open>
We can readily abstract a representation to a set of simulated
equivalence classes:
\<close>
definition (in -)
clock_simAbs :: "'s::linorder clock_simWorldsRep \<Rightarrow> 's clock_simWorlds set"
where
"clock_simAbs X \<equiv> { (ODList.toSet (fst X), s) |s. s \<in> ODList.toSet (snd X) }"
text\<open>
Assuming @{term "X"} represents a simulated equivalence class for
@{term "t \<in> jkbpC"}, @{term "clock_simAbs X"} decomposes into these
two functions:
\<close>
definition
agent_abs :: "'a \<Rightarrow> 's Trace \<Rightarrow> 's set"
where
"agent_abs a t \<equiv>
{ tLast t' |t'. t' \<in> Clock.jkbpC \<and> clock_jview a t' = clock_jview a t}"
definition
common_abs :: "'s Trace \<Rightarrow> 's set"
where
"common_abs t \<equiv> tLast ` Clock.jkbpCn (tLength t)"
(*<*)
lemma aec_refl[intro, simp]:
"t \<in> Clock.jkbpC \<Longrightarrow> tLast t \<in> agent_abs a t"
unfolding agent_abs_def by auto
lemma aec_cec_subset:
assumes tC: "t \<in> Clock.jkbpC"
and aec: "ODList.toSet aec = agent_abs a t"
and cec: "ODList.toSet cec = common_abs t"
shows "x \<in> ODList.toSet aec \<Longrightarrow> x \<in> ODList.toSet cec"
using assms
unfolding agent_abs_def common_abs_def
by fastforce
lemma clock_simAbs_refl:
assumes tC: "t \<in> Clock.jkbpC"
and ec: "clock_simAbs ec = Clock.sim_equiv_class a t"
shows "clock_sim t \<in> clock_simAbs ec"
using assms by simp
lemma common_abs:
assumes tC: "t \<in> Clock.jkbpC"
assumes ec: "clock_simAbs ec = Clock.sim_equiv_class a t"
shows "ODList.toSet (fst ec) = common_abs t"
using tC clock_simAbs_refl[OF tC ec]
unfolding clock_sim_def clock_simAbs_def common_abs_def
by (auto simp: ODList.toSet_def[symmetric])
lemma agent_abs:
assumes tC: "t \<in> Clock.jkbpC"
assumes ec: "clock_simAbs ec = Clock.sim_equiv_class a t"
shows "ODList.toSet (snd ec) = agent_abs a t"
using assms
unfolding clock_sim_def clock_simAbs_def agent_abs_def
apply auto
apply (subgoal_tac "(ODList.toSet (fst ec), x) \<in> {(ODList.toSet (fst ec), s) |s. s \<in> ODList.toSet (snd ec)}")
apply auto (* FIXME filthy *)
done
(*>*)
text\<open>
This representation is canonical on the domain of interest (though not
in general):
\<close>
lemma clock_simAbs_inj_on:
"inj_on clock_simAbs { x . clock_simAbs x \<in> Clock.jkbpSEC }"
(*<*)
proof(rule inj_onI)
fix x y
assume x: "x \<in> { x . clock_simAbs x \<in> Clock.jkbpSEC }"
and y: "y \<in> { x . clock_simAbs x \<in> Clock.jkbpSEC }"
and xy: "clock_simAbs x = clock_simAbs y"
from x obtain a t
where tC: "t \<in> Clock.jkbpC"
and ec: "clock_simAbs x = Clock.sim_equiv_class a t"
by auto
from common_abs[OF tC ec] common_abs[OF tC trans[OF xy[symmetric] ec], symmetric]
have "fst x = fst y" by (blast intro: injD[OF toSet_inj])
moreover
from agent_abs[OF tC ec] agent_abs[OF tC trans[OF xy[symmetric] ec], symmetric]
have "snd x = snd y" by (blast intro: injD[OF toSet_inj])
ultimately show "x = y" by (simp add: prod_eqI)
qed
(*>*)
text\<open>
We could further compress this representation by labelling each
element of the set of states reachable at time $n$ with a bit to
indicate whether the agent considers that state possible. Note,
however, that the representation would be non-canonical: if \<open>(s, True)\<close> is in the representation, indicating that the agent
considers \<open>s\<close> possible, then \<open>(s, False)\<close> may or may
not be. The associated abstraction function is not injective and hence
would obfuscate the following. Repairing this would entail introducing
a new type, which would again complicate this development.
The following lemmas make use of this Kripke structure, constructed
from the set of final states of a temporal slice @{term "X"}:
\<close>
definition
clock_repRels :: "'a \<Rightarrow> ('s \<times> 's) set"
where
"clock_repRels \<equiv> \<lambda>a. { (s, s'). envObs a s = envObs a s' }"
abbreviation
clock_repMC :: "'s set \<Rightarrow> ('a, 'p, 's) KripkeStructure"
where
"clock_repMC \<equiv> \<lambda>X. mkKripke X clock_repRels envVal"
(*<*)
lemma clock_repMC_kripke[intro, simp]: "kripke (clock_repMC X)"
by (rule kripkeI) simp
lemma clock_repMC_S5n[intro, simp]: "S5n (clock_repMC X)"
unfolding clock_repRels_def
by (intro S5nI equivI refl_onI symI transI) auto
(*>*)
text\<open>
We can show that this Kripke structure retains sufficient information
from @{term "Clock.MCS"} by showing simulation. This is eased by
introducing an intermediary structure that focusses on a particular
trace:
\<close>
abbreviation
clock_jkbpCSt :: "'b Trace \<Rightarrow> 's clock_simWorlds set"
where
"clock_jkbpCSt t \<equiv> clock_sim ` Clock.jkbpCn (tLength t)"
abbreviation
clock_simMCt :: "'b Trace \<Rightarrow> ('a, 'p, 's clock_simWorlds) KripkeStructure"
where
"clock_simMCt t \<equiv> mkKripke (clock_jkbpCSt t) clock_simRels clock_simVal"
definition clock_repSim :: "'s clock_simWorlds \<Rightarrow> 's" where
"clock_repSim \<equiv> snd"
(*<*)
lemma jkbpCSt_jkbpCS_subset:
"clock_jkbpCSt t \<subseteq> clock_sim ` Clock.jkbpC"
by auto
lemma jkbpCSt_refl[iff]:
"t \<in> Clock.jkbpC \<Longrightarrow> clock_sim t \<in> clock_jkbpCSt t"
by blast
lemma fst_clock_sim[iff]:
"t \<in> Clock.jkbpC \<Longrightarrow> fst (clock_sim t) = tLast ` Clock.jkbpCn (tLength t)"
by (simp add: clock_sim_def)
lemma clock_repSim_simps[simp]:
"clock_repSim ` clock_sim ` T = tLast ` T"
"clock_repSim (clock_sim t) = tLast t"
unfolding clock_repSim_def clock_sim_def
by (auto intro!: image_eqI)
(*>*)
text\<open>\<close>
lemma clock_repSim:
assumes tC: "t \<in> Clock.jkbpC"
shows "sim (clock_simMCt t)
((clock_repMC \<circ> fst) (clock_sim t))
clock_repSim"
(*<*) (is "sim ?M ?M' ?f")
proof
show "sim_range ?M ?M' ?f"
proof
show "worlds ?M' = ?f ` worlds ?M"
unfolding clock_sim_def clock_repSim_def by force
next
fix a
show "relations ?M' a \<subseteq> worlds ?M' \<times> worlds ?M'"
by (simp add: clock_sim_def clock_repSim_def)
qed
next
show "sim_val ?M ?M' ?f"
by (rule, simp add: clock_sim_def clock_simVal_def clock_repSim_def split: prod.split)
next
show "sim_f ?M ?M' ?f"
apply rule
unfolding clock_repRels_def clock_repSim_def clock_simRels_def
apply (auto iff: clock_sim_def)
done
next
show "sim_r ?M ?M' ?f"
apply rule
unfolding clock_repRels_def clock_repSim_def clock_simRels_def clock_sim_def
apply clarsimp
done
qed
(*>*)
text\<open>
The following sections show how we satisfy the remaining requirements
of the \<open>Algorithm\<close> locale of
Figure~\ref{fig:kbps-alg-alg-locale}. Where the proof is routine, we
simply present the lemma without proof or comment.
Due to a limitation in the code generator in the present version of
Isabelle (2011), we need to define the equations we wish to execute
outside of a locale; the syntax \<open>(in -)\<close> achieves this by
making definitions at the theory top-level. We then define (but elide)
locale-local abbreviations that supply the locale-bound variables to
these definitions.
\<close>
(* **************************************** *)
subsubsection\<open>Initial states\<close>
text\<open>
The initial states of the automaton for an agent are simply @{term
"envInit"} paired with the partition of @{term "envInit"} under the
agent's observation.
\<close>
definition (in -)
clock_simInit :: "('s::linorder) list \<Rightarrow> ('a \<Rightarrow> 's \<Rightarrow> 'obs)
\<Rightarrow> 'a \<Rightarrow> 'obs \<Rightarrow> 's clock_simWorldsRep"
where
"clock_simInit envInit envObs \<equiv> \<lambda>a iobs.
let cec = ODList.fromList envInit
in (cec, ODList.filter (\<lambda>s. envObs a s = iobs) cec)"
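(* For example, on hypothetical data: if envInit = [s1, s2, s3] and agent a's
   observation iobs matches s1 and s3 but not s2, then
   clock_simInit a iobs = (ODList.fromList [s1, s2, s3], ODList.fromList [s1, s3]). *)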
(*<*)
abbreviation
clock_simInit :: "'a \<Rightarrow> 'obs \<Rightarrow> 's clock_simWorldsRep"
where
"clock_simInit \<equiv> ClockView.clock_simInit envInit envObs"
(*>*)
text\<open>\<close>
lemma clock_simInit:
assumes "iobs \<in> envObs a ` set envInit"
shows "clock_simAbs (clock_simInit a iobs)
= clock_sim ` { t' \<in> Clock.jkbpC.
clock_jview a t' = clock_jviewInit a iobs }"
(*<*)
using assms
unfolding clock_simInit_def clock_simAbs_def clock_sim_def [abs_def] Let_def
apply clarsimp
apply rule
apply clarsimp
apply (rule_tac x="tInit s" in image_eqI)
apply (auto simp: Set.image_def Clock.jviewInit)[2]
apply clarsimp
apply (case_tac xa)
apply clarsimp
apply rule
apply rule
apply clarsimp
apply clarsimp
apply (rule_tac x="tInit xa" in image_eqI)
apply (auto intro!: image_eqI simp: Clock.jviewInit)
done
(*>*)
(* **************************************** *)
subsubsection\<open>Simulated observations\<close>
text\<open>
Agent @{term "a"} will make the same observation at any of the worlds
that it considers possible, so we choose the first one in the list:
\<close>
definition (in -)
clock_simObs :: "('a \<Rightarrow> ('s :: linorder) \<Rightarrow> 'obs)
\<Rightarrow> 'a \<Rightarrow> 's clock_simWorldsRep \<Rightarrow> 'obs"
where
"clock_simObs envObs \<equiv> \<lambda>a. envObs a \<circ> ODList.hd \<circ> snd"
(*<*)
abbreviation
clock_simObs :: "'a \<Rightarrow> 's clock_simWorldsRep \<Rightarrow> 'obs"
where
"clock_simObs \<equiv> ClockView.clock_simObs envObs"
(*>*)
text\<open>\<close>
lemma clock_simObs:
assumes tC: "t \<in> Clock.jkbpC"
and ec: "clock_simAbs ec = Clock.sim_equiv_class a t"
shows "clock_simObs a ec = envObs a (tLast t)"
(*<*)
proof -
have A: "\<forall>s \<in> set (toList (snd ec)). envObs a s = envObs a (tLast t)"
using agent_abs[OF tC ec]
by (clarsimp simp: agent_abs_def toSet_def)
have B: "tLast t \<in> set (toList (snd ec))"
using clock_simAbs_refl[OF assms]
unfolding clock_simAbs_def clock_sim_def
by (simp add: toSet_def snd_def)
show ?thesis
unfolding clock_simObs_def by (simp add: list_choose_hd[OF A B] ODList.hd_def)
qed
(*>*)
(* **************************************** *)
subsubsection\<open>Evaluation\<close>
text\<open>
\label{sec:kbps-theory-clock-view-eval}
We define our \<open>eval\<close> function in terms of @{term "evalS"},
which implements boolean logic over @{typ "'s odlist"} in the usual
way -- see \S\ref{sec:kbps-spr-single-agent-eval} for the relevant
clauses. It requires three functions specific to the representation:
one each for propositions, knowledge and common knowledge.
Propositions define subsets of the worlds considered possible:
\<close>
abbreviation (in -)
clock_evalProp :: "(('s::linorder) \<Rightarrow> 'p \<Rightarrow> bool)
\<Rightarrow> 's odlist \<Rightarrow> 'p \<Rightarrow> 's odlist"
where
"clock_evalProp envVal \<equiv> \<lambda>X p. ODList.filter (\<lambda>s. envVal s p) X"
text\<open>
The knowledge relation computes the subset of the
commonly-held-possible worlds \<open>cec\<close> that agent @{term "a"}
considers possible at world @{term "s"}:
\<close>
definition (in -)
clock_knowledge :: "('a \<Rightarrow> ('s :: linorder) \<Rightarrow> 'obs) \<Rightarrow> 's odlist
\<Rightarrow> 'a \<Rightarrow> 's \<Rightarrow> 's odlist"
where
"clock_knowledge envObs cec \<equiv> \<lambda>a s.
ODList.filter (\<lambda>s'. envObs a s = envObs a s') cec"
text\<open>
Similarly the common knowledge operation computes the transitive
closure of the union of the knowledge relations for the agents \<open>as\<close>:
\<close>
definition (in -)
clock_commonKnowledge :: "('a \<Rightarrow> ('s :: linorder) \<Rightarrow> 'obs) \<Rightarrow> 's odlist
\<Rightarrow> 'a list \<Rightarrow> 's \<Rightarrow> 's odlist"
where
"clock_commonKnowledge envObs cec \<equiv> \<lambda>as s.
let r = \<lambda>a. ODList.fromList [ (s', s'') . s' \<leftarrow> toList cec, s'' \<leftarrow> toList cec,
envObs a s' = envObs a s'' ];
R = toList (ODList.big_union r as)
in ODList.fromList (memo_list_trancl R s)"
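(* Operationally, on hypothetical data: for cec = ODList.fromList [s1, s2, s3]
   and as = [a1, a2], each r ai enumerates the pairs of cec-states that ai
   cannot distinguish; memo_list_trancl then yields the states reachable from s
   under the transitive closure of their union, i.e. the common-knowledge cell
   containing s. *)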
text\<open>
The function \<open>memo_list_trancl\<close> comes from the executable
transitive closure theory of \<^citep>\<open>"AFP:TRANCL"\<close>.
The evaluation function evaluates a subjective knowledge formula on
the representation of an equivalence class:
\<close>
definition (in -)
eval :: "(('s :: linorder) \<Rightarrow> 'p \<Rightarrow> bool)
\<Rightarrow> ('a \<Rightarrow> 's \<Rightarrow> 'obs)
\<Rightarrow> 's clock_simWorldsRep \<Rightarrow> ('a, 'p) Kform \<Rightarrow> bool"
where
"eval envVal envObs \<equiv> \<lambda>(cec, aec). evalS (clock_evalProp envVal)
(clock_knowledge envObs cec)
(clock_commonKnowledge envObs cec)
aec"
text\<open>
This function corresponds with the standard semantics:
\<close>
(*<*)
lemma clock_coEC_relation_image:
"s \<in> ODList.toSet Y
\<Longrightarrow> ODList.toSet (clock_knowledge envObs Y a s) = relations (clock_repMC (ODList.toSet Y)) a `` {s}"
unfolding clock_knowledge_def clock_repRels_def Image_def
by auto
lemma clock_commonKnowledge_relation_image_aux:
"(\<Union>x\<in>set as. \<Union>a\<in>ODList.toSet Y. \<Union>aa\<in>ODList.toSet Y \<inter> {s''. envObs x a = envObs x s''}. {(a, aa)})
= ((\<Union>a\<in>set as. {(s, s'). envObs a s = envObs a s'}) \<inter> ODList.toSet Y \<times> ODList.toSet Y)"
by auto
lemma clock_commonKnowledge_relation_image:
"s \<in> ODList.toSet Y
\<Longrightarrow> ODList.toSet (clock_commonKnowledge envObs Y as s) = (\<Union>a \<in> set as. relations (clock_repMC (ODList.toSet Y)) a)\<^sup>+ `` {s}"
unfolding clock_commonKnowledge_def clock_repRels_def Let_def
apply (simp add: memo_list_trancl toSet_def[symmetric] Image_def clock_commonKnowledge_relation_image_aux)
done
lemma eval_rec_models:
assumes XY: "ODList.toSet X \<subseteq> ODList.toSet Y"
and s: "s \<in> ODList.toSet X"
shows "s \<in> ODList.toSet (eval_rec (clock_evalProp envVal) (clock_knowledge envObs Y) (clock_commonKnowledge envObs Y) X \<phi>)
\<longleftrightarrow> clock_repMC (ODList.toSet Y), s \<Turnstile> \<phi>"
using XY s
proof(induct \<phi> arbitrary: X s)
case (Kknows a' \<phi> X s)
from \<open>s \<in> ODList.toSet X\<close> clock_coEC_relation_image[OF subsetD[OF Kknows(2) Kknows(3)], where a=a']
show ?case
apply simp
apply rule
apply (drule arg_cong[where f="ODList.toSet"])
apply (clarsimp simp: odlist_all_iff)
apply (cut_tac s3="w'" and X3="clock_knowledge envObs Y a' s" in Kknows.hyps)
using Kknows(2) Kknows(3)
apply (auto simp add: S5n_rels_closed[OF clock_repMC_S5n])[3]
apply (clarsimp simp: toSet_eq_iff odlist_all_iff)
apply (subst Kknows.hyps)
using Kknows(2) Kknows(3)
apply (auto simp add: S5n_rels_closed[OF clock_repMC_S5n])
done
next
case (Kcknows as \<phi> X s)
show ?case
proof(cases "as = Nil")
case True with \<open>s \<in> ODList.toSet X\<close> show ?thesis by clarsimp
next
case False
with \<open>s \<in> ODList.toSet X\<close> clock_commonKnowledge_relation_image[OF subsetD[OF Kcknows(2) Kcknows(3)], where as=as]
show ?thesis
apply simp
apply rule
apply (drule arg_cong[where f="ODList.toSet"])
apply (clarsimp simp: odlist_all_iff)
apply (cut_tac s3="w'" and X3="clock_commonKnowledge envObs Y as s" in Kcknows.hyps)
using Kcknows(2) Kcknows(3)
apply (auto simp add: S5n_rels_closed[OF clock_repMC_S5n])[3]
apply (subst (asm) trancl_unfold) back back back
apply auto[1] (* FIXME disgusting *)
apply (clarsimp simp: toSet_eq_iff odlist_all_iff)
apply (subst Kcknows.hyps)
using Kcknows(2) Kcknows(3)
apply (auto simp add: S5n_rels_closed[OF clock_repMC_S5n])
apply (subst (asm) trancl_unfold) back back back
apply auto[1] (* FIXME disgusting *)
done
qed
qed simp_all
lemma trc_aux:
assumes tC: "t \<in> Clock.jkbpC"
and aec: "ODList.toSet aec = agent_abs a t"
and cec: "ODList.toSet cec = common_abs t"
shows "ODList.toSet (big_union (clock_commonKnowledge envObs cec as) (toList aec)) \<subseteq> ODList.toSet cec"
apply (clarsimp simp: toSet_def[symmetric])
apply (subst (asm) clock_commonKnowledge_relation_image)
apply (erule aec_cec_subset[OF tC aec cec])
apply (subst (asm) trancl_unfold)
using assms
apply (auto simp: agent_abs_def)
done
lemma clock_repMC_aec:
assumes tC: "t \<in> Clock.jkbpC"
and aec: "ODList.toSet aec = agent_abs a t"
and cec: "ODList.toSet cec = common_abs t"
and x: "x \<in> ODList.toSet aec"
and xy: "(x, y) \<in> relations (clock_repMC (ODList.toSet cec)) a"
shows "y \<in> ODList.toSet aec"
using assms
unfolding clock_repRels_def agent_abs_def common_abs_def
by auto
lemma clock_repMC_cec:
assumes tC: "t \<in> Clock.jkbpC"
and aec: "ODList.toSet aec = agent_abs a t"
and cec: "ODList.toSet cec = common_abs t"
and x: "x \<in> ODList.toSet aec"
and y: "y \<in> ODList.toSet aec"
shows "(x, y) \<in> relations (clock_repMC (ODList.toSet cec)) a"
using assms
unfolding clock_repRels_def agent_abs_def common_abs_def
by auto
lemma evalS_models:
assumes tC: "t \<in> Clock.jkbpC"
and aec: "ODList.toSet aec = agent_abs a t"
and cec: "ODList.toSet cec = common_abs t"
and subj_phi: "subjective a \<phi>"
and s: "s \<in> ODList.toSet aec"
shows "evalS (clock_evalProp envVal) (clock_knowledge envObs cec) (clock_commonKnowledge envObs cec) aec \<phi>
\<longleftrightarrow> clock_repMC (ODList.toSet cec), s \<Turnstile> \<phi>" (is "?lhs \<phi> = ?rhs \<phi>")
using subj_phi s aec cec
proof(induct \<phi> rule: subjective.induct[case_names Kprop Knot Kand Kknows Kcknows])
case (Kknows a a' \<psi>) show ?case
apply (clarsimp simp: toSet_eq_iff)
apply rule
apply clarsimp
apply (subgoal_tac "w' \<in> ODList.toSet aec")
apply (drule_tac c="w'" in subsetD)
apply assumption
apply (simp add: eval_rec_models[OF subsetI[OF aec_cec_subset[OF tC aec cec]]])
apply (rule clock_repMC_aec[OF tC Kknows(3) Kknows(4), rotated, where x=s])
using Kknows
apply simp
using Kknows
apply simp
apply clarsimp
apply (simp add: eval_rec_models[OF subsetI[OF aec_cec_subset[OF tC aec cec]]])
using tC Kknows
apply (clarsimp simp: agent_abs_def)
apply (erule (1) ballE)
using Kknows
apply (cut_tac x="tLast t'" and y="tLast t'a" in clock_repMC_cec[OF tC Kknows(3) Kknows(4)])
unfolding clock_repRels_def
apply auto
done
next
case (Kcknows a as \<psi>)
have "?lhs (Kcknows as \<psi>)
= (\<forall>y\<in>ODList.toSet aec.
\<forall>x\<in>(\<Union>a\<in>set as. relations (clock_repMC (ODList.toSet cec)) a)\<^sup>+ `` {y}.
x \<in> ODList.toSet (eval_rec (clock_evalProp envVal) (clock_knowledge envObs cec) (clock_commonKnowledge envObs cec)
(big_union (clock_commonKnowledge envObs cec as) (toList aec)) \<psi>))"
(* FIXME dreaming of a cong rule here. *)
using toSet_def[symmetric]
apply (clarsimp simp: toSet_eq_iff toSet_def[symmetric] subset_eq)
apply (rule ball_cong[OF refl])
apply (rule ball_cong)
apply (subst clock_commonKnowledge_relation_image[OF aec_cec_subset[OF tC Kcknows(3) Kcknows(4)]])
apply simp_all
done
also have "... = (\<forall>s\<in>ODList.toSet aec. clock_repMC (ODList.toSet cec), s \<Turnstile> Kcknows as \<psi>)"
apply (rule ball_cong[OF refl])
apply simp
apply (rule ball_cong[OF refl])
apply (subst eval_rec_models[OF trc_aux[OF tC Kcknows(3) Kcknows(4), where as=as], symmetric])
apply (simp add: toSet_def[symmetric])
apply (rule_tac x=y in bexI)
apply (subst clock_commonKnowledge_relation_image[OF aec_cec_subset[OF tC Kcknows(3) Kcknows(4)]])
apply simp_all
done
also have "... = clock_repMC (ODList.toSet cec), s \<Turnstile> Kknows a (Kcknows as \<psi>)"
using clock_repMC_aec[OF tC Kcknows(3) Kcknows(4) Kcknows(2)]
clock_repMC_cec[OF tC Kcknows(3) Kcknows(4) Kcknows(2)]
by (auto cong: ball_cong)
also have "... = clock_repMC (ODList.toSet cec), s \<Turnstile> Kcknows as \<psi>"
apply (rule S5n_common_knowledge_fixed_point_simpler[symmetric])
using Kcknows
apply (auto intro: aec_cec_subset[OF tC Kcknows(3) Kcknows(4) Kcknows(2)])
done
finally show ?case .
qed simp_all
(*>*)
lemma eval_models:
assumes tC: "t \<in> Clock.jkbpC"
and aec: "ODList.toSet aec = agent_abs a t"
and cec: "ODList.toSet cec = common_abs t"
and subj_phi: "subjective a \<phi>"
and s: "s \<in> ODList.toSet aec"
shows "eval envVal envObs (cec, aec) \<phi>
\<longleftrightarrow> clock_repMC (ODList.toSet cec), s \<Turnstile> \<phi>"
(*<*)
unfolding eval_def
using evalS_models[OF tC aec cec subj_phi s]
apply (simp add: Let_def)
done
(*>*)
(* **************************************** *)
subsubsection\<open>Simulated actions\<close>
text\<open>
From a common equivalence class and a subjective equivalence class for
agent @{term "a"}, we can compute the actions enabled for @{term "a"}:
\<close>
definition (in -)
clock_simAction :: "('a, 'p, 'aAct) JKBP \<Rightarrow> (('s :: linorder) \<Rightarrow> 'p \<Rightarrow> bool)
\<Rightarrow> ('a \<Rightarrow> 's \<Rightarrow> 'obs)
\<Rightarrow> 'a \<Rightarrow> 's clock_simWorldsRep \<Rightarrow> 'aAct list"
where
"clock_simAction jkbp envVal envObs \<equiv> \<lambda>a (Y, X).
[ action gc. gc \<leftarrow> jkbp a, eval envVal envObs (Y, X) (guard gc) ]"
(*<*)
abbreviation
clock_simAction :: "'a \<Rightarrow> 's clock_simWorldsRep \<Rightarrow> 'aAct list"
where
"clock_simAction \<equiv> ClockView.clock_simAction jkbp envVal envObs"
(*>*)
text\<open>
Using the above result about evaluation, we can relate \<open>clock_simAction\<close> to @{term "jAction"}. Firstly, \<open>clock_simAction\<close> behaves the same as @{term "jAction"} using the
@{term "clock_repMC"} structure:
\<close>
lemma clock_simAction_jAction:
assumes tC: "t \<in> Clock.jkbpC"
and aec: "ODList.toSet aec = agent_abs a t"
and cec: "ODList.toSet cec = common_abs t"
shows "set (clock_simAction a (cec, aec))
= set (jAction (clock_repMC (ODList.toSet cec)) (tLast t) a)"
(*<*)
unfolding clock_simAction_def jAction_def
apply clarsimp
apply rule
apply clarsimp
apply (rule_tac x=xa in bexI)
apply simp
apply clarsimp
apply (subst eval_models[OF tC aec cec, symmetric])
using tC aec cec subj
apply simp_all
apply clarsimp
apply (rule_tac x=xa in bexI)
apply (rule refl)
apply clarsimp
apply (subst eval_models[OF tC aec cec])
using tC aec cec subj
apply simp_all
done
lemma clock_submodel_aux:
assumes tC: "t \<in> Clock.jkbpC"
and s: "s \<in> worlds (clock_simMCt t)"
shows "gen_model Clock.MCS s = gen_model (clock_simMCt t) s"
proof(rule gen_model_subset[where T="clock_jkbpCSt t"])
fix a
let ?X = "clock_sim ` Clock.jkbpCn (tLength t)"
show "relations Clock.MCS a \<inter> ?X \<times> ?X
= relations (clock_simMCt t) a \<inter> ?X \<times> ?X"
by (simp add: Int_ac Int_absorb1
relation_mono[OF jkbpCSt_jkbpCS_subset jkbpCSt_jkbpCS_subset])
next
let ?X = "clock_sim ` Clock.jkbpCn (tLength t)"
from s show "(\<Union>a. relations (clock_simMCt t) a)\<^sup>* `` {s} \<subseteq> ?X"
apply (clarsimp simp del: mkKripke_simps)
apply (erule kripke_rels_trc_worlds)
apply auto
done
next
let ?Y = "Clock.jkbpCn (tLength t)"
let ?X = "clock_sim ` ?Y"
from s obtain t'
where st': "s = clock_sim t'"
and t'C: "t' \<in> Clock.jkbpC"
and t'O: "tLength t = tLength t'"
by fastforce
{ fix t''
assume tt': "(t', t'') \<in> (\<Union>a. relations Clock.MC a)\<^sup>*"
from t'C tt' have t''C: "t'' \<in> Clock.jkbpC"
by - (erule kripke_rels_trc_worlds, simp_all)
from t'O tt' have t''O: "tLength t = tLength t''"
by (simp add: Clock.sync_tLength_eq_trc)
from t''C t''O have "t'' \<in> ?Y" by fastforce }
hence "(\<Union>a. relations Clock.MC a)\<^sup>* `` {t'} \<subseteq> ?Y"
by clarsimp
hence "clock_sim ` ((\<Union>a. relations Clock.MC a)\<^sup>* `` {t'}) \<subseteq> ?X"
by (rule image_mono)
with st' t'C
show "(\<Union>a. relations Clock.MCS a)\<^sup>* `` {s} \<subseteq> ?X"
using sim_trc_commute[OF Clock.mkM_kripke clock_sim, where t=t'] by simp
qed (insert s, auto)
(*>*)
text\<open>
We can connect the agent's choice of actions on the \<open>clock_repMC\<close> structure to those on the \<open>Clock.MC\<close>
structure using our earlier results about actions being preserved by
generated models and simulations.
\<close>
lemma clock_simAction':
assumes tC: "t \<in> Clock.jkbpC"
assumes aec: "ODList.toSet aec = agent_abs a t"
assumes cec: "ODList.toSet cec = common_abs t"
shows "set (clock_simAction a (cec, aec)) = set (jAction Clock.MC t a)"
(*<*) (is "?lhs = ?rhs")
proof -
from tC aec cec
have "?lhs = set (jAction (clock_repMC (ODList.toSet cec)) (tLast t) a)"
by (rule clock_simAction_jAction)
also from tC aec cec
have "... = set (jAction (clock_simMCt t) (clock_sim t) a)"
by (simp add: simulation_jAction_eq[OF _ clock_repSim] common_abs_def)
also from tC
have "... = set (jAction Clock.MCS (clock_sim t) a)"
using gen_model_jAction_eq[OF clock_submodel_aux[OF tC, where s="clock_sim t"], where w'="clock_sim t"]
gen_model_world_refl[where w="clock_sim t" and M="clock_simMCt t"]
by simp
also from tC have "... = set (jAction Clock.MC t a)"
by (simp add: simulation_jAction_eq[OF _ clock_sim])
finally show ?thesis .
qed
(*>*)
text\<open>
The @{term "Algorithm"} locale requires a specialisation of this
lemma:
\<close>
lemma clock_simAction:
assumes tC: "t \<in> Clock.jkbpC"
assumes ec: "clock_simAbs ec = Clock.sim_equiv_class a t"
shows "set (clock_simAction a ec) = set (jAction Clock.MC t a)"
(*<*)
using assms clock_simAction'[OF tC, where cec="fst ec" and aec="snd ec"]
apply (simp add: common_abs agent_abs)
done
(*>*)
(* **************************************** *)
subsubsection\<open>Simulated transitions\<close>
text\<open>
We need to determine the image of the set of commonly-held-possible
states under the transition function, and also for the agent's
subjective equivalence class. We do this with the \<open>clock_trans\<close> function:
\<close>
definition (in -)
clock_trans :: "('a :: linorder) odlist \<Rightarrow> ('a, 'p, 'aAct) JKBP
\<Rightarrow> (('s :: linorder) \<Rightarrow> 'eAct list)
\<Rightarrow> ('eAct \<Rightarrow> ('a \<Rightarrow> 'aAct) \<Rightarrow> 's \<Rightarrow> 's)
\<Rightarrow> ('s \<Rightarrow> 'p \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> 's \<Rightarrow> 'obs)
\<Rightarrow> 's odlist \<Rightarrow> 's odlist \<Rightarrow> 's odlist"
where
"clock_trans agents jkbp envAction envTrans envVal envObs \<equiv> \<lambda>cec X.
ODList.fromList (concat
[ [ envTrans eact aact s .
eact \<leftarrow> envAction s,
aact \<leftarrow> listToFuns (\<lambda>a. clock_simAction jkbp envVal envObs a
(cec, clock_knowledge envObs cec a s))
(toList agents) ] .
s \<leftarrow> toList X ])"
(*<*)
abbreviation
clock_trans :: "'s odlist \<Rightarrow> 's odlist \<Rightarrow> 's odlist"
where
"clock_trans \<equiv> ClockView.clock_trans agents jkbp envAction envTrans envVal envObs"
lemma clock_trans_aux:
assumes t'C: "t' \<in> Clock.jkbpC"
and ec: "clock_simAbs ec = Clock.sim_equiv_class a' t'"
and tC: "t \<in> Clock.jkbpCn (tLength t')"
and eact: "eact \<in> set (envAction (tLast t))"
shows "(aact \<in> set (listToFuns (\<lambda>a. clock_simAction a (fst ec, clock_knowledge envObs (fst ec) a (tLast t)))
(toList agents)))
\<longleftrightarrow> (\<forall>a. aact a \<in> set (jAction (Clock.MCn (tLength t')) t a))"
using assms
apply -
apply (frule Clock.jkbpCn_jkbpC_inc)
apply (clarsimp simp: listToFuns_ext[OF agents[unfolded toSet_def]])
apply (subst clock_simAction')
apply (erule Clock.jkbpCn_jkbpC_inc)
apply (subst clock_coEC_relation_image)
apply (simp add: common_abs common_abs_def toSet_def[symmetric])
apply (fastforce simp: common_abs agent_abs_def common_abs_def clock_repRels_def)
apply (simp add: common_abs common_abs_def)
apply (simp add: Clock.jkbpC_jkbpCn_jAction_eq)
done
(*>*)
text\<open>
The function @{term "listToFuns"} exhibits the isomorphism between @{typ
"('a \<times> 'b list) list"} and @{typ "('a \<Rightarrow> 'b) list"} for finite types
@{typ "'a"}.
We can show that the transition function works for both the
commonly-held set of states and the agent's subjective one. The proofs
are straightforward.
\<close>
lemma clock_trans_common:
assumes tC: "t \<in> Clock.jkbpC"
assumes ec: "clock_simAbs ec = Clock.sim_equiv_class a t"
shows "ODList.toSet (clock_trans (fst ec) (fst ec))
= { s |t' s. t' \<leadsto> s \<in> Clock.jkbpC \<and> tLength t' = tLength t }"
(*<*) (is "?lhs = ?rhs")
proof
show "?lhs \<subseteq> ?rhs"
unfolding clock_trans_def
apply (clarsimp simp: toSet_def[symmetric] common_abs[OF assms] common_abs_def)
apply (rule_tac x=xa in exI)
apply clarsimp
apply (rule Clock.jkbpCn_jkbpC_inc[where n="Suc (tLength t)"])
apply (auto simp: Let_def iff: clock_trans_aux[OF tC ec])
done
next
show "?rhs \<subseteq> ?lhs"
unfolding clock_trans_def
apply (clarsimp simp: toSet_def[symmetric] common_abs[OF assms] common_abs_def)
apply (drule Clock.jkbpC_tLength_inv[where n="Suc (tLength t)"])
apply (auto simp: Let_def iff: clock_trans_aux[OF tC ec])
done
qed
(*>*)
text\<open>\<close>
lemma clock_trans_agent:
assumes tC: "t \<in> Clock.jkbpC"
assumes ec: "clock_simAbs ec = Clock.sim_equiv_class a t"
shows "ODList.toSet (clock_trans (fst ec) (snd ec))
= { s |t' s. t' \<leadsto> s \<in> Clock.jkbpC \<and> clock_jview a t' = clock_jview a t }"
(*<*) (is "?lhs = ?rhs")
proof
show "?lhs \<subseteq> ?rhs"
unfolding clock_trans_def
apply (clarsimp simp: toSet_def[symmetric] common_abs[OF assms] agent_abs[OF assms] common_abs_def agent_abs_def)
apply (rule_tac x=t' in exI)
apply clarsimp
apply (rule Clock.jkbpCn_jkbpC_inc[where n="Suc (tLength t)"])
apply (auto simp: Let_def iff: clock_trans_aux[OF tC ec])
done
next
show "?rhs \<subseteq> ?lhs"
unfolding clock_trans_def
apply (clarsimp simp: toSet_def[symmetric] common_abs[OF assms] agent_abs[OF assms] common_abs_def agent_abs_def)
apply (drule Clock.jkbpC_tLength_inv[where n="Suc (tLength t)"])
apply (auto simp: Let_def iff: clock_trans_aux[OF tC ec])
done
qed
(*>*)
text\<open>
Note that the clock semantics disregards paths, so we simply compute
the successors of the temporal slice and partition that. Similarly the
successors of the agent's subjective equivalence class tell us what
the set of possible observations is:
\<close>
definition (in -)
clock_mkSuccs :: "('s :: linorder \<Rightarrow> 'obs) \<Rightarrow> 'obs \<Rightarrow> 's odlist
\<Rightarrow> 's clock_simWorldsRep"
where
"clock_mkSuccs envObs obs Y' \<equiv> (Y', ODList.filter (\<lambda>s. envObs s = obs) Y')"
text\<open>
Finally we can define our transition function on simulated states:
\<close>
definition (in -)
clock_simTrans :: "('a :: linorder) odlist \<Rightarrow> ('a, 'p, 'aAct) JKBP
\<Rightarrow> (('s :: linorder) \<Rightarrow> 'eAct list)
\<Rightarrow> ('eAct \<Rightarrow> ('a \<Rightarrow> 'aAct) \<Rightarrow> 's \<Rightarrow> 's)
\<Rightarrow> ('s \<Rightarrow> 'p \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> 's \<Rightarrow> 'obs)
\<Rightarrow> 'a \<Rightarrow> 's clock_simWorldsRep \<Rightarrow> 's clock_simWorldsRep list"
where
"clock_simTrans agents jkbp envAction envTrans envVal envObs \<equiv> \<lambda>a (Y, X).
let X' = clock_trans agents jkbp envAction envTrans envVal envObs Y X;
Y' = clock_trans agents jkbp envAction envTrans envVal envObs Y Y
in [ clock_mkSuccs (envObs a) obs Y' .
obs \<leftarrow> map (envObs a) (toList X') ]"
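(* Here X' collects the successors of the agent's subjective class (so its
   image under envObs a is the set of possible next observations), while Y'
   collects the successors of the whole temporal slice; each observation obs
   yields the successor representation (Y', { s in Y'. envObs a s = obs }). *)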
(*<*)
abbreviation
clock_simTrans :: "'a \<Rightarrow> 's clock_simWorldsRep \<Rightarrow> 's clock_simWorldsRep list"
where
"clock_simTrans \<equiv> ClockView.clock_simTrans agents jkbp envAction envTrans envVal envObs"
(*>*)
text\<open>
Showing that this respects the property asked of it by the @{term
"Algorithm"} locale is straightforward:
\<close>
lemma clock_simTrans:
assumes tC: "t \<in> Clock.jkbpC"
and ec: "clock_simAbs ec = Clock.sim_equiv_class a t"
shows "clock_simAbs ` set (clock_simTrans a ec)
= { Clock.sim_equiv_class a (t' \<leadsto> s)
|t' s. t' \<leadsto> s \<in> Clock.jkbpC \<and> clock_jview a t' = clock_jview a t }"
(*<*) (is "?lhs = ?rhs")
proof
note image_cong_simp [cong del]
show "?lhs \<subseteq> ?rhs"
unfolding clock_simTrans_def clock_mkSuccs_def
using clock_trans_common[OF tC ec] clock_trans_agent[OF tC ec]
apply (clarsimp simp: toSet_def[symmetric] clock_simAbs_def Let_def)
apply (rule_tac x=t' in exI)
apply (rule_tac x=xa in exI)
apply (clarsimp simp: clock_sim_def)
apply safe
apply clarsimp
apply (rule_tac x="t'a \<leadsto> s" in image_eqI)
apply (clarsimp simp: Let_def Set.image_def)
apply safe
apply (rule_tac x="t'b \<leadsto> x" in exI)
apply (clarsimp simp: Let_def Set.image_def)
apply (drule_tac t="t'b \<leadsto> x" in Clock.jkbpC_tLength_inv[OF _ refl])
apply (auto simp: Let_def)[1]
apply (rule_tac x="ta" in exI)
apply simp
apply (rule Clock.jkbpCn_jkbpC_inc[where n="Suc (tLength t)"])
apply (auto simp: Let_def)[3]
apply (rule_tac x="tLast ta" in exI)
apply (clarsimp simp: Let_def Set.image_def)
apply safe
apply (rule_tac x="taa" in exI)
apply simp
apply (rule Clock.jkbpCn_jkbpC_inc[where n="Suc (tLength t)"])
apply (auto simp: Let_def)[1]
apply (drule_tac t="t'a \<leadsto> x" in Clock.jkbpC_tLength_inv[OF _ refl])
apply (rule_tac x="t'a \<leadsto> x" in exI)
apply (auto simp: Let_def)[1]
apply (drule_tac t="ta" in Clock.jkbpC_tLength_inv)
apply blast
apply (clarsimp simp: Let_def)
apply (rule_tac x="ta" in exI)
apply simp
apply (rule Clock.jkbpCn_jkbpC_inc[where n="Suc (tLength t)"])
apply (auto simp: Let_def)
done
next
show "?rhs \<subseteq> ?lhs"
unfolding clock_simTrans_def Let_def
apply (cases ec)
using clock_trans_common[OF tC ec] clock_trans_agent[OF tC ec]
apply (clarsimp simp: toSet_def[symmetric] Set.image_def clock_simAbs_def
simp del: split_paired_Ex)
apply (rule_tac x="clock_mkSuccs (envObs a) (envObs a s) (clock_trans aa aa)" in exI)
apply safe
apply auto[1]
apply (rule_tac x="tLast x" in exI)
apply (clarsimp simp: clock_trans_common[OF tC ec] clock_mkSuccs_def)
apply safe
apply (clarsimp simp: clock_sim_def simp del: Clock.jkbpCn.simps)
apply rule
apply (clarsimp simp: Let_def)
apply (rule_tac x="ta" in exI)
apply (simp add: Let_def)
apply (rule Clock.jkbpCn_jkbpC_inc[where n="Suc (tLength t)"])
apply (clarsimp simp: Let_def)
apply (rule_tac x=eact in exI)
apply (rule_tac x=aact in exI)
apply clarsimp
apply (clarsimp simp: Let_def Set.image_def)
apply (drule_tac t="t'a \<leadsto> xa" in Clock.jkbpC_tLength_inv[OF _ refl])
apply (rule_tac x="t'a \<leadsto> xa" in exI)
apply (auto simp: Let_def)[1]
apply (drule_tac t="x" in Clock.jkbpC_tLength_inv[OF _ refl])
apply (simp only: Let_def Clock.jkbpCn.simps)
apply clarify
apply (rule_tac x="ta" in exI)
apply simp
apply (rule Clock.jkbpCn_jkbpC_inc[where n="Suc (tLength t)"])
apply (auto simp: Let_def)[1]
apply (clarsimp simp: clock_trans_common[OF tC ec] clock_mkSuccs_def)
apply (rule_tac x="t'a \<leadsto> sa" in exI)
apply (clarsimp simp: clock_sim_def Let_def)
(* FIXME similar to above *)
apply rule
apply (clarsimp simp: Set.image_def)
apply (rule_tac x="t'b \<leadsto> x" in exI)
apply (drule_tac t="t'b \<leadsto> x" in Clock.jkbpC_tLength_inv[OF _ refl])
apply (auto simp: Let_def)[1]
apply clarsimp
apply (rule_tac x="ta" in exI)
apply auto
apply (rule Clock.jkbpCn_jkbpC_inc[where n="Suc (tLength t)"])
apply (auto simp: Let_def)
done
qed
(*>*)
end (* context FiniteLinorderEnvironment *)
(* **************************************** *)
subsubsection\<open>Maps\<close>
text\<open>
\label{sec:kbps-theory-clock-view-maps}
As mentioned above, the canonicity of our ordered, distinct list
representation of automaton states allows us to use them as keys in a
digital trie; a value of type @{typ "('key, 'val) trie"} maps keys of
type @{typ "'key list"} to values of type @{typ "'val"}.
In this specific case we track automaton transitions using a two-level
trie that maps the pair of state sets in a key to a table from
observations to successor representations; for actions, automaton
states map directly to agent actions.
\<close>
type_synonym ('s, 'obs) clock_trans_trie
= "('s, ('s, ('obs, 's clock_simWorldsRep) mapping) trie) trie"
type_synonym ('s, 'aAct) clock_acts_trie = "('s, ('s, 'aAct) trie) trie"
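(* A transition lookup therefore proceeds in three stages: the outer trie is
   indexed by the common component of the key, the inner trie by the agent's
   subjective component, and the final mapping by the observation, yielding
   the successor representation. *)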
(*<*)
definition
trans_MapOps_lookup :: "('s :: linorder, 'obs) clock_trans_trie
\<Rightarrow> 's clock_simWorldsRep \<times> 'obs
\<rightharpoonup> 's clock_simWorldsRep"
where
"trans_MapOps_lookup \<equiv> \<lambda>m k.
Option.bind (trie_odlist_lookup m (fst (fst k))) (\<lambda>m.
(Option.bind (trie_odlist_lookup m (snd (fst k))) (\<lambda>m.
Mapping.lookup m (snd k))))"
definition
trans_MapOps_update :: "('s :: linorder) clock_simWorldsRep \<times> 'obs \<Rightarrow> 's clock_simWorldsRep
\<Rightarrow> ('s :: linorder, 'obs) clock_trans_trie
\<Rightarrow> ('s :: linorder, 'obs) clock_trans_trie"
where
"trans_MapOps_update \<equiv> \<lambda>k v m.
trie_odlist_update_with (fst (fst k)) m empty_trie (\<lambda>m.
trie_odlist_update_with (snd (fst k)) m Mapping.empty (\<lambda>m.
Mapping.update (snd k) v m))"
definition
trans_MapOps :: "(('s :: linorder, 'obs) clock_trans_trie,
's clock_simWorldsRep \<times> 'obs, 's clock_simWorldsRep) MapOps"
where
"trans_MapOps \<equiv>
\<lparr> MapOps.empty = empty_trie,
lookup = trans_MapOps_lookup,
update = trans_MapOps_update \<rparr>"
lemma (in FiniteLinorderEnvironment) trans_MapOps:
"MapOps (\<lambda>k. (clock_simAbs (fst k), snd k)) (Clock.jkbpSEC \<times> UNIV) trans_MapOps"
proof
fix k show "MapOps.lookup trans_MapOps (MapOps.empty trans_MapOps) k = None"
unfolding trans_MapOps_def trans_MapOps_lookup_def trie_odlist_lookup_def
by (auto split: prod.split)
next
fix e k k' M
assume k: "(clock_simAbs (fst k), snd k) \<in> Clock.jkbpSEC \<times> (UNIV :: 'z set)"
and k': "(clock_simAbs (fst k'), snd k') \<in> Clock.jkbpSEC \<times> (UNIV :: 'z set)"
show "MapOps.lookup trans_MapOps (MapOps.update trans_MapOps k e M) k'
= (if (clock_simAbs (fst k'), snd k') = (clock_simAbs (fst k), snd k)
then Some e else MapOps.lookup trans_MapOps M k')"
proof(cases "(clock_simAbs (fst k'), snd k') = (clock_simAbs (fst k), snd k)")
case True hence "k = k'"
using inj_onD[OF clock_simAbs_inj_on] k k' by (auto iff: prod_eqI)
thus ?thesis
unfolding trans_MapOps_def trans_MapOps_lookup_def trans_MapOps_update_def trie_odlist_lookup_def trie_odlist_update_with_def
by (simp add: lookup_trie_update_with lookup_update split: option.split prod.split)
next
case False thus ?thesis
unfolding trans_MapOps_def trans_MapOps_lookup_def trans_MapOps_update_def trie_odlist_lookup_def trie_odlist_update_with_def
by (cases "fst k = fst k'")
(auto simp add: lookup_empty lookup_update_neq prod_eq_iff lookup_trie_update_with split: option.split prod.split)
qed
qed
(* A map for the agent actions. *)
definition
acts_MapOps_lookup :: "('s :: linorder, 'aAct) clock_acts_trie
\<Rightarrow> 's clock_simWorldsRep
\<rightharpoonup> 'aAct"
where
"acts_MapOps_lookup \<equiv> \<lambda>m k.
Option.bind (trie_odlist_lookup m (fst k)) (\<lambda>m.
(trie_odlist_lookup m (snd k)))"
definition
acts_MapOps_update :: "('s :: linorder) clock_simWorldsRep \<Rightarrow> 'aAct
\<Rightarrow> ('s :: linorder, 'aAct) clock_acts_trie
\<Rightarrow> ('s :: linorder, 'aAct) clock_acts_trie"
where
"acts_MapOps_update \<equiv> \<lambda>k v m.
trie_odlist_update_with (fst k) m empty_trie (\<lambda>m.
trie_odlist_update (snd k) v m)"
definition
acts_MapOps :: "(('s :: linorder, 'aAct) clock_acts_trie, 's clock_simWorldsRep, 'aAct) MapOps"
where
"acts_MapOps \<equiv>
\<lparr> MapOps.empty = empty_trie,
lookup = acts_MapOps_lookup,
update = acts_MapOps_update \<rparr>"
lemma (in FiniteLinorderEnvironment) acts_MapOps:
"MapOps clock_simAbs Clock.jkbpSEC acts_MapOps"
proof
fix k show "MapOps.lookup acts_MapOps (MapOps.empty acts_MapOps) k = None"
unfolding acts_MapOps_def acts_MapOps_lookup_def trie_odlist_lookup_def
by auto
next
fix e k k' M
assume k: "clock_simAbs k \<in> Clock.jkbpSEC"
and k': "clock_simAbs k' \<in> Clock.jkbpSEC"
show "MapOps.lookup acts_MapOps (MapOps.update acts_MapOps k e M) k'
= (if clock_simAbs k' = clock_simAbs k
then Some e else MapOps.lookup acts_MapOps M k')"
proof(cases "clock_simAbs k' = clock_simAbs k")
case True hence "k = k'"
using inj_onD[OF clock_simAbs_inj_on] k k' by (auto iff: prod_eqI)
thus ?thesis
unfolding acts_MapOps_def acts_MapOps_lookup_def acts_MapOps_update_def
by (auto simp: lookup_trie_update lookup_trie_update_with
trie_odlist_update_with_def trie_odlist_update_def trie_odlist_lookup_def)
next
case False thus ?thesis
unfolding acts_MapOps_def acts_MapOps_lookup_def acts_MapOps_update_def
by (auto simp: lookup_trie_update lookup_trie_update_with
trie_odlist_update_with_def trie_odlist_update_def trie_odlist_lookup_def
dest: prod_eqI
split: option.split)
qed
qed
(*>*)
text\<open>
We define two records @{term "acts_MapOps"} and @{term "trans_MapOps"}
satisfying the @{term "MapOps"} predicate
(\S\ref{sec:kbps-theory-map-ops}). Discharging the obligations in the
@{term "Algorithm"} locale is routine, leaning on the work of
\<^citet>\<open>"DBLP:conf/itp/LammichL10"\<close>.
\<close>
subsubsection\<open>Locale instantiation\<close>
text\<open>
Finally we assemble the algorithm and discharge the proof obligations.
\<close>
sublocale FiniteLinorderEnvironment
< Clock: Algorithm
jkbp envInit envAction envTrans envVal
clock_jview envObs clock_jviewInit clock_jviewIncr
clock_sim clock_simRels clock_simVal
clock_simAbs clock_simObs clock_simInit clock_simTrans clock_simAction
acts_MapOps trans_MapOps
(*<*)
apply (unfold_locales)
apply clarify
apply (rule clock_simInit)
apply simp
apply clarify
apply (erule (1) clock_simObs)
apply clarify
apply (erule (1) clock_simAction)
apply clarify
apply (erule (1) clock_simTrans)
apply (rule acts_MapOps)
apply (rule trans_MapOps)
done
(*>*)
text\<open>
Explicitly, the algorithm for this case is:
\<close>
definition
"mkClockAuto \<equiv> \<lambda>agents jkbp envInit envAction envTrans envVal envObs.
mkAlgAuto acts_MapOps
trans_MapOps
(clock_simObs envObs)
(clock_simInit envInit envObs)
(clock_simTrans agents jkbp envAction envTrans envVal envObs)
(clock_simAction jkbp envVal envObs)
(\<lambda>a. map (clock_simInit envInit envObs a \<circ> envObs a) envInit)"
lemma (in FiniteLinorderEnvironment) mkClockAuto_implements:
"Clock.implements
(mkClockAuto agents jkbp envInit envAction envTrans envVal envObs)"
(*<*)
using Clock.k_mkAlgAuto_implements
unfolding mkClockAuto_def mkAlgAuto_def Clock.k_frontier_def
by simp
(*
We actually run this unfolding of the algorithm. The lemma is keeping
us honest.
*)
definition
"ClockAutoDFS \<equiv> \<lambda>agents jkbp envInit envAction envTrans envVal envObs. \<lambda>a.
alg_dfs acts_MapOps
trans_MapOps
(clock_simObs envObs a)
(clock_simTrans agents jkbp envAction envTrans envVal envObs a)
(clock_simAction jkbp envVal envObs a)
(map (clock_simInit envInit envObs a \<circ> envObs a) envInit)"
lemma (in FiniteLinorderEnvironment)
"mkClockAuto agents jkbp envInit envAction envTrans envVal envObs
= (\<lambda>a. alg_mk_auto acts_MapOps trans_MapOps (clock_simInit a) (ClockAutoDFS agents jkbp envInit envAction envTrans envVal envObs a))"
unfolding mkClockAuto_def ClockAutoDFS_def mkAlgAuto_def alg_mk_auto_def by (simp add: Let_def)
(*>*)
text\<open>
We discuss the clock semantics further in \S\ref{sec:kbps-alg-clock}.
\<close>
(*<*)
end
(*>*)
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Properties of operations on strings
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.String.Properties where
open import Data.Bool.Base using (Bool)
import Data.Char.Properties as Charₚ
import Data.List.Properties as Listₚ
import Data.List.Relation.Binary.Pointwise as Pointwise
import Data.List.Relation.Binary.Lex.Strict as StrictLex
open import Data.String.Base
open import Function
open import Relation.Nullary using (yes; no)
open import Relation.Nullary.Decidable using (map′; isYes)
open import Relation.Binary
open import Relation.Binary.PropositionalEquality.Core
import Relation.Binary.Construct.On as On
import Relation.Binary.PropositionalEquality as PropEq
------------------------------------------------------------------------
-- Primitive properties
open import Agda.Builtin.String.Properties public
renaming ( primStringToListInjective to toList-injective)
------------------------------------------------------------------------
-- Properties of _≈_
≈⇒≡ : _≈_ ⇒ _≡_
≈⇒≡ = toList-injective _ _
∘ Pointwise.Pointwise-≡⇒≡
∘ Pointwise.map Charₚ.≈⇒≡
≈-reflexive : _≡_ ⇒ _≈_
≈-reflexive = Pointwise.map Charₚ.≈-reflexive
∘ Pointwise.≡⇒Pointwise-≡
∘ cong toList
≈-refl : Reflexive _≈_
≈-refl {x} = ≈-reflexive {x} {x} refl
≈-sym : Symmetric _≈_
≈-sym = Pointwise.symmetric (λ {i j} → Charₚ.≈-sym {i} {j})
≈-trans : Transitive _≈_
≈-trans = Pointwise.transitive (λ {i j k} → Charₚ.≈-trans {i} {j} {k})
≈-subst : ∀ {ℓ} → Substitutive _≈_ ℓ
≈-subst P x≈y p = subst P (≈⇒≡ x≈y) p
infix 4 _≈?_
_≈?_ : Decidable _≈_
x ≈? y = Pointwise.decidable Charₚ._≈?_ (toList x) (toList y)
≈-isEquivalence : IsEquivalence _≈_
≈-isEquivalence = record
{ refl = λ {i} → ≈-refl {i}
; sym = λ {i j} → ≈-sym {i} {j}
; trans = λ {i j k} → ≈-trans {i} {j} {k}
}
≈-setoid : Setoid _ _
≈-setoid = record
{ isEquivalence = ≈-isEquivalence
}
≈-isDecEquivalence : IsDecEquivalence _≈_
≈-isDecEquivalence = record
{ isEquivalence = ≈-isEquivalence
; _≟_ = _≈?_
}
≈-decSetoid : DecSetoid _ _
≈-decSetoid = record
{ isDecEquivalence = ≈-isDecEquivalence
}
------------------------------------------------------------------------
-- Properties of _≡_
infix 4 _≟_
_≟_ : Decidable _≡_
x ≟ y = map′ ≈⇒≡ ≈-reflexive $ x ≈? y
≡-setoid : Setoid _ _
≡-setoid = PropEq.setoid String
≡-decSetoid : DecSetoid _ _
≡-decSetoid = PropEq.decSetoid _≟_
------------------------------------------------------------------------
-- Properties of _<_
infix 4 _<?_
_<?_ : Decidable _<_
x <? y = StrictLex.<-decidable Charₚ._≈?_ Charₚ._<?_ (toList x) (toList y)
<-isStrictPartialOrder-≈ : IsStrictPartialOrder _≈_ _<_
<-isStrictPartialOrder-≈ =
On.isStrictPartialOrder
toList
(StrictLex.<-isStrictPartialOrder Charₚ.<-isStrictPartialOrder-≈)
<-isStrictTotalOrder-≈ : IsStrictTotalOrder _≈_ _<_
<-isStrictTotalOrder-≈ =
On.isStrictTotalOrder
toList
(StrictLex.<-isStrictTotalOrder Charₚ.<-isStrictTotalOrder-≈)
<-strictPartialOrder-≈ : StrictPartialOrder _ _ _
<-strictPartialOrder-≈ =
On.strictPartialOrder
(StrictLex.<-strictPartialOrder Charₚ.<-strictPartialOrder-≈)
toList
<-strictTotalOrder-≈ : StrictTotalOrder _ _ _
<-strictTotalOrder-≈ =
On.strictTotalOrder
(StrictLex.<-strictTotalOrder Charₚ.<-strictTotalOrder-≈)
toList
------------------------------------------------------------------------
-- Alternative Boolean equality test.
--
-- Why is the definition _==_ = primStringEquality not used? One
-- reason is that the present definition can sometimes improve type
-- inference, at least with the version of Agda that is current at the
-- time of writing: see unit-test below.
infix 4 _==_
_==_ : String → String → Bool
s₁ == s₂ = isYes (s₁ ≟ s₂)
private
-- The following unit test does not type-check (at the time of
-- writing) if _==_ is replaced by primStringEquality.
data P : (String → Bool) → Set where
p : (c : String) → P (_==_ c)
unit-test : P (_==_ "")
unit-test = p _
-- Version 1.1
setoid = ≡-setoid
{-# WARNING_ON_USAGE setoid
"Warning: setoid was deprecated in v1.1.
Please use ≡-setoid instead."
#-}
decSetoid = ≡-decSetoid
{-# WARNING_ON_USAGE decSetoid
"Warning: decSetoid was deprecated in v1.1.
Please use ≡-decSetoid instead."
#-}
strictTotalOrder = <-strictTotalOrder-≈
{-# WARNING_ON_USAGE strictTotalOrder
"Warning: strictTotalOrder was deprecated in v1.1.
Please use <-strictTotalOrder-≈ instead."
#-}
|
import numpy as np
import pandas as pd
from talib import abstract
from lib.strategy.base_strategy import BaseStrategy
class StochRsi(BaseStrategy):
# settings
period = 30
buy_threshold = 0.7
sell_threshold = 0.3
def __init__(self, feed: pd.DataFrame):
super().__init__(feed)
if self.has_enough_feed():
            # TA-Lib's abstract API names this parameter `timeperiod`, not `period`.
            rsi = abstract.RSI(self.feed, timeperiod=self.period)
            maxrsi = abstract.MAX(rsi, timeperiod=self.period)
            minrsi = abstract.MIN(rsi, timeperiod=self.period)
srsi = (rsi - minrsi) / (maxrsi - minrsi)
self.feed["srsi"] = srsi
prev_srsi = self.feed["srsi"].shift(1)
self.feed["cross_up"] = (self.feed["srsi"] > self.buy_threshold) & (
prev_srsi <= self.buy_threshold
)
self.feed["cross_down"] = (self.feed["srsi"] < self.sell_threshold) & (
prev_srsi >= self.sell_threshold
)
def get_name(self) -> str:
return "stoch_rsi"
def should_buy(self) -> bool:
return self.feed.iloc[-1]["cross_up"]
def should_sell(self) -> bool:
return self.feed.iloc[-1]["cross_down"]
def is_valid(self) -> bool:
return not np.isnan(self.feed.iloc[-1]["srsi"])
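# A minimal usage sketch (hypothetical data; assumes BaseStrategy takes an
# OHLCV DataFrame and that has_enough_feed() checks its length):
#
#   import pandas as pd
#   feed = pd.DataFrame({"open": o, "high": h, "low": l,
#                        "close": c, "volume": v})
#   strategy = StochRsi(feed)
#   if strategy.is_valid() and strategy.should_buy():
#       place_order("buy")  # place_order is a hypothetical helper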
|
From 1954 onward, Wheeler began to devote an increasing amount of his time to encouraging greater public interest in archaeology, and it was in that year that he obtained an agent. Oxford University Press also published two of his books in 1954. The first was a book on archaeological methodologies, Archaeology from the Earth, which was translated into various languages. The second was Rome Beyond the Imperial Frontier, discussing evidence for Roman activity at sites like Arikamedu and Segontium. In 1955 Wheeler released his episodic autobiography, Still Digging, which had sold over 70,000 copies by the end of the year. In 1959, Wheeler wrote Early India and Pakistan, which was published as part of Daniel's "Ancient Peoples and Places" series for Thames and Hudson; as with many earlier books, he was criticised for rushing to conclusions.
|
[STATEMENT]
lemma pathfinish_of_real[simp]:"pathfinish of_real = 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pathfinish of_real = (1::'a)
[PROOF STEP]
unfolding pathfinish_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. of_real 1 = (1::'a)
[PROOF STEP]
by simp |
using Test, SeisModels
@testset "Conversion" begin
@testset "To LinearLayeredModel" begin
# PREMPolyModel
let m = LinearLayeredModel(PREM)
@test m isa LinearLayeredModel
# Default spacing
@test maximum(diff(m.r)) == 20
@test minimum(diff(m.r)) == 0
# Specified spacing
@test maximum(diff(LinearLayeredModel(IASP91, 30).r)) == 30
@test vp(m, 1000) ≈ vp(PREM, 1000) rtol=0.0001
@test vs(m, 3600) ≈ vs(PREM, 3600) rtol=0.0001
end
# SteppedLayeredModel
let m = MOON_WEBER_2011, m′ = LinearLayeredModel(m)
@test m′ isa LinearLayeredModel
@test vp(m, 100) ≈ vp(m′, 100) rtol=0.0001
end
end
@testset "To PREMPolyModel" begin
# SteppedLayeredModel
@test_throws ArgumentError PREMPolyModel(MOON_WEBER_2011, -1)
@test reffrequency(PREMPolyModel(MOON_WEBER_2011, fref=10.0)) == 10.0
let m = MOON_WEBER_2011, m′ = PREMPolyModel(MOON_WEBER_2011)
for property in (:vp, :vs, :density)
@test all(r -> evaluate(m, property, r) ≈ evaluate(m′, property, r),
range(0, stop=surface_radius(m), length=10_000))
end
@test mass(m) ≈ mass(m′)
@test pressure(m, 0) ≈ pressure(m′, 0)
end
# LinearLayeredModel
@test_throws ArgumentError PREMPolyModel(AK135, 0)
@test_throws ArgumentError PREMPolyModel(AK135, -1)
@test reffrequency(PREMPolyModel(AK135, fref=0.1)) == 0.1
let m = AK135, m′ = PREMPolyModel(AK135)
for property in (:vp, :vs, :density)
@test all(r -> evaluate(m, property, r) ≈ evaluate(m′, property, r),
range(0, stop=surface_radius(m), length=10_000))
end
@test mass(m) ≈ mass(m′)
@test pressure(m, 0) ≈ pressure(m′, 0)
end
end
@testset "To SteppedLayeredModel" begin
@test_throws ArgumentError SteppedLayeredModel(IASP91, 0)
for m in (PREM, AK135)
m′ = SteppedLayeredModel(m)
@test m′ isa SteppedLayeredModel
radii = range(0, stop=surface_radius(m), length=10_000)
for property in (:vp, :vs, :density)
@test all(r -> ≈(evaluate.((m, m′), property, r)..., rtol=0.01),
radii)
end
@test mass(m) ≈ mass(m′) rtol=0.001
@test pressure(m, 0) ≈ pressure(m′, 0) rtol=0.001
end
@test maximum(diff(SteppedLayeredModel(PREM, 20).r)) == 20
end
@testset "Roundtrip" begin
let rtol = 0.01
for T in (PREMPolyModel, LinearLayeredModel, SteppedLayeredModel)
for m in (PREM, AK135, MOON_WEBER_2011)
m isa T && continue # Skip identity conversions
m′ = T(m)
m″ = typeof(m)(m′)
models = (m, m″)
radii = range(0, stop=surface_radius(m), length=10_000)
for property in (:vp, :vs, :density)
@test all(r -> ≈(evaluate.(models, property, r)..., rtol=rtol),
radii)
end
@test ≈(mass.(models)..., rtol=rtol)
@test ≈(gravity.(models, surface_radius(m))..., rtol=rtol)
@test ≈(pressure.(models, 0)..., rtol=rtol)
end
end
end
end
end
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Predicate transformers
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Relation.Unary.PredicateTransformer where
open import Level hiding (_⊔_)
open import Function
open import Data.Product
open import Relation.Nullary
open import Relation.Unary
open import Relation.Binary using (REL)
------------------------------------------------------------------------
-- Heterogeneous and homogeneous predicate transformers
PT : ∀ {a b} → Set a → Set b → (ℓ₁ ℓ₂ : Level) → Set _
PT A B ℓ₁ ℓ₂ = Pred A ℓ₁ → Pred B ℓ₂
Pt : ∀ {a} → Set a → (ℓ : Level) → Set _
Pt A ℓ = PT A A ℓ ℓ
-- Composition and identity
_⍮_ : ∀ {a b c ℓ₁ ℓ₂ ℓ₃} {A : Set a} {B : Set b} {C : Set c} →
PT B C ℓ₂ ℓ₃ → PT A B ℓ₁ ℓ₂ → PT A C ℓ₁ _
S ⍮ T = S ∘ T
skip : ∀ {a ℓ} {A : Set a} → PT A A ℓ ℓ
skip P = P
------------------------------------------------------------------------
-- Operations on predicates extend pointwise to predicate transformers
module _ {a b} {A : Set a} {B : Set b} where
-- The bottom and the top of the predicate transformer lattice.
abort : PT A B zero zero
abort = λ _ → ∅
magic : PT A B zero zero
magic = λ _ → U
-- Negation.
∼_ : ∀ {ℓ₁ ℓ₂} → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂
∼ T = ∁ ∘ T
-- Refinement.
infix 4 _⊑_ _⊒_ _⊑′_ _⊒′_
_⊑_ : ∀ {ℓ₁ ℓ₂} → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂ → Set _
S ⊑ T = ∀ {X} → S X ⊆ T X
_⊑′_ : ∀ {ℓ₁ ℓ₂} → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂ → Set _
S ⊑′ T = ∀ X → S X ⊆ T X
_⊒_ : ∀ {ℓ₁ ℓ₂} → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂ → Set _
T ⊒ S = T ⊑ S
_⊒′_ : ∀ {ℓ₁ ℓ₂} → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂ → Set _
T ⊒′ S = S ⊑′ T
-- The dual of refinement.
infix 4 _⋢_
_⋢_ : ∀ {ℓ₁ ℓ₂} → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂ → Set _
S ⋢ T = ∃ λ X → S X ≬ T X
-- Union.
infixl 6 _⊓_
_⊓_ : ∀ {ℓ₁ ℓ₂} → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂
S ⊓ T = λ X → S X ∪ T X
-- Intersection.
infixl 7 _⊔_
_⊔_ : ∀ {ℓ₁ ℓ₂} → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂
S ⊔ T = λ X → S X ∩ T X
-- Implication.
infixl 8 _⇛_
_⇛_ : ∀ {ℓ₁ ℓ₂} → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂ → PT A B ℓ₁ ℓ₂
S ⇛ T = λ X → S X ⇒ T X
-- Infinitary union and intersection.
infix 9 ⨆ ⨅
⨆ : ∀ {ℓ₁ ℓ₂ i} (I : Set i) → (I → PT A B ℓ₁ ℓ₂) → PT A B ℓ₁ _
⨆ I T = λ X → ⋃[ i ∶ I ] T i X
syntax ⨆ I (λ i → T) = ⨆[ i ∶ I ] T
⨅ : ∀ {ℓ₁ ℓ₂ i} (I : Set i) → (I → PT A B ℓ₁ ℓ₂) → PT A B ℓ₁ _
⨅ I T = λ X → ⋂[ i ∶ I ] T i X
syntax ⨅ I (λ i → T) = ⨅[ i ∶ I ] T
-- Angelic and demonic update.
⟨_⟩ : ∀ {ℓ} → REL A B ℓ → PT B A ℓ _
⟨ R ⟩ P = λ x → R x ≬ P
[_] : ∀ {ℓ} → REL A B ℓ → PT B A ℓ _
[ R ] P = λ x → R x ⊆ P
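-- For intuition: ⟨ R ⟩ P x holds when some R-successor of x satisfies P
-- (the angel may choose a favourable outcome), whereas [ R ] P x holds when
-- every R-successor of x satisfies P (the demon cannot choose a bad one).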
|
## Imports
import numpy as np
from handybeam.samplers.abstract_sampler import AbstractSampler
# Class
class ClistSampler(AbstractSampler):
'''
---------------------------------------------
ClistSampler
---------------------------------------------
This is the general sampling grid class. It takes a list of sampling points
and samples the acoustic field at these points.
'''
def __init__(self,parent=None, local_work_size = (128,1,1)):
'''
---------------------------------------------
__init__(parent)
---------------------------------------------
This method intialises an instance of the ClistSampler class.
Parameters
----------
parent : handybeam_core.world.World
This is an instance of the handybeam world class.
local_work_size : tuple
This sets the local work size for the GPU, not recommended to change unless the user
has experience with OpenCL and pyopencl.
'''
super(ClistSampler,self).__init__()
        self.world = parent
        # propagate() reads the world via `self.parent`; keep both names bound.
        self.parent = parent
self.pressure_field = None
self.coordinates = np.zeros((0,3),dtype = np.float32)
self.no_points = None
self.local_work_size = local_work_size
def find_clist_grid_volume(self):
'''
---------------------------------------------
find_clist_grid_volume()
---------------------------------------------
This method finds the volume of the requested sampling grid.
'''
# Find the distance along the x-axis.
x_min = np.min(self.coordinates[:,0])
x_max = np.max(self.coordinates[:,0])
x_length = x_max - x_min
# Find the distance along the y-axis.
y_min = np.min(self.coordinates[:,1])
y_max = np.max(self.coordinates[:,1])
y_length = y_max - y_min
# Find the distance along the z-axis.
z_min = np.min(self.coordinates[:,2])
z_max = np.max(self.coordinates[:,2])
z_length = z_max - z_min
# Find the volume.
self.volume = x_length * y_length * z_length
def add_sampling_points(self, x_list, y_list, z_list):
'''
---------------------------------------------
add_sampling_points(x_list,y_list,z_list)
---------------------------------------------
This method adds the requested sampling points to the sampler object.
Parameters
----------
x_list : numpy array
This is an array containing the x-coordinates of the requested
sampling points.
y_list : numpy array
This is an array containing the y-coordinates of the requested
sampling points.
z_list : numpy array
This is an array containing the z-coordinates of the requested
sampling points.
'''
self.coordinates = np.column_stack([x_list,y_list, z_list])
self.coordinates = self.coordinates.astype(np.float32)
def clear_data(self):
'''
---------------------------------------------
clear_data ()
---------------------------------------------
This method clears the data assigned to the object.
'''
self.coordinates = np.zeros((0,3),dtype = np.float32)
self.pressure_field = None
def propagate(self, print_performance_feedback=False):
'''
---------------------------------------------
propagate(print_performance_feedback)
---------------------------------------------
This method calls the clist_propagator to propagate the acoustic field to
the desired sampling points.
Parameters
----------
print_performance_feedback : boolean
Boolean value determining whether or not to output the GPU performance.
'''
kernel_output = self.parent.propagator.clist_propagator(
tx_array=self.parent.tx_array,
sampling_point_list=self.coordinates,
local_work_size= self.local_work_size,
print_performance_feedback=print_performance_feedback
)
        # np.complex was removed in NumPy 1.24; the plain literal 1j is equivalent.
        self.pressure_field = np.nan_to_num(kernel_output[:, 0] + 1j * kernel_output[:, 1])
self.no_points = len(self.coordinates[:,0])
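# A minimal usage sketch (hypothetical setup; assumes a World instance `world`
# that exposes `propagator` and `tx_array` as used in propagate() above):
#
#   sampler = ClistSampler(parent=world)
#   xs = np.linspace(-0.01, 0.01, 100)
#   sampler.add_sampling_points(xs, np.zeros_like(xs), np.full_like(xs, 0.1))
#   sampler.propagate()
#   amplitude = np.abs(sampler.pressure_field)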
|
[STATEMENT]
lemma swap_delete_add_edge:
assumes "(a, b, c) \<noteq> (x, y, z)"
shows "delete_edge a b c (add_edge x y z H) = add_edge x y z (delete_edge a b c H)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. delete_edge a b c (add_edge x y z H) = add_edge x y z (delete_edge a b c H)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
(a, b, c) \<noteq> (x, y, z)
goal (1 subgoal):
1. delete_edge a b c (add_edge x y z H) = add_edge x y z (delete_edge a b c H)
[PROOF STEP]
unfolding delete_edge_def add_edge_def
[PROOF STATE]
proof (prove)
using this:
(a, b, c) \<noteq> (x, y, z)
goal (1 subgoal):
1. \<lparr>nodes = nodes \<lparr>nodes = {x, z} \<union> nodes H, edges = insert (x, y, z) (edges H)\<rparr>, edges = edges \<lparr>nodes = {x, z} \<union> nodes H, edges = insert (x, y, z) (edges H)\<rparr> - {(a, b, c)}\<rparr> = \<lparr>nodes = {x, z} \<union> nodes \<lparr>nodes = nodes H, edges = edges H - {(a, b, c)}\<rparr>, edges = insert (x, y, z) (edges \<lparr>nodes = nodes H, edges = edges H - {(a, b, c)}\<rparr>)\<rparr>
[PROOF STEP]
by auto |
Up to four HDDs/SSDs can be erased at the same time. IDE HDDs can also be connected with a dedicated adapter.
Drives can be swapped out in the order in which erasure completes, and an asynchronous erase function lets a newly inserted drive be erased while others are still in progress. Five erase algorithms are built in, so the erasing method can be selected to suit the application.
An HDD copy function is included: data from one HDD can be copied to up to three HDDs simultaneously.
A record of the erased data can be printed with the included dedicated printer. The work log can also be output as text data to a USB memory stick.
A dedicated waterproof/dustproof carrying case is included, which holds the main unit and all accessories.
This product is compatible with SATA 6 Gbps HDDs/SSDs, but the internal transfer speed is up to 130 MB/s.
2.5-inch and 3.5-inch HDDs cannot be connected at the same time.
\thispagestyle{empty}
%----------------------------------------------------------------------
\chapter{Control and optimization}
\label{control.chap}
%----------------------------------------------------------------------
\section{Objectives}
%----------------------------------------------------------------------
Once a system has been understood well enough, the main work still remains: enhancing it. Most of the time this means optimization, and control can be seen as online optimization (i.e. continuously making the system better). There are as many control techniques as there are optimization techniques. Since this reader aims at keeping things practical, we will not develop all possible ideas here. Instead we insist on two main ideas:
\begin{enumerate}
\item make explicit a criterion to be optimized: in principle, this is rooted in the system description and in what we want to do with the system. There are many ways to enhance a system, and very often the models are too complicated to yield an explicit optimum (when they have one at all). But without a criterion, there is no way even to know what is better (or worse).
\item simplify the model so that the optimization (or control) problem can be solved by simple tools. As a rule, it is often better to get a simple but efficient control than to try to compute the optimal one. This rule also holds for optimization: the best is the enemy of the good; well enough is enough in lots of practical systems.
\end{enumerate}
Therefore the next sections apply some tools for PDEs, as well as simple tools that become usable after smartly simplifying the problem.
\section{Tools and examples}
\subsection{Kalman filter}
\input{control-kalman}
\subsection{PID control}
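As a minimal illustration (the standard textbook form, given here as a sketch rather than a finished design), a PID controller computes the command $u(t)$ from the error $e(t)$ between the setpoint and the measured output:
\begin{equation}
u(t) = K_p \, e(t) + K_i \int_0^t e(\tau) \, \mathrm{d}\tau + K_d \, \frac{\mathrm{d}e(t)}{\mathrm{d}t},
\end{equation}
where the gains $K_p$, $K_i$ and $K_d$ are tuned by hand or by simple rules. This is a direct application of the second idea above: a simple control that is well enough for many practical systems.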
\subsection{Adjoint optimization}
|
import numpy as np

from ..base import Processor


@Processor.register("min_max")
class MinMax(Processor):
    @property
    def input_dim(self) -> int:
        return 1

    @property
    def output_dim(self) -> int:
        return 1

    def fit(self, columns: np.ndarray) -> Processor:
        d_min, d_max = columns.min(), columns.max()
        self._caches["min"], self._caches["diff"] = d_min, d_max - d_min
        return self

    def _process(self, columns: np.ndarray) -> np.ndarray:
        d_min, diff = map(self._caches.get, ["min", "diff"])
        columns -= d_min
        columns /= diff
        return columns

    def _recover(self, processed_columns: np.ndarray) -> np.ndarray:
        d_min, diff = map(self._caches.get, ["min", "diff"])
        processed_columns *= diff
        processed_columns += d_min
        return processed_columns


__all__ = ["MinMax"]
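
# A minimal round-trip sketch (assumes the base Processor supplies the
# `_caches` dict and that `_process`/`_recover` may be called directly;
# note that both operate on their float argument in place):
#
#   proc = MinMax().fit(np.array([1.0, 3.0, 5.0]))
#   proc._process(np.array([1.0, 3.0, 5.0]))   # -> array([0. , 0.5, 1. ])
#   proc._recover(np.array([0.0, 0.5, 1.0]))   # -> array([1., 3., 5.])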
|
module Bracketing

abstract type BracketingMethod end

struct Bracket
    left::Real
    right::Real
    function Bracket(a, b)
        if a <= b
            new(a, b)
        else
            new(b, a)
        end
    end
end

function search(params::BracketingMethod, f, x_0 = 0)::Real
    bracket = find_bracket(f, x_0)
    search_bracket(params, f, bracket).right
end

"""
Algorithm 3.1

`max_iter` is a very high number since upon reaching this limit the algorithm
will crash with an error. This is because a failure to find a bracket means
there's something wrong with either the algorithm or the objective function.
"""
function find_bracket(f, x; step = 1e-2, factor = 2.0, max_iter = 1_000_000)::Bracket
    a, ya = x, f(x)
    b, yb = a + step, f(a + step)
    if yb > ya
        a, b = b, a
        ya, yb = yb, ya
        step = step > 0 ? -step : step
    end
    for _ = 1:max_iter
        c, yc = b + step, f(b + step)
        if yc > yb
            return Bracket(a, c)
        end
        a, ya, b, yb = b, yb, c, yc
        step *= factor
    end
    error("Bracketing.find_bracket: max number of iterations reached")
end

struct GoldenSection <: BracketingMethod
    ϵ
    max_iter
    GoldenSection(; ϵ = eps(), max_iter = 100) = new(ϵ, max_iter)
end

"""
Golden section search
Algorithm 3.3
"""
function search_bracket(params::GoldenSection, f, bracket::Bracket)::Bracket
    a, b = bracket.left, bracket.right
    max_eval = (b - a) / (params.ϵ * log(Base.MathConstants.golden))
    n = params.max_iter > max_eval ? max_eval : params.max_iter
    ρ = Base.MathConstants.golden - 1
    d = ρ * b + (1 - ρ) * a
    yd = f(d)
    for _ = 1:n - 1
        if abs(a - b) < params.ϵ
            return Bracket(a, b)
        end
        c = ρ * a + (1 - ρ) * b
        yc = f(c)
        if yc < yd
            b, d, yd = d, c, yc
        else
            a, b = b, c
        end
    end
    Bracket(a, b)
end

end # module
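
# A small usage sketch (assumes the objective is unimodal near the start point):
#
#   xmin = Bracketing.search(Bracketing.GoldenSection(), x -> (x - 2.0)^2)
#   # xmin ≈ 2.0, up to the ϵ tolerance of GoldenSection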
|
Formal statement is: lemma starlike_imp_contractible: fixes S :: "'a::real_normed_vector set" shows "starlike S \<Longrightarrow> contractible S" Informal statement is: If $S$ is a starlike set, then $S$ is contractible. |
[STATEMENT]
lemma oneBIT_step4y:
assumes "x\<noteq>y" "x : {x0,y0}" "y\<in>{x0,y0}"
shows "BIT_Step (type4 [x0, y0] x y) y = type0 [x0, y0] y x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. type4 [x0, y0] x y \<bind> (\<lambda>s. BIT_step s y \<bind> (\<lambda>(a, is'). return_pmf (step (fst s) y a, is'))) = type0 [x0, y0] y x
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x \<noteq> y
x \<in> {x0, y0}
y \<in> {x0, y0}
goal (1 subgoal):
1. type4 [x0, y0] x y \<bind> (\<lambda>s. BIT_step s y \<bind> (\<lambda>(a, is'). return_pmf (step (fst s) y a, is'))) = type0 [x0, y0] y x
[PROOF STEP]
apply(simp add: type4_def BIT_step_def bind_assoc_pmf bind_return_pmf step_def mtf2_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x \<noteq> y; x = x0 \<or> x = y0; y = x0 \<or> y = y0\<rbrakk> \<Longrightarrow> (y0 = y \<longrightarrow> (x0 = y \<longrightarrow> bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xb. return_pmf (if y \<in> set (fst (if \<not> xa then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y - (if fst (snd (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))) ! index (snd (snd (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y then 0 else length (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))))..<index (swaps [] (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y] (swaps [] (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) else swaps [] (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))), flip (index (snd (snd (if \<not> xa then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y) (fst (snd (if \<not> xa then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))), snd (snd (if \<not> xa then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))))) = type0 [y, y] y x) \<and> (x0 \<noteq> y \<longrightarrow> bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xb. return_pmf (if y \<in> set (fst (if \<not> xb then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y - (if fst (snd (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))) ! index (snd (snd (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y then 0 else length (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))))..<index (swaps [] (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y] (swaps [] (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) else swaps [] (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))), flip (index (snd (snd (if \<not> xb then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y) (fst (snd (if \<not> xb then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))), snd (snd (if \<not> xb then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))))) = type0 [x0, y] y x)) \<and> (y0 \<noteq> y \<longrightarrow> (x0 = y \<longrightarrow> bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xb. return_pmf (if y \<in> set (fst (if \<not> xa then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y - (if fst (snd (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))) ! index (snd (snd (if \<not> [xa, xb] ! 
0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y then 0 else length (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))))..<index (swaps [] (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y] (swaps [] (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) else swaps [] (fst (if \<not> [xa, xb] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))), flip (index (snd (snd (if \<not> xa then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y) (fst (snd (if \<not> xa then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))), snd (snd (if \<not> xa then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))))) = type0 [y, y0] y x) \<and> (x0 \<noteq> y \<longrightarrow> bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xb. return_pmf (if y \<in> set (fst (if \<not> [] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y - (if fst (snd (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))) ! index (snd (snd (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y then 0 else length (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))))..<index (swaps [] (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) y] (swaps [] (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))) else swaps [] (fst (if \<not> [xa, xb] ! (index [y0] y + 1) then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))), flip (length (snd (snd (if \<not> [] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0]))))) (fst (snd (if \<not> [] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))), snd (snd (if \<not> [] ! 0 then ([x, y], [xa, xb], [x0, y0]) else ([y, x], [xa, xb], [x0, y0])))))) = type0 [x0, y0] y x))
[PROOF STEP]
apply(safe)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>x0 \<noteq> y0; x = x0; y = y0; x0 \<noteq> y0\<rbrakk> \<Longrightarrow> bernoulli_pmf (1 / 2) \<bind> (\<lambda>x. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. return_pmf (if y0 \<in> set (fst (if \<not> xa then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) y0 - (if fst (snd (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0]))) ! index (snd (snd (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) y0 then 0 else length (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0]))))..<index (swaps [] (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) y0] (swaps [] (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) else swaps [] (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0]))), flip (index (snd (snd (if \<not> xa then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) y0) (fst (snd (if \<not> xa then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))), snd (snd (if \<not> xa then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))))) = type0 [x0, y0] y0 x0
2. \<lbrakk>y0 \<noteq> x0; x = y0; y = x0; y0 \<noteq> x0\<rbrakk> \<Longrightarrow> bernoulli_pmf (1 / 2) \<bind> (\<lambda>x. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. return_pmf (if x0 \<in> set (fst (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0 - (if fst (snd (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))) ! index (snd (snd (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0 then 0 else length (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))))..<index (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0] (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) else swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))), flip (index (snd (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0) (fst (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))), snd (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))))) = type0 [x0, y0] x0 y0
[PROOF STEP]
apply(rule pmf_eqI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>i. \<lbrakk>x0 \<noteq> y0; x = x0; y = y0; x0 \<noteq> y0\<rbrakk> \<Longrightarrow> pmf (bernoulli_pmf (1 / 2) \<bind> (\<lambda>x. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. return_pmf (if y0 \<in> set (fst (if \<not> xa then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) y0 - (if fst (snd (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0]))) ! index (snd (snd (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) y0 then 0 else length (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0]))))..<index (swaps [] (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) y0] (swaps [] (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) else swaps [] (fst (if \<not> [x, xa] ! (index [y0] y0 + 1) then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0]))), flip (index (snd (snd (if \<not> xa then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))) y0) (fst (snd (if \<not> xa then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0])))), snd (snd (if \<not> xa then ([x0, y0], [x, xa], [x0, y0]) else ([y0, x0], [x, xa], [x0, y0]))))))) i = pmf (type0 [x0, y0] y0 x0) i
2. \<lbrakk>y0 \<noteq> x0; x = y0; y = x0; y0 \<noteq> x0\<rbrakk> \<Longrightarrow> bernoulli_pmf (1 / 2) \<bind> (\<lambda>x. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. return_pmf (if x0 \<in> set (fst (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0 - (if fst (snd (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))) ! index (snd (snd (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0 then 0 else length (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))))..<index (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0] (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) else swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))), flip (index (snd (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0) (fst (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))), snd (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))))) = type0 [x0, y0] x0 y0
[PROOF STEP]
apply(simp add: add.commute pmf_bind swap_def type0_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>y0 \<noteq> x0; x = y0; y = x0; y0 \<noteq> x0\<rbrakk> \<Longrightarrow> bernoulli_pmf (1 / 2) \<bind> (\<lambda>x. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. return_pmf (if x0 \<in> set (fst (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0 - (if fst (snd (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))) ! index (snd (snd (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0 then 0 else length (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))))..<index (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0] (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) else swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))), flip (index (snd (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0) (fst (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))), snd (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))))) = type0 [x0, y0] x0 y0
[PROOF STEP]
apply(rule pmf_eqI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i. \<lbrakk>y0 \<noteq> x0; x = y0; y = x0; y0 \<noteq> x0\<rbrakk> \<Longrightarrow> pmf (bernoulli_pmf (1 / 2) \<bind> (\<lambda>x. bernoulli_pmf (1 / 2) \<bind> (\<lambda>xa. return_pmf (if x0 \<in> set (fst (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))) then swaps [index (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0 - (if fst (snd (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))) ! index (snd (snd (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0 then 0 else length (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))))..<index (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0] (swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) else swaps [] (fst (if \<not> [x, xa] ! 0 then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))), flip (index (snd (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))) x0) (fst (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0])))), snd (snd (if \<not> x then ([y0, x0], [x, xa], [x0, y0]) else ([x0, y0], [x, xa], [x0, y0]))))))) i = pmf (type0 [x0, y0] x0 y0) i
[PROOF STEP]
apply(simp add: pmf_bind swap_def type0_def)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
[STATEMENT]
lemma compet_max_profit:
assumes "j \<in> firms"
assumes "competitive_equilibrium P X Y"
shows "Y j \<in> profit_maximisation P (production_sets j)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Y j \<in> profit_maximisation P (production_sets j)
[PROOF STEP]
using assms(1) assms(2)
[PROOF STATE]
proof (prove)
using this:
j \<in> firms
competitive_equilibrium P X Y
goal (1 subgoal):
1. Y j \<in> profit_maximisation P (production_sets j)
[PROOF STEP]
by blast |
wiki:WikiPedia:Chicago_Style_Pizza Chicago Style Pizza was first created at http://www.pizzeriauno.com Pizzeria Uno by Ike Sewell in 1943. The idea was to create a pizza that was a whole meal. The pizza is heavy, thick and typically eaten with a knife and fork.
Chicago Style Pizza has several variations.
Deep Dish: Baked in a 2-3 inch deep pan, it consists of a crust, cheese, toppings, and a thick layer of tomato sauce.
Stuffed Pizza: Similar to deep dish, but with an additional thin layer of crust set on top of the toppings and then covered by another layer of sauce.
Thin Crust: Thin, crispy crust with a little bit of sauce, a fair amount of cheese, and toppings. It is typically cut into squares rather than wedges.
There are no restaurants in Davis, but there are a few in nearby cities:
The most famous purveyor on the West Coast is Zachary's Pizza in the Bay Area.
If you don't want to drive that far, there is Chicago Fire Restaurant in Midtown Sacramento and Folsom, which is only a few years old and hasn't had the time to develop as much of a following, but is a close second or comparable depending on who you talk to.
If you are really desperate and can't make it out to Folsom, you can hit Zelda's Original Gourmet Pizza in Midtown Sacramento, but it is probably the least of the three.
BJs Restaurant and Brewery, located in Vacaville, does not seem to specialize in Chicago-style pizza, but it is on the menu. There is also a BJ's in Folsom as well. It has more similarities with Pizza Hut Pan Pizza than its Chicago brethren, but it is tasty and reasonably priced.
I am not really a pizza snob, but I'd say BJ's definitely specializes specifically in Chicago style. It was my favorite place to get pizza when I lived in San Diego and I'm thrilled it's finally up here. At the very least it's awesome and the closest to Davis. Plus you can get a pizookie for dessert and no other pizza place can top that!
There are also places that claim to have Chicago Style Pizza, but are not. Papa Murphy's claims to have a Chicago Style Stuffed Pizza, but lacking the deep dish needed to cook it and hold in all the ingredients, it falls a bit short.
Frozen Uno's pizza is also available at supermarkets.
Patxi's Chicago Style Pizza in SF is pretty good. I went there for dinner last night. Their deep dish pizzas are INTENSE. It's on Hayes, about three blocks west of Van Ness. Try it next time you are over there. Users/JulienBiewerElstob Julien
Discussion
What does this have to do with Davis?
People in Davis eat it, and there may be a few displaced Davis Midwest Connection Midwesterners in Davis looking for it. That is why I created this page originally. Just because it isn't in Davis proper does not mean it shouldn't be here. If you notice, there are a LOT of pages related to things outside of Davis, but are part of the Davis experience. Users/RogerClark
I hear what you're saying, but according to the information above there's nowhere in Davis to get it. It seems like a food list page with Chicago Style Pizza on it and an outgoing sacwiki link might be more appropriate. This page says nothing other than you're SOL, which makes it seem silly to have a dedicated page to it. Users/DomenicSantangelo
There's lots of silly pages here, that's what makes it fun, that and there is no Sacwiki to link to. It is more than just what is physically in Davis; it is from a Davis perspective. If you live in Davis and want something specific (i.e. Chicago Style Pizza), it may not be in town, but at least you know where you can find it. It could probably be integrated into another page, but it isn't doing any harm by being here. Users/RogerClark
The fact of absence is a useful fact when looking for something. It is certainly different than absence of fact, as it would be if this page were deleted. I'm not saying that there should be pages that say Polar Bears: none in Davis, but there are enough pizza joints to warrant a search for a Chicago style pizza. This page provides the answer to that search: there ain't no sich beast in the area. Users/JabberWokky
Also, Zelda's is only 15 miles from Davis... a distance someone could even bike if desperate enough. This page definitely provides useful info to Davis residents. Users/JevanGray Jevan
There's a Chicago Style pizza place opening up in Vacaville. It's called BJ's, does anyone know of this place? Does it have a reputation already? Users/MichelleAccurso
BJs Restaurant and Brewery BJ's is apparently a mostly west coast chain. I haven't been to one so I have no clue, but the site makes it look more like a TGI Chilibees sorta place that happens to have their interpretation of Chicago Style Pizza. I will hold off completely judging it until I try one out, but initial impressions are not good. Users/RogerClark Chicago Style Pizza Snob
Ok, I have tried it and I would go there for non-pizza related fare, but it is a rather good Pan Pizza. Users/RogerClark Chicago Style Pizza Snob
Papa Murphy's Chicago Style pie, as mentioned above, is very good value, and pretty tasty. I'm not sure what the original comment referred to, as the pie comes in its own baking pan, which browns the bottom nicely and contains all the ingredients just fine. I think it is the only Chicago Style pizza available in Davis. Users/ChrisLambertus
As a native Chicagoan, I've been quite pleased with Chicago Fire. Their stuffed pizza is excellent and authentic, and even better, their thin crust is the cut-into-squares stuff that used to be the only delivery option on the South Side. The only issue is that they tend to get very crowded on weekends. Between the wait for a table and the wait (~40 minutes) for a stuffed pizza, it's not the place to go if you want a quick meal. Users/nmwallace
As a displaced Midwesterner, I am glad for this page, because I want to know where I can get good Chicago pizza without the $300 round trip! Users/deeray82
|
Autumn Songs with Music; Blackie, 1927
|
[STATEMENT]
lemma nsqn_addpreRT_inv [simp]:
"\<And>rt dip npre dip'. dip \<in> kD(rt) \<Longrightarrow>
nsqn (the (addpreRT rt dip npre)) dip' = nsqn rt dip'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>rt dip npre dip'. dip \<in> kD rt \<Longrightarrow> nsqn (the (addpreRT rt dip npre)) dip' = nsqn rt dip'
[PROOF STEP]
unfolding addpreRT_def nsqn_def nsqn\<^sub>r_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>rt dip npre dip'. dip \<in> kD rt \<Longrightarrow> (case the (map_option (\<lambda>s. rt(dip \<mapsto> addpre s npre)) (rt dip)) dip' of None \<Rightarrow> 0 | Some r \<Rightarrow> if \<pi>\<^sub>4 r = val \<or> \<pi>\<^sub>2 r = 0 then \<pi>\<^sub>2 r else \<pi>\<^sub>2 r - 1) = (case rt dip' of None \<Rightarrow> 0 | Some r \<Rightarrow> if \<pi>\<^sub>4 r = val \<or> \<pi>\<^sub>2 r = 0 then \<pi>\<^sub>2 r else \<pi>\<^sub>2 r - 1)
[PROOF STEP]
by (frule kD_Some) (clarsimp split: option.split) |
% NeuroCam manual - IR modifications for Logitech c920.
% Written by Christopher Thomas.
% Copyright (c) 2021 by Vanderbilt University. This work is released under
% the Creative Commons Attribution-ShareAlike 4.0 International License.
\chapter{IR Modifications for Logitech C920 Camera}
\label{c920}
The Logitech C920 webcam can be readily modified for near-infrared use. This
involves the following actions:
\begin{itemize}
\item Disassembling the camera.
\item Removing the optics assembly from the board.
\item Removing the infrared filter from the optics assembly.
\item Replacing the infrared filter with glass plates of appropriate
thickness.
\end{itemize}
These actions are documented in detail below.
\section{Confirm Camera Identity}
\includegraphics[height=2in]{pics-c920/01-start.jpg}
This documentation only applies to the Logitech C920 camera. Other cameras,
even closely-related models, have different internal layouts and methods of
disassembly.
\section{Remove Stickers and Microphone Guards}
\includegraphics[height=2in]{pics-c920/02-bottomscrews.jpg}
The screws securing the microphone guards are covered by two layers of
stickers. Both layers must be removed to access them.
\section{Remove Cover}
\includegraphics[height=2in]{pics-c920/03-coverscrews.jpg}
\section{Remove Mounting Bracket}
\includegraphics[height=2in]{pics-c920/04-bracketscrews.jpg}
There is a peg between each pair of bracket screws. To remove the mounting
bracket, rotate the bracket so that its metal flanges lift off of these pegs.
The flanges will then slide out of the camera easily.
\section{Remove Spacer}
\includegraphics[height=2in]{pics-c920/05-spacerscrews.jpg}
\section{Remove Board}
\includegraphics[height=2in]{pics-c920/06-boardscrews.jpg}
The USB cable is fixed to the case. The board can be removed by rotating it,
while bending the side of the case slightly to release the catches that secure
the board and the USB cable. Do not bend the board itself (that may destroy
it).
Alternatively, the USB cable can be unplugged from the board (via the header
and ground pin). This is risky, as both connectors are easy to damage.
\section{Optional: Remove LEDs}
\includegraphics[height=2in]{pics-c920/06b-leds.jpg}
The camera status LEDs are white rectangular components with labels ``D3''
through ``D8''. Only four of the six positions are populated. These can be
removed using ``solder wick'' per Step \ref{sect-920-desolder} if desired.
Tearing pads is tolerable, as long as no other traces or components are
disturbed.
Do not use a heat gun. Hot air will desolder other nearby components and may
blow them out of position.
\section{Unscrew Optics Assembly}
\includegraphics[height=2in]{pics-c920/07-opticsscrews.jpg}
After being unscrewed, the optics assembly will remain secured to the board
by its soldered contacts.
\section{Un-Solder and Remove Optics Assembly}
\label{sect-920-desolder}
\includegraphics[height=2in]{pics-c920/08-opticsleads.jpg}
The optics assembly leads can be desoldered using ``solder wick'' (also
called ``desoldering braid''). A variety that has flux in the braid is
recommended (though flux can be applied separately if necessary). Setting
the iron to high temperature makes desoldering easier, but care must be
taken to avoid heat damage to nearby components.
Be gentle when removing the optics assembly, as the contact pads it's
soldered to can be torn off of the board if they're still partly bonded to
the leads.
Do not use a heat gun. The sensor die is nearby and may be damaged. The
anisotropic film holding the sensor die to the board may also be damaged.
\section{Verify Absence of Damage and Dust}
\includegraphics[height=2in]{pics-c920/09-filterdie.jpg}
The IR filter (in the optics assembly) and sensor die (on the board) are now
visible.
The sensor die is fragile (and has a fragile coating). It should be visually
inspected to confirm that no scratches or dust particles are present. Take
care not to breathe on it while inspecting it, as moisture will leave droplets
on the die.
If moisture or dust are present on the die, the die can be cleaned by wiping
it very gently using ``lens tissue'' (also called ``lens paper''). Wiping that
is not sufficiently gentle will instead scratch the die by dragging dust
particles across it.
The lens tissue may be soaked in alcohol prior to wiping. This makes it
easier to safely remove dust, and is the only way to remove water stains,
but it is possible that alcohol may damage the coatings on the die. Wipe
diagonally from one corner of the die to the other, to minimize the amount
of liquid that clings to the trailing edge of the die, and then wipe again
with a dry piece of lens tissue (or a dry corner of the same piece).
Do not allow alcohol to contact the anisotropic film holding the die to the
board, as it will likely cause delamination.
\section{Heat Infrared Filter}
\includegraphics[height=3in]{pics-c920/10-heatgun.jpg}
The infrared filter plate is bonded to the optics assembly with glue. Play a
heat gun around the edge of the filter plate to soften this glue. Use a
circular motion to avoid overheating any one part of the edge or allowing any
other part to cool. Use a low temperature setting to avoid cracking the plate
due to heat shock and to avoid melting the plastic housing.
\section{Remove Infrared Filter}
\includegraphics[height=2in]{pics-c920/11-knife.jpg}
Use the edge of a sharp knife to pry the filter plate out of the optics
assembly. Lift from one corner, letting the plate hinge on one of the
opposing edges. If there is any resistance, use the heat gun again to soften
the glue.
Discard the filter plate after removal.
\section{Scribe Microscope Cover Slips}
\includegraphics[height=2in]{pics-c920/13-scribingslides.jpg}
The filter plate is 7mm by 7mm by 0.31mm. It must be replaced by one or more
pieces of glass with a total thickness of 0.35mm or more for the camera to
focus properly. This corresponds to the thickness of two \#2 microscope cover
slips (also called ``cover glasses'').
To cut these cover slips, draw guide lines on to a sheet of paper, line
up the edge of the cover slip with one guide line, place a straightedge
over the other guide line, and scribe the glass several times using a
carbide-tipped scribing tool. Carefully snap the cover slip along this line
(holding it with the scribed side facing away from you, and then bending it
so that it bows outwards in that direction).
\textbf{Take extreme care to clean up flakes and slivers of glass.} These
will be produced, and are incredibly sharp (their edges are as thin as a
single atom).
Cut the cover slip sections slightly smaller than 7mm by 7mm; there is no way
to trim them after they are cut.
\section{Wash Cover Slip Plates}
\includegraphics[height=3in]{pics-c920/14-washingslides.jpg}
The cover slip plates must be absolutely free of dust and of oils before
being bonded to the optics assembly. To ensure this, pour acetone into a
small glass or ceramic dish, grasp the edges of a plate with tweezers,
submerge the plate in acetone, and agitate it for a few seconds. Place the
plate onto a dry piece of lens tissue, fold the paper over, and wipe the
plate to remove droplets of acetone and remaining dust. Set clean plates
aside on another piece of lens tissue.
Take care to grasp the plates by the edges, with tweezers. Grasping the
faces will scrape the surfaces of the plates. Squeezing too hard at the
edges will shatter the plates. Grasping faces with fingers will deposit
grease on the plates, and grasping edges with fingers will result in
injury, as snapped edges have atom-thin sharp cusps.
\section{Glue Cover Slip Plates into Optics Assembly}
\includegraphics[height=3in]{pics-c920/15-superglue.jpg}
Carefully stack two cover slip plates into the frame that was occupied by
the IR filter. Carefully apply a bead of cyanoacrylate glue (``super glue'')
to each corner of the frame, wiping the bead outwards (away from the middle
of the plate) when applying it. Wipe each bead again with a piece of lens
tissue to reduce the bead's height.
Do not bond the edges of the frame; just the corners. There has to be ample
space for cyanoacrylate vapours to escape to avoid frosting the plates or
the optics within the assembly. Use a new piece of lens tissue every time
you wipe a bead of glue, to avoid smearing glue on to the cover slip plates.
Be very gentle when applying or wiping glue, to avoid popping the cover slip
plates out of the frame. Cover slip plates can be re-seated using tweezers
if this occurs.
Allow the glue to dry for at least 5 minutes before mounting the optics
assembly back on to the camera board.
%
% This is the end of the file.
|
lemma homotopic_loops_imp_path_component_value: "\<lbrakk>homotopic_loops S p q; 0 \<le> t; t \<le> 1\<rbrakk> \<Longrightarrow> path_component S (p t) (q t)" |
-- |
-- Module: Math.NumberTheory.DirichletCharactersTests
-- Copyright: (c) 2018 Bhavik Mehta
-- License: MIT
-- Maintainer: Andrew Lelechenko <[email protected]>
--
-- Tests for Math.NumberTheory.DirichletCharacters
--
{-# LANGUAGE GADTs #-}
{-# LANGUAGE Rank2Types #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE ViewPatterns #-}
module Math.NumberTheory.DirichletCharactersTests where
import Test.Tasty
import Data.Complex
import Data.List (genericLength)
import Data.Maybe (isJust, mapMaybe)
import Data.Proxy
import Data.Semigroup
import qualified Data.Vector as V
import Numeric.Natural
import GHC.TypeNats (SomeNat(..), someNatVal, KnownNat, natVal, sameNat)
import Data.Type.Equality
import Math.NumberTheory.ArithmeticFunctions (totient, divisorsList)
import Math.NumberTheory.DirichletCharacters
import qualified Math.NumberTheory.Moduli.Sqrt as J
import Math.NumberTheory.Moduli.Class (SomeMod(..), modulo)
import Math.NumberTheory.TestUtils (testSmallAndQuick, Positive(..))
-- | This tests property 6 from https://en.wikipedia.org/wiki/Dirichlet_character#Axiomatic_definition
dirCharOrder :: forall n. KnownNat n => DirichletCharacter n -> Bool
dirCharOrder chi = isPrincipal (totient n `stimes` chi)
where n = natVal @n Proxy
-- | Tests wikipedia's property 3 (note 1,2,5 are essentially enforced by the type system).
testMultiplicative :: KnownNat n => DirichletCharacter n -> Natural -> Natural -> Bool
testMultiplicative chi (fromIntegral -> a) (fromIntegral -> b) = chiAB == chiAchiB
where chi' = evalGeneral chi
chiAB = chi' (a*b)
chiAchiB = (<>) <$> chi' a <*> chi' b
-- | Test property 4 from wikipedia
testAtOne :: KnownNat n => DirichletCharacter n -> Bool
testAtOne chi = eval chi mempty == mempty
dirCharProperty :: (forall n. KnownNat n => DirichletCharacter n -> a) -> Positive Natural -> Natural -> a
dirCharProperty test (Positive n) i =
case someNatVal n of
SomeNat (Proxy :: Proxy n) -> test chi
where chi = indexToChar @n (i `mod` totient n)
realCharProperty :: (forall n. KnownNat n => RealCharacter n -> a) -> Positive Natural -> Int -> a
realCharProperty test (Positive n) i =
case someNatVal n of
SomeNat (Proxy :: Proxy n) -> test chi
where chi = chars !! (i `mod` length chars)
chars = mapMaybe isRealCharacter [principalChar @n .. maxBound]
-- | There should be totient(n) characters
countCharacters :: Positive Natural -> Bool
countCharacters (Positive n) =
case someNatVal n of
SomeNat (Proxy :: Proxy n) ->
genericLength (allChars @n) == totient n
-- | The principal character should be 1 if gcd k n is 1 and 0 otherwise
principalCase :: Positive Natural -> Positive Integer -> Bool
principalCase (Positive n) (Positive k) =
case k `modulo` n of
SomeMod a -> evalGeneral chi a == if gcd k (fromIntegral n) > 1
then Zero
else mempty
where chi = principalChar
InfMod{} -> False
-- | Test the orthogonality relations https://en.wikipedia.org/wiki/Dirichlet_character#Character_orthogonality
orthogonality1 :: forall n. KnownNat n => DirichletCharacter n -> Bool
orthogonality1 chi = magnitude (total - correct) < (1e-13 :: Double)
where n = natVal @n Proxy
total = sum [orZeroToNum toComplex (evalGeneral chi a) | a <- [0 .. maxBound]]
correct = if isPrincipal chi
then fromIntegral $ totient n
else 0
orthogonality2 :: Positive Natural -> Integer -> Bool
orthogonality2 (Positive n) a =
case a `modulo` n of
SomeMod a' -> magnitude (total - correct) < (1e-13 :: Double)
where total = sum [orZeroToNum toComplex (evalGeneral chi a') | chi <- allChars]
correct = if a' == 1
then fromIntegral $ totient n
else 0
InfMod {} -> False
-- | Manually confirm isRealCharacter is correct (in both directions)
realityCheck :: KnownNat n => DirichletCharacter n -> Bool
realityCheck chi = isJust (isRealCharacter chi) == isReal'
where isReal' = and [real (evalGeneral chi t) | t <- [minBound..maxBound]]
real Zero = True
real (NonZero t) = t <> t == mempty
-- | Check real character evaluation matches normal evaluation
realEvalCheck :: KnownNat n => RealCharacter n -> Int -> Bool
realEvalCheck chi i' = fromIntegral (toRealFunction chi i) == (orZeroToNum toComplex (evalGeneral (getRealChar chi) i) :: Complex Double)
where i = fromIntegral i'
-- | The jacobi character agrees with the jacobi symbol
jacobiCheck :: Positive Natural -> Bool
jacobiCheck (Positive n) =
case someNatVal (2*n+1) of
SomeNat (Proxy :: Proxy n) ->
case jacobiCharacter @n of
Just chi -> and [toRealFunction chi (fromIntegral j) == J.symbolToNum (J.jacobi j (2*n+1)) | j <- [0..2*n]]
_ -> False
-- | Bulk evaluation agrees with pointwise evaluation
evalAllCheck :: forall n. KnownNat n => DirichletCharacter n -> Bool
evalAllCheck chi = V.generate (fromIntegral $ natVal @n Proxy) (evalGeneral chi . fromIntegral) == evalAll chi
-- | Induced characters agree with the original character.
-- (Except for when d=1, where chi(0) = 1, which is true for no other d)
inducedCheck :: forall d. KnownNat d => DirichletCharacter d -> Positive Natural -> Bool
inducedCheck chi (Positive k) =
case someNatVal (d*k) of
SomeNat (Proxy :: Proxy n) ->
case induced @n chi of
Just chi2 -> and (V.izipWith matchedValue (V.concat (replicate (fromIntegral k) (evalAll chi))) (evalAll chi2))
Nothing -> False
where d = natVal @d Proxy
matchedValue i x1 x2 = if gcd (fromIntegral i) (d*k) > 1
then x2 == Zero
else x2 == x1
-- | Primitive checker is correct (in both directions)
primitiveCheck :: forall n. KnownNat n => DirichletCharacter n -> Bool
primitiveCheck chi = isJust (isPrimitive chi) == isPrimitive'
where isPrimitive' = all testModulus possibleModuli
n = fromIntegral (natVal @n Proxy) :: Int
possibleModuli = init (divisorsList n)
table = evalAll chi
testModulus d = not $ null [a | a <- [1..n-1], gcd a n == 1, a `mod` d == 1 `mod` d, table V.! a /= mempty]
-- | Ensure that makePrimitive gives primitive characters
makePrimitiveCheck :: DirichletCharacter n -> Bool
makePrimitiveCheck chi = case makePrimitive chi of
WithNat chi' -> isJust (isPrimitive (getPrimitiveChar chi'))
-- | sameNat also ensures the two new moduli are the same
makePrimitiveIdem :: DirichletCharacter n -> Bool
makePrimitiveIdem chi = case makePrimitive chi of
WithNat (chi' :: PrimitiveCharacter n') ->
case makePrimitive (getPrimitiveChar chi') of
WithNat (chi'' :: PrimitiveCharacter n'') ->
case sameNat (Proxy :: Proxy n') (Proxy :: Proxy n'') of
Just Refl -> chi' == chi''
Nothing -> False
orderCheck :: DirichletCharacter n -> Bool
orderCheck chi = isPrincipal (n `stimes` chi) && and [not (isPrincipal (i `stimes` chi)) | i <- [1..n-1]]
where n = orderChar chi
fromTableCheck :: forall n. KnownNat n => DirichletCharacter n -> Bool
fromTableCheck chi = isJust (fromTable @n (evalAll chi))
-- A bunch of functions making sure that every function which can produce a character (in
-- particular by fiddling internal representation) produces a valid character
indexToCharValid :: KnownNat n => DirichletCharacter n -> Bool
indexToCharValid = validChar
principalCharValid :: Positive Natural -> Bool
principalCharValid (Positive n) =
case someNatVal n of
SomeNat (Proxy :: Proxy n) -> validChar (principalChar @n)
mulCharsValid :: KnownNat n => DirichletCharacter n -> DirichletCharacter n -> Bool
mulCharsValid chi1 chi2 = validChar (chi1 <> chi2)
mulCharsValid' :: Positive Natural -> Natural -> Natural -> Bool
mulCharsValid' (Positive n) i j =
case someNatVal n of
SomeNat (Proxy :: Proxy n) ->
mulCharsValid (indexToChar @n (i `mod` totient n)) (indexToChar @n (j `mod` totient n))
stimesCharValid :: KnownNat n => DirichletCharacter n -> Int -> Bool
stimesCharValid chi n = validChar (n `stimes` chi)
succValid :: KnownNat n => DirichletCharacter n -> Bool
succValid = validChar . succ
inducedValid :: forall d. KnownNat d => DirichletCharacter d -> Positive Natural -> Bool
inducedValid chi (Positive k) = case someNatVal (natVal @d Proxy * k) of
SomeNat (Proxy :: Proxy n) -> maybe False validChar (induced @n chi)
jacobiValid :: Positive Natural -> Bool
jacobiValid (Positive n) =
case someNatVal (2*n+1) of
SomeNat (Proxy :: Proxy n) ->
case jacobiCharacter @n of
Just chi -> validChar (getRealChar chi)
_ -> False
makePrimitiveValid :: DirichletCharacter n -> Bool
makePrimitiveValid chi = case makePrimitive chi of
WithNat chi' -> validChar (getPrimitiveChar chi')
testSuite :: TestTree
testSuite = testGroup "DirichletCharacters"
[ testSmallAndQuick "Dirichlet characters divide the right order" (dirCharProperty dirCharOrder)
, testSmallAndQuick "Dirichlet characters are multiplicative" (dirCharProperty testMultiplicative)
, testSmallAndQuick "Dirichlet characters are 1 at 1" (dirCharProperty testAtOne)
, testSmallAndQuick "Right number of Dirichlet characters" countCharacters
, testSmallAndQuick "Principal character behaves as expected" principalCase
, testSmallAndQuick "Orthogonality relation 1" (dirCharProperty orthogonality1)
, testSmallAndQuick "Orthogonality relation 2" orthogonality2
, testSmallAndQuick "Real character checking is correct" (dirCharProperty realityCheck)
, testSmallAndQuick "Real character evaluation is accurate" (realCharProperty realEvalCheck)
, testSmallAndQuick "Jacobi character matches symbol" jacobiCheck
, testSmallAndQuick "Bulk evaluation matches pointwise" (dirCharProperty evalAllCheck)
, testSmallAndQuick "Induced character is correct" (dirCharProperty inducedCheck)
, testSmallAndQuick "Primitive character checking is correct" (dirCharProperty primitiveCheck)
, testSmallAndQuick "makePrimitive produces primitive character" (dirCharProperty makePrimitiveCheck)
, testSmallAndQuick "makePrimitive is idempotent" (dirCharProperty makePrimitiveIdem)
, testSmallAndQuick "Calculates correct order" (dirCharProperty orderCheck)
, testSmallAndQuick "Can construct from table" (dirCharProperty fromTableCheck)
, testGroup "Creates valid characters"
[ testSmallAndQuick "indexToChar" (dirCharProperty indexToCharValid)
, testSmallAndQuick "principalChar" principalCharValid
, testSmallAndQuick "mulChars" mulCharsValid'
, testSmallAndQuick "stimesChar" (dirCharProperty stimesCharValid)
, testSmallAndQuick "succ" (dirCharProperty succValid)
, testSmallAndQuick "induced" (dirCharProperty inducedValid)
, testSmallAndQuick "jacobi" jacobiValid
, testSmallAndQuick "makePrimitive" (dirCharProperty makePrimitiveValid)
]
]
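
-- A minimal standalone runner sketch (an assumption: in the actual package this
-- suite is more likely collected by an aggregate test driver):
--
-- > main :: IO ()
-- > main = defaultMain testSuite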
|
Require Import Crypto.Specific.Framework.RawCurveParameters.
Require Import Crypto.Util.LetIn.
(***
Modulus : 2^192 - 2^64 - 1
Base: 48
***)
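(* Note: with base 48 and sz = 4, field elements are four 48-bit limbs
   (4 * 48 = 192), matching the bit length of the modulus. The list c
   encodes the reduction identity 2^192 = 2^64 + 1 (mod m). *)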
Definition curve : CurveParameters :=
{|
sz := 4%nat;
base := 48;
bitwidth := 64;
s := 2^192;
c := [(1, 1); (2^64, 1)];
carry_chains := Some [[0; 3]; [1; 0; 2; 3]; [1; 0]]%nat;
a24 := None;
coef_div_modulus := Some 2%nat;
goldilocks := None;
karatsuba := None;
montgomery := false;
freeze := Some true;
ladderstep := false;
mul_code := None;
square_code := None;
upper_bound_of_exponent_loose := None;
upper_bound_of_exponent_tight := None;
allowable_bit_widths := None;
freeze_extra_allowable_bit_widths := None;
modinv_fuel := None
|}.
Ltac extra_prove_mul_eq _ := idtac.
Ltac extra_prove_square_eq _ := idtac.
|
\section{A Project Inspiration: JTAG to DE10-Lite Interface}
The inspiration for this project came from a search on github.com for DE10-Lite:
\url{https://github.com/hildebrandmw/de10lite-hdl}
This is a nice collection of work done with the DE10-Lite board for academic purposes.
What really got my attention is the project play\_gif:
\url{https://github.com/hildebrandmw/de10lite-hdl/tree/master/projects/play_gif}
The interesting feature of this project is the use of USB to load data (an animated GIF image file)
to the FPGA. So this is a data pipeline from a Linux desktop to the FPGA built into the board!
This met my requirement that I be able to control the FPGA remotely from a desktop (or SBC) computer.
The interface is via ``JTAG'', which is typically used as a debugging interface. It is not specifically intended for mass data transfer, but in this case it was pressed into service.
The interface is a bit clunky to use. It requires a ``TCL Server'', and a running instance of the Quartus development tool! Not exactly what I was looking for, but I got the demo to work easily! It is very nicely done work demonstrating several features of FPGA technology.
The way it works is conceptually simple. The FPGA part of the project implements a VGA\footnote{VGA is a relatively simple video standard which seems to be common on many FPGA development boards.} interface to the connector on the DE10-Lite board. The image loaded from the desktop computer is sliced into its constituent ``frames''.
Another interesting aspect of the project is the interface to the SDRAM of the DE10-Lite which is a 64MB external part on the board. The sliced-up image is loaded into the SDRAM, and then another control module pages the VGA output through the memory. Thus you see the animated GIF displayed on the monitor. Really you are seeing in a very direct manner the data loaded into the SDRAM. Cool!
\subsection{IP and Platform Designer}
First, a little bit of FPGA jargon. ``Intellectual Property'' (IP) in the context of semiconductor devices is a block of circuitry which has been heavily engineered and refined to perform some particular function. It could be patented or otherwise protected from duplication by competitors.
Due to the way integrated circuits are manufactured, blocks of ``IP'' can be added to the silicon and be expected to perform to the IP owner's specifications. Typically IP can be included as part of a design kit, or it can be paid for with a license fee.
IP is good because it can reduce engineering design effort, improve performance, and enhance quality. The trade-off is license fee cost, and you don't necessarily get exactly what you want.
In our case, we are given a whole bunch of IP for free that we can experiment with! This is bundled into ``Platform Designer'' which is a tool-within-a-tool in the Quartus design suite.
To do justice to this there should be an entire section on ``Platform Designer''. I will summarize here. There are excellent video Platform Designer tutorials which you can access if you register for a free account at the Intel web site.
``Platform Designer'' is a building-block system. You get a library of IP, along with a mechanism to hook them together. The design is bundled into a ``Qsys'' file. Let's have a look at the Qsys part of the play\_gif project:
\begin{figure}[h]
\centering
\includegraphics[width=1.0\textwidth]{images/platform_designer.pdf}
\centering\bfseries
\caption{Play\_gif project Qsys file}
\end{figure}
The upper left corner of the GUI is the library of IP. Some of the categories:
\begin{itemize}
\item Basic Functions
\item DSP
\item Memory Interfaces
\item Processors and Peripherals (including a ``Nios'' processor)
\item Memory Interfaces and Controllers
\end{itemize}
There is a sort of ``sub-library'' called ``University Program'' which includes:
\begin{itemize}
\item Audio and Video
\item Bridges
\item Clocks
\item Communication
\item Generic IO
\item Memory
\end{itemize}
The project used this IP:
\begin{enumerate}
\item ALTPLL Intel FPGA IP
\item JTAG to Avalon Master Bridge
\item DRAM Controller Intel FPGA IP
\item External Bus to Avalon Bridge
\item Clock Bridge
\item (Parallel IO) Intel FPGA IP
\end{enumerate}
The above IP can be seen in the column ``Name''. There are two instantiations of the Parallel IO.
In the column ``Connections'' can be seen the graphical interconnections between the IP blocks.
Connections are made by simply clicking on the circles at the intersections of the ``wires'' between the IP.
Thus an entire system can be assembled using this GUI. No writing of Verilog required!
The project does include some hand-written Verilog. This ``Qsys'' design is ``dropped in'' to the project
as a Verilog module.
Here is what the system looks like:
\begin{figure}[h]
\centering
\includegraphics[width=1.0\textwidth]{images/play_gif.png}
\centering\bfseries
\caption{Play\_gif JTAG Interface System Diagram}
\end{figure}
What this diagram does not show is the requirement for a running Quartus and a TCL/JTAG server program.
The ``Avalon Interconnect'' deserves some explanation. This is a system bus used in the MAX10 FPGA. From the Avalon Interface specification:
\begin{quotation}
Avalon® interfaces simplify system design by allowing you to easily connect
components in Intel® FPGA. The Avalon interface family defines interfaces appropriate
for streaming high-speed data, reading and writing registers and memory, and
controlling off-chip devices. Components available in Platform Designer incorporate
these standard interfaces. Additionally, you can incorporate Avalon interfaces in
custom components, enhancing the interoperability of designs.
\end{quotation}
It is an internal bus standard used to connect Avalon bus masters and slaves. So it is a single-click process to connect Avalon components in Platform Designer. It is really amazing what you get for such little effort!
This project is interesting, and shows a path to communication between a desktop computer and the FPGA.
However, the JTAG + Quartus + TCL/JTAG Server is cumbersome. A SPI to Avalon bus IP component is listed in the catalog.
What if the JTAG and development tools could be replaced by something simpler like a SPI bus?
So that is what evolved into my ``introductory FPGA project''. The revised system diagram:
\begin{figure}[h]
\centering
\includegraphics[width=1.0\textwidth]{images/spi_avalon_system}
\centering\bfseries
\caption{Play\_gif with SPI System Diagram}
\end{figure}
The significant change on the FPGA is the swapping of the JTAG Avalon bus master with the SPI-Avalon slave. This was not entirely a drop-in replacement, as there were changes to reset and clock connections in addition to swapping JTAG to SPI components. But it is easy, and the swapping can be done in a couple of minutes.
External to the DE10-Lite board, there is a USB to SPI adapter board. This board is based on the FT232H chip by FTDI. This can be bought from eBay for about \$10. Search for ``ft232 spi'' and you will find several options. I recommend one with headers to allow it to be plugged into a common breadboard. The FTDI device requires a C shared library (libMPSSE) to be installed:
\url{https://www.ftdichip.com/Support/SoftwareExamples/MPSSE/LibMPSSE-SPI.htm}
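For reference, the following is a minimal sketch of driving the bus from the
desktop side. The structure fields and option flags are taken from the
libMPSSE-SPI documentation and should be checked against the installed header;
the channel index, SPI mode and chip-select line shown here are illustrative
assumptions, not this project's actual configuration.
\begin{verbatim}
/* Minimal libMPSSE-SPI sketch: open channel 0 on the FT232H, configure
   SPI mode 0 at 30 MHz, and write a few bytes. Error checking omitted. */
#include "libMPSSE_spi.h"

int main(void)
{
    FT_HANDLE handle;
    ChannelConfig cfg = {0};
    uint8 tx[4] = {0x01, 0x02, 0x03, 0x04};
    uint32 sent = 0;

    cfg.ClockRate = 30000000;   /* 30 MHz, the FT232H maximum */
    cfg.LatencyTimer = 2;
    cfg.configOptions = SPI_CONFIG_OPTION_MODE0 |
                        SPI_CONFIG_OPTION_CS_DBUS3 |
                        SPI_CONFIG_OPTION_CS_ACTIVELOW;

    Init_libMPSSE();
    SPI_OpenChannel(0, &handle);
    SPI_InitChannel(handle, &cfg);
    SPI_Write(handle, tx, sizeof tx, &sent,
              SPI_TRANSFER_OPTIONS_SIZE_IN_BYTES |
              SPI_TRANSFER_OPTIONS_CHIPSELECT_ENABLE |
              SPI_TRANSFER_OPTIONS_CHIPSELECT_DISABLE);
    SPI_CloseChannel(handle);
    Cleanup_libMPSSE();
    return 0;
}
\end{verbatim}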
Other changes required for SPI:
\begin{itemize}
\item The ports on the QSYS module changed (JTAG $\rightarrow$ SPI), thus the Verilog module in which it is instantiated required minor changes. This was done with the text editor feature of Quartus.
\item Another change is required to the FPGA pins. The new SPI bus must be routed to some easily accessible header on the DE10-Lite board. Since the board has an Arduino compatible header, and this header has a standard set of four pins for SPI, those pins were used. The details can be seen in the DE10-Lite manual provided by Terasic. The ``Pin Planner'' tool was used to make the changes.
\item The ``Synopsys Design Constraints'' (.sdc) file was updated to incorporate the SPI bus.
\end{itemize}
Here is the rapid-prototype breadboard hook-up:
\begin{figure}[h]
\centering
\includegraphics[width=0.5\textwidth]{images/de10_spi}
\centering\bfseries
\caption{DE10-Lite Connected to FTDI SPI Breakout}
\end{figure}
In spite of the length of the breadboard jumper wires, the SPI interface performed remarkably well right up to the FTDI clock limit of 30 MHz.
|
function [Btu] = Nm2Btu(Nm)
% Convert energy or work from newton-meters to British thermal units.
% Chad A. Greene 2012
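% Example (sanity check): Nm2Btu(1055.06) returns approximately 1, since one
% British thermal unit is about 1055.06 joules (newton-meters).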
Btu = Nm*0.00094781707775; |
<center>
<h1> INF285 - Computación Científica </h1>
<h2> Roots of 1D equations </h2>
<h2> <a href="#acknowledgements"> [S]cientific [C]omputing [T]eam </a> </h2>
<h2> Version: 1.37</h2>
</center>
<div id='toc' />
## Table of Contents
* [Introduction](#intro)
* [Bisection Method](#bisection)
* [Fixed Point Iteration and Cobweb diagram](#fpi)
* [FPI - example from textbook](#fpi-textbook-example)
* [Newton's Method](#nm)
* [Wilkinson Polynomial](#wilkinson)
* [Acknowledgements](#acknowledgements)
* [Extra Examples](#extraexamples)
```python
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
%matplotlib inline
from ipywidgets import interact
from ipywidgets import widgets
sym.init_printing()
from scipy import optimize
import pandas as pd
pd.set_option("display.colheader_justify","center")
pd.options.display.float_format = '{:.10f}'.format
from colorama import Fore, Back, Style
# https://pypi.org/project/colorama/
# Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
# Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
# Style: DIM, NORMAL, BRIGHT, RESET_ALL
textBold = lambda x: Style.BRIGHT+x+Style.RESET_ALL
textBoldH = lambda x: Style.BRIGHT+Back.YELLOW+x+Style.RESET_ALL
```
<div id='intro' />
# Introduction
[Back to TOC](#toc)
In this document we're going to study how to find roots of 1D equations using numerical methods.
First, let's start with the definition of a root:
<b>Definition</b>: The function $f(x)$ has a <b>root</b> at $x = r$ if $f(r) = 0$.
Example: Let's say we want to solve the equation $r + \log(r) = 3$.
We can re-arrange the equation as follows: $r + \log(r) - 3 = 0$.
Thus, solving the previous equation is equivalent to finding the root of $f(x) = x + \log(x) - 3$.
This example shows how we can translate an equation into a root-finding problem!
We will study now several numerical methods to find roots.
We will start by defining a function $f(x)$ using a __lambda__ definition.
```python
f = lambda x: x+np.log(x)-3
```
Notice that we have used the NumPy implementation for the logarithmic function. _**Quick question**: what is the base for this implementation? Is it the natural logarithm or logarithm base 10?_
But before we start working on a numerical implementation, we should always consider solving the problem algebraically first.
This can be done with SymPy, i.e. using symbolic computation.
For instance, we will start by defining a symbolic variable:
```python
# Definition of symbolic variable
x = sym.Symbol('x')
# Defining 'symbolic' function
fsym = lambda x: x+sym.log(x)-3
# Finding the root 'symbolically' and obtaining the only root
r=sym.solve(sym.Eq(fsym(x), 0), x)[0]
print(textBoldH('Root obtained:'),r)
print(textBoldH('Numerical root:'),r.evalf())
```
Root obtained: LambertW(exp(3))
Numerical root: 2.20794003156932
**Lambert W function**: https://en.wikipedia.org/wiki/Lambert_W_function
We will now obtain the root 'manually'.
This is not a recommended path but it is useful initially.
```python
def find_root_manually(r=2.0):
    # Defining a vector to evaluate f(x) in a vectorized fashion
    x = np.linspace(1,3,1000)
    # Creating the figure
    plt.figure(figsize=(8,8))
    # Plotting the function in a vectorized way
    plt.plot(x,f(x),'b-')
    # Plotting the x-axis.
    # Quick question: Why do we have to multiply 'x' by '0'? What would happen if we only put '0' instead of 'x*0'?
    plt.plot(x,x*0,'r--')
    # Adding the background grid to the plot. We strongly recommend it!
    plt.grid(True)
    # Just adding labels.
    plt.ylabel('$f(x)$',fontsize=16)
    plt.xlabel('$x$',fontsize=16)
    plt.title('$r='+str(r)+',\, f(r)='+str(f(r))+'$',fontsize=16)
    plt.plot(r,f(r),'k.',markersize=20)
    plt.show()
interact(find_root_manually,r=(1,3,1e-3))
```
<div id='bisection' />
# Bisection Method
[Back to TOC](#toc)
The bisection method finds the root of a function $f$.
It requires that:
1. $f$ be a **continuous** function.
2. An interval $[a,b]$ such that $f(a)\cdot f(b) < 0$.
If these 2 conditions are satisfied, it means that there is a value $r$, between $a$ and $b$, for which $f(r) = 0$.
To summarize how this method works, start with the aforementioned interval (checking that there's a root in it), and split it into two smaller intervals $[a,c]$ and $[c,b]$.
Then, check which of the two intervals contains a root.
Keep splitting each "eligible" interval until the algorithm converges or the tolerance is achieved.
```python
def bisect(f, a, b, tol=1e-5, maxNumberIterations=100):
    # Evaluating the extreme points of the interval provided
    fa = f(a)
    fb = f(b)
    # Iteration counter.
    i = 0
    # If f(a)*f(b) is not negative, a sign change (and hence a root) is not guaranteed
    if np.sign(fa*fb) >= 0:
        print('f(a)f(b)<0 not satisfied!')
        return None
    # Output table to store the numerical evolution of the algorithm
    output_table = []
    # Main loop: it will iterate until it satisfies one of the two criteria:
    # the tolerance 'tol' is achieved or the max number of iterations is reached.
    while ((b-a)/2 > tol) and i<=maxNumberIterations:
        # Obtaining the midpoint of the interval. Quick question: What could happen if a different point is used?
        c = (a+b)/2.
        # Evaluating the mid point
        fc = f(c)
        # Saving the output data
        output_table.append([i, a, c, b, fa, fc, fb, b-a])
        # Did we find the root?
        if fc == 0:
            print('f(c)==0')
            break
        elif np.sign(fa*fc) < 0:
            # This first case considers that the new interval is defined by [a,c]
            b = c
            fb = fc
        else:
            # This second case considers that the new interval is defined by [c,b]
            a = c
            fa = fc
        # Increasing the iteration counter
        i += 1
    # Showing final output table
    columns = ['$i$', '$a_i$', '$c_i$', '$b_i$', '$f(a_i)$', '$f(c_i)$', '$f(b_i)$', '$b_i-a_i$']
    df = pd.DataFrame(data=output_table, columns=columns)
    display(df)
    # Computing the best approximation obtained for the root, which is the midpoint of the final interval.
    xc = (a+b)/2.
    return xc
```
```python
# Initial example
f1 = lambda x: x+np.log(x)-3
# A different function, notice that x is multiplied to the exponential now and not added, as before.
f2 = lambda x: x*np.exp(x)-3
# This is the introductory example about Fixed Point Iteration
f3 = lambda x: np.cos(x)-x
bisect(f1,1e-10,3) # Remember to change 'tol'!
```
It's very important to define a concept called **convergence rate**.
This rate shows how fast the convergence of a method is at a specified point.
The convergence rate for bisection is always $1/2$, because the method halves the interval at each iteration.
In this particular case we observe $e_{i+1} \approx \dfrac{e_{i}}{2}$. Why? Where?
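As a quick sketch, we can even predict how many iterations the `bisect` call above needs: the loop stops once $(b-a)/2^{i+1} \leq \text{tol}$, i.e. after roughly $\log_2((b-a)/\text{tol})-1$ halvings.
```python
# Minimal sketch: predicted number of bisection iterations for
# bisect(f1, 1e-10, 3) with tol = 1e-5.
a, b, tol = 1e-10, 3.0, 1e-5
n_steps = int(np.ceil(np.log2((b - a)/tol) - 1))
print(n_steps)  # 18 iterations for these values
```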
<div id='fpi' />
# Fixed Point Iteration and Cobweb diagram
[Back to TOC](#toc)
To learn about the Fixed-Point Iteration we will first learn about the concept of a Fixed Point.
A Fixed Point of a function $g$ is a real number $r$ such that $g(r) = r$.
The Fixed-Point Iteration is based on the Fixed Point concept and works as follows to find the root of a function:
\begin{align*}
x_{0} &= initial\_guess \\
x_{i+1} &= g(x_{i})
\end{align*}
To find an equation's root using this method, we'll have to rearrange the equation to make it of the following form $x = g(x)$.
For example, if we want to obtain the root of $f(r)=0$, one can add $r$ to both sides of the equation to obtain $r+f(r)=r$.
This way we have $g(r)=r+f(r)$, and the fixed point iteration can be performed.
In the following example, we'll find the root of $f(x) = x - \cos(x)$ by iterating over the function $g(x) = \cos(x)$.
```python
# Just plotting the Cobweb diagram: https://en.wikipedia.org/wiki/Cobweb_plot
def cobweb(x,g=None):
    min_x = np.amin(x)
    max_x = np.amax(x)
    plt.figure(figsize=(10,10))
    ax = plt.axes()
    plt.plot(np.array([min_x,max_x]),np.array([min_x,max_x]),'b-')
    for i in np.arange(x.size-1):
        delta_x = x[i+1]-x[i]
        head_length = np.abs(delta_x)*0.04
        arrow_length = delta_x-np.sign(delta_x)*head_length
        ax.arrow(x[i], x[i], 0, arrow_length, head_width=1.5*head_length, head_length=head_length, fc='k', ec='k')
        ax.arrow(x[i], x[i+1], arrow_length, 0, head_width=1.5*head_length, head_length=head_length, fc='k', ec='k')
    if g is not None:
        y = np.linspace(min_x,max_x,1000)
        plt.plot(y,g(y),'r')
    plt.title('Cobweb diagram')
    plt.grid(True)
    plt.show()

# This code performs the fixed point iteration.
def fpi(g, x0, k, flag_cobweb=False):
    # This is where we store all the approximations;
    # technically this is not needed, but we store them because we need them for the cobweb diagram at the end.
    x = np.empty(k+1)
    # Just starting the fixed point iteration from the 'initial guess'
    x[0] = x0
    # Initializing the error to NaN
    error_i = np.nan
    # Output table to store the numerical evolution of the algorithm
    output_table = []
    # Main loop
    for i in range(k):
        # Iteration
        x[i+1] = g(x[i])
        # Storing error from previous iteration
        error_iminus1 = error_i
        # Computing error for current iteration.
        # Notice that from the theory we need to compute e_i=|x_i-r|, i.e. we need the root 'r',
        # but we don't have it, so we approximate it by 'x_{i+1}'.
        error_i = abs(x[i]-x[i+1])
        output_table.append([i,x[i],x[i+1],error_i,error_i/error_iminus1,error_i/(error_iminus1**((1+np.sqrt(5))/2.)),error_i/(error_iminus1**2)])
    # Showing final output table
    columns = ['$i$', '$x_i$', '$x_{i+1}$', '$e_i$', r'$\frac{e_i}{e_{i-1}}$', r'$\frac{e_i}{e_{i-1}^\alpha}$', r'$\frac{e_i}{e_{i-1}^2}$']
    df = pd.DataFrame(data=output_table, columns=columns)
    display(df)
    # Just showing the cobweb diagram if required
    if flag_cobweb:
        cobweb(x,g)
    return x[-1]
```
```python
# First example
g = lambda x: np.cos(x)
# Examples from classnotes
g1 = lambda x: -(3/2)*x+5/2
g2 = lambda x: -(1/2)*x+3/2
fpi(g, 1.1, 20, True)
# Suggestions:
# 1.- A very useful and simple 'limit cycle' example! Try it. Credit: anonymous student from class 20210922.
# fpi(lambda x: -x, 2, 10, True)
# 2.- Try the next example. Why do we see 1.0000 for e_{i+1}/e_i over 90 iterations?
# fpi(g, 1, 100, True)
# 3.- The following fixed-point iteration obtains sqrt(2). Credits: anonymous students from classes 20210921 and 20210922.
# gD = lambda x: (x+2)/(x+1)
# fpi(gD, 1, 10, True)
```
Let's quickly explain the Cobweb Diagram we have here. The <font color="blue">blue</font> line is the function $y=x$ and the <font color="red">red</font> is the function $y=g(x)$.
The point in which they meet is $r=g(r)$, i.e. the fixed point.
In this particular example, we start at <font color="blue">$y = x = 1.1$</font> (on the diagonal, towards the upper right) and then we "jump" **vertically** to <font color="red">$y = \cos(1.1) \approx 0.454$</font>.
After this, we jump **horizontally** to <font color="blue">$x = \cos(1.1) \approx 0.454$</font>.
Then, we jump again **vertically** to <font color="red">$y = \cos\left(\cos(1.1)\right) \approx 0.899$</font> and so on.
See the pattern here? We're just iterating over $x = \cos(x)$, getting closer to the center of the diagram where the fixed point resides, in $x \approx 0.739$.
It's very important to mention that the algorithm will converge (locally) if the rate of convergence satisfies $S < 1$, where $S = \left| g'(r) \right|$.
If you want to use this method, you'll have to construct $g(x)$ starting from $f(x)$ accordingly.
In this example, $g(x) = \cos(x) \Rightarrow g'(x) = -\sin(x)$ and $|-\sin(0.739)| \approx 0.67$.
**Quick question:** Do you see the value 0.67 in the previous table?
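As a quick sanity check (a small sketch, reusing scipy's `optimize` module imported at the top), we can compute $r$ with a library root-finder and evaluate $S$ directly:
```python
# Find r for cos(x) = x and evaluate S = |g'(r)| = |-sin(r)|.
r = optimize.brentq(lambda x: np.cos(x) - x, 0, 1)
print(r, np.abs(-np.sin(r)))  # r ≈ 0.7391, S ≈ 0.6736
```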
### Another example. Look at this web page to understand the context: https://divisbyzero.com/2008/12/18/sharkovskys-theorem/amp/?__twitter_impression=true
```python
# Consider this function
g = lambda x: -(3/2)*x**2+(11/2)*x-2
# Here we compute the derivative of it.
gp = lambda x: -3*x+11/2
# We now plot the function itself (red), its derivative (magenta) and the function y=x (blue).
# We also plot the values -1 and 1 with green dashed lines.
# This analysis shows that the fixed point, which is the intersection between the red and blue curves,
# does not generate a convergent fixed-point iteration, since the derivative (magenta curve) has a value
# lower than -1 at the fixed point.
x=np.linspace(2,3,100)
plt.figure(figsize=(8,8))
plt.plot(x,g(x),'r-',label=r'$g(x)$')
plt.plot(x,x,'b-')
plt.plot(x,gp(x),'m-')
plt.plot(x,gp(x)*0+1,'g--')
plt.plot(x,gp(x)*0-1,'g--')
plt.grid(True)
plt.show()
```
What is interesting about the previous example is that it generates an interesting limit cycle! In the next cell we run the fixed-point iteration with initial guess equal to 1. The iteration oscillates, generating the following sequence: 1, 2, 3, 1, 2, 3, .... Which is nice!
```python
fpi(g, 1, 12, True)
# Suggestion, try the following alternative.
# fpi(g, 2.5, 100, True)
```
However, we prefer **convergent** fixed-point iterations! Here is an interesting way to turn a non-convergent FPI into a convergent one.
```python
# This is a "palta" (a little Easter egg) hidden in the code! Think about it. Quick question: what is it doing?
a=-1/(1-(-1.72)) # a = -1 / (1 - g'(r)), where does "a" come from?
g2 = lambda x: x+a*(x-g(x))
fpi(g2, 1, 14, True)
```
<div id='fpi-textbook-example' />
# FPI - example from textbook
[Back to TOC](#toc)
This example is from the textbook. We are trying to find a root of $f(x)=x^3+x-1$.
```python
# These are the three functions proposed.
g1 = lambda x: 1-x**3
g2 = lambda x: (1-x)**(1/3)
g3 = lambda x: (1+2*x**3)/(1+3*x**2)
# Change the input function to evaluate different functions.
# Are the three functions convergent fixed point iterations?
fpi(g3, 0.75, 10, True)
```
```python
# This is a "hack" to improve the convergence of g2!
a = -0.6
g4 = lambda x: x+a*(x-g2(x))
fpi(g4, 0.75, 10, True)
# Why does this hack work?
```
Now that we have found the root, let's compute the derivative of each $g(x)$ used previously and understand what exactly was going on.
```python
g1p = lambda x: -3*x**2
g2p = lambda x: -(1/3)*(1-x)**(-2/3)
g3p = lambda x: ((1+3*x**2)*(6*x**2)-(1+2*x**3)*6*x)/((1+3*x**2)**2)
g4p = lambda x: 1+a*(1-g2p(x))
r=0.6823278038280194
print('What is the conclusion then?')
print([g1p(r), g2p(r), g3p(r), g4p(r)])
```
What is the conclusion then?
[-1.3967136956303043, -0.7159663452349291, 0.0, -0.029579807140957426]
```python
# Or it may be better to apply the absolute value.
print(np.abs([g1p(r), g2p(r), g3p(r), g4p(r)]))
```
[1.3967137 0.71596635 0. 0.02957981]
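So the conclusion is: $|g_1'(r)| \approx 1.40 > 1$, so $g_1$ does not generate a convergent FPI; $|g_2'(r)| \approx 0.72 < 1$, so $g_2$ converges linearly; $g_3'(r) = 0$, so $g_3$ converges quadratically (indeed, $g_3$ is exactly Newton's method applied to $f(x)=x^3+x-1$); and the hacked $g_4$ converges quickly since $|g_4'(r)| \approx 0.03$.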
<div id='nm' />
# Newton's Method
[Back to TOC](#toc)
Newton's method also finds a root of a function $f(x)$, but it requires its derivative, i.e. $f'(x)$.
The algorithm is as follows:
\begin{align*}
x_0 &= \text{initial guess},\\
x_{i+1} &= x_i - \dfrac{f(x_i)}{f'(x_i)}.
\end{align*}
For roots with multiplicity equal to 1, Newton's method converges quadratically. However, when the multiplicity is larger than 1, it only shows linear convergence. Fortunately, we can modify Newton's method if we know the multiplicity of the root, say $m$, as follows:
\begin{align*}
x_0 &= \text{initial guess},\\
x_{i+1} &= x_i - m\,\dfrac{f(x_i)}{f'(x_i)}.
\end{align*}
This modified version will also show quadratic convergence!
```python
def newton_method(f, fp, x0, rel_error=1e-8, m=1, maxNumberIterations=100):
    # Initialization of hybrid error and absolute error
    hybrid_error = 100
    error_i = np.inf
    # Output table to store the numerical evolution of the algorithm
    output_table = []
    # Iteration counter
    i = 0
    while (hybrid_error > rel_error and hybrid_error < 1e12 and i<=maxNumberIterations):
        # Newton's iteration
        x1 = x0-m*f(x0)/fp(x0)
        # Checking if root was found
        if f(x1) == 0.0:
            hybrid_error = 0.0
            break
        # Computation of hybrid error
        hybrid_error = abs(x1-x0)/np.max([abs(x1),1e-12])
        # Computation of absolute error
        error_iminus1 = error_i
        error_i = abs(x1-x0)
        # Storing output data
        output_table.append([i,x0,x1,error_i,error_i/error_iminus1,error_i/(error_iminus1**((1+np.sqrt(5))/2.)),error_i/(error_iminus1**2)])
        # Updating solution
        x0 = x1
        # Increasing iteration counter
        i += 1
    # Showing final output table
    columns = ['$i$', '$x_i$', '$x_{i+1}$', '$e_i$', r'$\frac{e_i}{e_{i-1}}$', r'$\frac{e_i}{e_{i-1}^\alpha}$', r'$\frac{e_i}{e_{i-1}^2}$']
    df = pd.DataFrame(data=output_table, columns=columns)
    display(df)
    # Checking if solution was obtained
    if hybrid_error < rel_error:
        return x1
    elif i>=maxNumberIterations:
        print("Newton's Method did not converge. Too many iterations!!")
        return None
    else:
        print("Newton's Method did not converge!")
        return None
```
First example, let's compute a root of $\sin(x)$, near $x_0=3.1$.
```python
# Example function
f = lambda x: np.sin(x)
# The derivative of f
fp = lambda x: np.cos(x)
newton_method(f, fp, 3.1,rel_error=1e-15)
```
Now, we will look at the example when Newton's method shows linear convergence.
```python
f = lambda x: x**2
fp = lambda x: 2*x # the derivative of f
newton_method(f, fp, 3.1, rel_error=1e-1, m=1, maxNumberIterations=10)
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: center;">
<th></th>
<th>$i$</th>
<th>$x_i$</th>
<th>$x_{i+1}$</th>
<th>$e_i$</th>
<th>$\frac{e_i}{e_{i-1}}$</th>
<th>$\frac{e_i}{e_{i-1}^\alpha}$</th>
<th>$\frac{e_i}{e_{i-1}^2}$</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>3.1000000000</td>
<td>1.5500000000</td>
<td>1.5500000000</td>
<td>0.0000000000</td>
<td>0.0000000000</td>
<td>0.0000000000</td>
</tr>
<tr>
<th>1</th>
<td>1</td>
<td>1.5500000000</td>
<td>0.7750000000</td>
<td>0.7750000000</td>
<td>0.5000000000</td>
<td>0.3813629916</td>
<td>0.3225806452</td>
</tr>
<tr>
<th>2</th>
<td>2</td>
<td>0.7750000000</td>
<td>0.3875000000</td>
<td>0.3875000000</td>
<td>0.5000000000</td>
<td>0.5853091517</td>
<td>0.6451612903</td>
</tr>
<tr>
<th>3</th>
<td>3</td>
<td>0.3875000000</td>
<td>0.1937500000</td>
<td>0.1937500000</td>
<td>0.5000000000</td>
<td>0.8983220991</td>
<td>1.2903225806</td>
</tr>
<tr>
<th>4</th>
<td>4</td>
<td>0.1937500000</td>
<td>0.0968750000</td>
<td>0.0968750000</td>
<td>0.5000000000</td>
<td>1.3787288159</td>
<td>2.5806451613</td>
</tr>
<tr>
<th>5</th>
<td>5</td>
<td>0.0968750000</td>
<td>0.0484375000</td>
<td>0.0484375000</td>
<td>0.5000000000</td>
<td>2.1160485195</td>
<td>5.1612903226</td>
</tr>
<tr>
<th>6</th>
<td>6</td>
<td>0.0484375000</td>
<td>0.0242187500</td>
<td>0.0242187500</td>
<td>0.5000000000</td>
<td>3.2476737160</td>
<td>10.3225806452</td>
</tr>
<tr>
<th>7</th>
<td>7</td>
<td>0.0242187500</td>
<td>0.0121093750</td>
<td>0.0121093750</td>
<td>0.5000000000</td>
<td>4.9844719855</td>
<td>20.6451612903</td>
</tr>
<tr>
<th>8</th>
<td>8</td>
<td>0.0121093750</td>
<td>0.0060546875</td>
<td>0.0060546875</td>
<td>0.5000000000</td>
<td>7.6500791480</td>
<td>41.2903225806</td>
</tr>
<tr>
<th>9</th>
<td>9</td>
<td>0.0060546875</td>
<td>0.0030273438</td>
<td>0.0030273438</td>
<td>0.5000000000</td>
<td>11.7412057168</td>
<td>82.5806451613</td>
</tr>
<tr>
<th>10</th>
<td>10</td>
<td>0.0030273438</td>
<td>0.0015136719</td>
<td>0.0015136719</td>
<td>0.5000000000</td>
<td>18.0201941726</td>
<td>165.1612903226</td>
</tr>
</tbody>
</table>
</div>
Newton's Method did not converge. Too many iterations!!
So, in the previous example Newton's method showed linear convergence.
But how can we use its outcome to improve the convergence?
This can be fixed by understanding the following facts:
1. Definition of linear convergence: $e_{i+1}/e_i=S$.
2. The linear convergence exhibited by Newton's method when the root has multiplicity greater than 1 has rate $S=(m-1)/m$.
Connecting these two facts we get
$$e_{i+1}/e_i=(m-1)/m.$$
From the table we obtain $e_{i+1}/e_i\approx 0.5$, which implies the following equation,
$$0.5=(m-1)/m.$$
Solving for $m$ we get $m=2$.
Knowing this is very useful because we can use it with the Newton's method and recover its quadratic convergence!
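As a quick sketch, we can verify this by re-running the previous example with the estimated multiplicity, reusing the `newton_method` defined above:
```python
# Modified Newton with m=2: x_{i+1} = x_i - 2*f(x_i)/f'(x_i).
# For f(x) = x^2 this gives x_{i+1} = x_i - x_i = 0, the exact root in one step.
f = lambda x: x**2
fp = lambda x: 2*x
newton_method(f, fp, 3.1, rel_error=1e-10, m=2, maxNumberIterations=10)
```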
<div id='wilkinson' />
# Wilkinson Polynomial
[Back to TOC](#toc)
https://en.wikipedia.org/wiki/Wilkinson%27s_polynomial
**Final question: Why is the root far far away from $16$?**
```python
x = sym.symbols('x', reals=True)
W=1
for i in np.arange(1,21):
    W *= (x-i)
W # Printing W nicely
```
```python
# Expanding the Wilkinson polynomial
We=sym.expand(W)
We
```
```python
# Just computing the derivative
Wep=sym.diff(We,x)
Wep
```
```python
# Lambdifying the symbolic polynomial and its derivative into numerical functions
P=sym.lambdify(x,We)
Pp=sym.lambdify(x,Wep)
```
```python
# Using scipy function to compute a root
root = optimize.newton(P,16)
print(root)
```
```python
newton_method(P, Pp, 16.01, rel_error=1e-10, maxNumberIterations=10)
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: center;">
<th></th>
<th>$i$</th>
<th>$x_i$</th>
<th>$x_{i+1}$</th>
<th>$e_i$</th>
<th>$\frac{e_i}{e_{i-1}}$</th>
<th>$\frac{e_i}{e_{i-1}^\alpha}$</th>
<th>$\frac{e_i}{e_{i-1}^2}$</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>16.0100000000</td>
<td>16.0425006915</td>
<td>0.0325006915</td>
<td>0.0000000000</td>
<td>0.0000000000</td>
<td>0.0000000000</td>
</tr>
<tr>
<th>1</th>
<td>1</td>
<td>16.0425006915</td>
<td>16.0050204647</td>
<td>0.0374802268</td>
<td>1.1532132093</td>
<td>9.5854070309</td>
<td>35.4827283637</td>
</tr>
<tr>
<th>2</th>
<td>2</td>
<td>16.0050204647</td>
<td>16.0078597186</td>
<td>0.0028392538</td>
<td>0.0757533795</td>
<td>0.5765549711</td>
<td>2.0211558458</td>
</tr>
<tr>
<th>3</th>
<td>3</td>
<td>16.0078597186</td>
<td>15.9851271041</td>
<td>0.0227326145</td>
<td>8.0065452981</td>
<td>300.2282246866</td>
<td>2819.9469801818</td>
</tr>
<tr>
<th>4</th>
<td>4</td>
<td>15.9851271041</td>
<td>16.0029892675</td>
<td>0.0178621634</td>
<td>0.7857505074</td>
<td>8.1457523850</td>
<td>34.5648982597</td>
</tr>
<tr>
<th>5</th>
<td>5</td>
<td>16.0029892675</td>
<td>16.0136315293</td>
<td>0.0106422618</td>
<td>0.5957991522</td>
<td>7.1690902557</td>
<td>33.3553746866</td>
</tr>
<tr>
<th>6</th>
<td>6</td>
<td>16.0136315293</td>
<td>16.0001980846</td>
<td>0.0134334447</td>
<td>1.2622734683</td>
<td>20.9176811505</td>
<td>118.6095110764</td>
</tr>
<tr>
<th>7</th>
<td>7</td>
<td>16.0001980846</td>
<td>16.0178167527</td>
<td>0.0176186681</td>
<td>1.3115525115</td>
<td>18.8204280027</td>
<td>97.6333725295</td>
</tr>
<tr>
<th>8</th>
<td>8</td>
<td>16.0178167527</td>
<td>16.0287977529</td>
<td>0.0109810002</td>
<td>0.6232593802</td>
<td>7.5634001878</td>
<td>35.3749429376</td>
</tr>
<tr>
<th>9</th>
<td>9</td>
<td>16.0287977529</td>
<td>16.0248062796</td>
<td>0.0039914733</td>
<td>0.3634890493</td>
<td>5.9080101682</td>
<td>33.1016340099</td>
</tr>
<tr>
<th>10</th>
<td>10</td>
<td>16.0248062796</td>
<td>15.9972551707</td>
<td>0.0275511088</td>
<td>6.9024910407</td>
<td>209.6945048216</td>
<td>1729.3090772511</td>
</tr>
</tbody>
</table>
</div>
Newton's Method did not converge. Too many iterations!!
<div id='acknowledgements' />
# Acknowledgements
[Back to TOC](#toc)
* _Material created by professor Claudio Torres_ (`[email protected]`) _and assistants: Laura Bermeo, Alvaro Salinas, Axel Simonsen and Martín Villanueva. DI UTFSM. March 2016._ v1.1.
* _Update April 2020 - v1.32 - C.Torres_ : Re-ordering the notebook.
* _Update April 2021 - v1.33 - C.Torres_ : Updating format and re-re-ordering the notebook. Adding 'maxNumberIterations' to bisection, fpi and Newton's method. Adding more explanations.
* _Update April 2021 - v1.33 - C.Torres_ : Updating description and solution of 'Proposed classwork'.
* _Update September 2021 - v1.35 - C.Torres_ : Updating and commenting code more.
* _Update September 2021 - v1.36 - C.Torres_ : Updating the way we show the output tables.
* _Update September 2021 - v1.37 - C.Torres_ : Fixing typo suggested by Nicolás Tapia 2021-2. Thanks Nicolás! And removing extra code.
<div id='extraexamples' />
# Extra examples
[Back to TOC](#toc)
## Proposed Classwork
1. Build a FPI such that given $a$ computes $\displaystyle \frac{1}{a}$. The constraint is that you can't use a division in the 'final' FPI. Write down your solution below or go and see the [solution](#sol1).
_Hint: I strongly suggest to use Newton's method._
```python
print('Please try to solve it before you see the solution!!!')
```
Please try to solve it before you see the solution!!!
2. Build an algorithm that computes $\log(x_i)$ for $x_i=0.1\,i+0.5$, for $i\in\{0,1,2,\dots,10\}$. The only special function available is $\exp(x)$; in particular, use _np.exp(x)_. You can also use $*$, $÷$, $+$, and $-$. It would be nice to use the result from the previous exercise to replace $÷$. A sketch of one possible approach is shown below.
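One possible approach (a sketch, not the official solution): apply Newton's method to $f(y)=e^y-x$, whose iteration $y_{i+1} = y_i - 1 + x\,e^{-y_i}$ uses only `np.exp`, $*$, $+$ and $-$:
```python
def log_via_exp(x, y0=0.0, steps=50):
    # Newton's method for f(y) = exp(y) - x reduces to y <- y - 1 + x*exp(-y),
    # which involves no division.
    y = y0
    for _ in range(steps):
        y = y - 1.0 + x*np.exp(-y)
    return y

for i in range(11):
    xi = 0.1*i + 0.5
    print(xi, log_via_exp(xi), np.log(xi))
```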
# In class
Which function shows quadratic convergence? Why?
```python
g1 = lambda x: (4./5.)*x+1./x
g2 = lambda x: x/2.+5./(2*x)
g3 = lambda x: (x+5.)/(x+1)
fpi(g1, 3.0, 10, True)
```
### Building a FPI to compute the cubic root of 7
```python
# What is 'a'? Can we find another 'a'?
a = -3*(1.7**2)
print(a)
```
-8.669999999999998
```python
f = lambda x: x**3-7
g = lambda x: f(x)/a+x
r=fpi(g, 1.7, 14, True)
print(f(r))
```
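One possible answer to the quick question: since $g(x)=f(x)/a+x$ gives $g'(x)=f'(x)/a+1$, choosing $a\approx -f'(r)$ makes $g'(r)\approx 0$. Here $a=-3\cdot(1.7)^2$ approximates $-f'(r)$ using the initial guess $1.7$ in place of $r=7^{1/3}\approx 1.913$; another valid choice is $a=-3\cdot 7^{2/3}\approx -10.98$, which is even closer to $-f'(r)$.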
### Playing with some roots
The following example proposes a particular function $f(x)$.
The idea here is to first obtain an initial guess for Newton's method from the plot in semilogy scale.
The plot of $f(x)$ (blue) shows that there seem to be 2 roots in the interval plotted.
Now, the plot of $f'(x)$ (magenta) indicates that the derivative may also be 0 at one of the roots; this means that the multiplicity of that root may be higher than 1. **Do you see it?**
```python
f = lambda x: 8*x**4-12*x**3+6*x**2-x
fp = lambda x: 32*x**3-36*x**2+12*x-1
x = np.linspace(-1,1,10000)
plt.figure(figsize=(10,10))
plt.title('What are we seeing with the semilogy plot? Is this function differentiable?')
plt.semilogy(x,np.abs(f(x)),'b-',label=r'$|f(x)|$')
plt.semilogy(x,np.abs(fp(x)),'m-',label=r"$|f'(x)|$")
plt.grid()
plt.legend()
plt.xlabel(r'$x$',fontsize=16)
plt.show()
```
```python
r=newton_method(f, fp, 0.4, rel_error=1e-8, m=1)
print([r,f(r)])
# Is this showing quadratic convergence? If not, can you fix it?
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: center;">
<th></th>
<th>$i$</th>
<th>$x_i$</th>
<th>$x_{i+1}$</th>
<th>$e_i$</th>
<th>$\frac{e_i}{e_{i-1}}$</th>
<th>$\frac{e_i}{e_{i-1}^\alpha}$</th>
<th>$\frac{e_i}{e_{i-1}^2}$</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>0.4000000000</td>
<td>0.4363636364</td>
<td>0.0363636364</td>
<td>0.0000000000</td>
<td>0.0000000000</td>
<td>0.0000000000</td>
</tr>
<tr>
<th>1</th>
<td>1</td>
<td>0.4363636364</td>
<td>0.4586595886</td>
<td>0.0222959522</td>
<td>0.6131386861</td>
<td>4.7546137810</td>
<td>16.8613138686</td>
</tr>
<tr>
<th>2</th>
<td>2</td>
<td>0.4586595886</td>
<td>0.4728665654</td>
<td>0.0142069768</td>
<td>0.6371998210</td>
<td>6.6854108733</td>
<td>28.5791705436</td>
</tr>
<tr>
<th>3</th>
<td>3</td>
<td>0.4728665654</td>
<td>0.4820874099</td>
<td>0.0092208445</td>
<td>0.6490363622</td>
<td>8.9967528627</td>
<td>45.6843403691</td>
</tr>
<tr>
<th>4</th>
<td>4</td>
<td>0.4820874099</td>
<td>0.4881331524</td>
<td>0.0060457425</td>
<td>0.6556603932</td>
<td>11.8718891457</td>
<td>71.1063278311</td>
</tr>
<tr>
<th>5</th>
<td>5</td>
<td>0.4881331524</td>
<td>0.4921210847</td>
<td>0.0039879323</td>
<td>0.6596265522</td>
<td>15.5037819584</td>
<td>109.1059613639</td>
</tr>
<tr>
<th>6</th>
<td>6</td>
<td>0.4921210847</td>
<td>0.4947614808</td>
<td>0.0026403961</td>
<td>0.6620965214</td>
<td>20.1252241907</td>
<td>166.0250150478</td>
</tr>
<tr>
<th>7</th>
<td>7</td>
<td>0.4947614808</td>
<td>0.4965138385</td>
<td>0.0017523577</td>
<td>0.6636722755</td>
<td>26.0285404605</td>
<td>251.3533000745</td>
</tr>
<tr>
<th>8</th>
<td>8</td>
<td>0.4965138385</td>
<td>0.4976786184</td>
<td>0.0011647799</td>
<td>0.6646930088</td>
<td>33.5858180781</td>
<td>379.3135449254</td>
</tr>
<tr>
<th>9</th>
<td>9</td>
<td>0.4976786184</td>
<td>0.4984536173</td>
<td>0.0007749988</td>
<td>0.6653607474</td>
<td>43.2731170072</td>
<td>571.2330225492</td>
</tr>
<tr>
<th>10</th>
<td>10</td>
<td>0.4984536173</td>
<td>0.4989696118</td>
<td>0.0005159945</td>
<td>0.6658003716</td>
<td>55.7008361948</td>
<td>859.0985504567</td>
</tr>
<tr>
<th>11</th>
<td>11</td>
<td>0.4989696118</td>
<td>0.4993133111</td>
<td>0.0003436993</td>
<td>0.6660910430</td>
<td>71.6523877204</td>
<td>1290.8878511100</td>
</tr>
<tr>
<th>12</th>
<td>12</td>
<td>0.4993133111</td>
<td>0.4995423124</td>
<td>0.0002290013</td>
<td>0.6662837900</td>
<td>92.1337096817</td>
<td>1938.5659202606</td>
</tr>
<tr>
<th>13</th>
<td>13</td>
<td>0.4995423124</td>
<td>0.4996949214</td>
<td>0.0001526090</td>
<td>0.6664112926</td>
<td>118.4366832979</td>
<td>2910.0766376211</td>
</tr>
<tr>
<th>14</th>
<td>14</td>
<td>0.4996949214</td>
<td>0.4997966348</td>
<td>0.0001017134</td>
<td>0.6664965150</td>
<td>152.2211476726</td>
<td>4367.3461398087</td>
</tr>
<tr>
<th>15</th>
<td>15</td>
<td>0.4997966348</td>
<td>0.4998644327</td>
<td>0.0000677979</td>
<td>0.6665584076</td>
<td>195.6204335210</td>
<td>6553.3001358038</td>
</tr>
<tr>
<th>16</th>
<td>16</td>
<td>0.4998644327</td>
<td>0.4999096253</td>
<td>0.0000451926</td>
<td>0.6665778509</td>
<td>251.3627148814</td>
<td>9831.8335188516</td>
</tr>
<tr>
<th>17</th>
<td>17</td>
<td>0.4999096253</td>
<td>0.4999397542</td>
<td>0.0000301289</td>
<td>0.6666774378</td>
<td>323.0218359853</td>
<td>14751.9189004836</td>
</tr>
<tr>
<th>18</th>
<td>18</td>
<td>0.4999397542</td>
<td>0.4999598336</td>
<td>0.0000200794</td>
<td>0.6664506601</td>
<td>414.8682245216</td>
<td>22119.9939244004</td>
</tr>
<tr>
<th>19</th>
<td>19</td>
<td>0.4999598336</td>
<td>0.4999732282</td>
<td>0.0000133946</td>
<td>0.6670815682</td>
<td>533.6278356649</td>
<td>33222.1656531949</td>
</tr>
<tr>
<th>20</th>
<td>20</td>
<td>0.4999732282</td>
<td>0.4999821551</td>
<td>0.0000089268</td>
<td>0.6664505480</td>
<td>684.6850645121</td>
<td>49755.1439166074</td>
</tr>
<tr>
<th>21</th>
<td>21</td>
<td>0.4999821551</td>
<td>0.4999881259</td>
<td>0.0000059708</td>
<td>0.6688577677</td>
<td>883.0272572730</td>
<td>74926.5788229439</td>
</tr>
<tr>
<th>22</th>
<td>22</td>
<td>0.4999881259</td>
<td>0.4999921287</td>
<td>0.0000040028</td>
<td>0.6704027562</td>
<td>1134.8168859231</td>
<td>112280.4498807474</td>
</tr>
<tr>
<th>23</th>
<td>23</td>
<td>0.4999921287</td>
<td>0.4999947419</td>
<td>0.0000026133</td>
<td>0.6528511548</td>
<td>1414.9287092325</td>
<td>163097.2875577784</td>
</tr>
<tr>
<th>24</th>
<td>24</td>
<td>0.4999947419</td>
<td>0.4999967498</td>
<td>0.0000020079</td>
<td>0.7683392122</td>
<td>2167.3273576456</td>
<td>294016.2831700209</td>
</tr>
</tbody>
</table>
</div>
[0.49999718771149326, 0.0]
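The ratio $e_{i+1}/e_i$ in the table above settles near $2/3=(m-1)/m$, which suggests multiplicity $m=3$; indeed $f(x)=x\,(2x-1)^3$, so the root $r=1/2$ has multiplicity 3. A quick sketch to recover quadratic convergence:
```python
# Modified Newton with the multiplicity m=3 of the root r = 1/2.
r = newton_method(f, fp, 0.4, rel_error=1e-8, m=3)
print([r, f(r)])
```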
<div id='sol1' />
## Solutions
Problem: Build a FPI such that given $a$ computes $\displaystyle \frac{1}{a}$
```python
# We are finding 1/a without using any division
# Solution code:
a = 2.1
g = lambda x: 2*x-a*x**2
gp = lambda x: 2-2*a*x
r=fpi(g, 0.7, 7, flag_cobweb=False)
print('Reciprocal found :',r)
print('Reciprocal computed explicitly: ', 1/a)
# Are we seeing quadratic convergence?
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: center;">
<th></th>
<th>$i$</th>
<th>$x_i$</th>
<th>$x_{i+1}$</th>
<th>$e_i$</th>
<th>$\frac{e_i}{e_{i-1}}$</th>
<th>$\frac{e_i}{e_{i-1}^\alpha}$</th>
<th>$\frac{e_i}{e_{i-1}^2}$</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>0.7000000000</td>
<td>0.3710000000</td>
<td>0.3290000000</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>1</th>
<td>1</td>
<td>0.3710000000</td>
<td>0.4529539000</td>
<td>0.0819539000</td>
<td>0.2491000000</td>
<td>0.4951799738</td>
<td>0.7571428571</td>
</tr>
<tr>
<th>2</th>
<td>2</td>
<td>0.4529539000</td>
<td>0.4750566054</td>
<td>0.0221027054</td>
<td>0.2696968100</td>
<td>1.2656881308</td>
<td>3.2908355795</td>
</tr>
<tr>
<th>3</th>
<td>3</td>
<td>0.4750566054</td>
<td>0.4761877763</td>
<td>0.0011311709</td>
<td>0.0511779387</td>
<td>0.5398483531</td>
<td>2.3154603813</td>
</tr>
<tr>
<th>4</th>
<td>4</td>
<td>0.4761877763</td>
<td>0.4761904762</td>
<td>0.0000026999</td>
<td>0.0023867984</td>
<td>0.1580649031</td>
<td>2.1100246103</td>
</tr>
<tr>
<th>5</th>
<td>5</td>
<td>0.4761904762</td>
<td>0.4761904762</td>
<td>0.0000000000</td>
<td>0.0000056698</td>
<td>0.0156742306</td>
<td>2.1000206476</td>
</tr>
<tr>
<th>6</th>
<td>6</td>
<td>0.4761904762</td>
<td>0.4761904762</td>
<td>0.0000000000</td>
<td>0.0000000000</td>
<td>0.0000000000</td>
<td>0.0000000000</td>
</tr>
</tbody>
</table>
</div>
Reciprocal found : 0.47619047619047616
Reciprocal computed explicitly: 0.47619047619047616
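A note on why this works: $g(x)=2x-a\,x^2$ is exactly Newton's method applied to $f(x)=1/x-a$, since $x - f(x)/f'(x) = x + x^2\,(1/x - a) = 2x - a\,x^2$; no division survives. Moreover, with $r=1/a$ we have $r - g(x) = a\,(r-x)^2$, so $e_{i+1}=a\,e_i^2$. This is why the last column of the table settles at $a=2.1$: quadratic convergence.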
### What is this plot telling us?
This plot shows that, even if we don't know the exact value of $g'(r)$, we can determine whether the FPI will converge by looking at the plot.
Here we observe that the value of $|g'(r)|$ will be less than 1, since the magenta curve $g'(x)$ lies between the black dashed lines located at $y=-1$ and $y=1$.
```python
xx=np.linspace(0.2,0.8,1000)
plt.figure(figsize=(10,10))
plt.plot(xx,g(xx),'r-',label=r'$g(x)$')
plt.plot(xx,gp(xx),'m-',label=r'$gp(x)$')
plt.plot(xx,xx,'b-',label=r'$x$')
plt.plot(xx,0*xx+1,'k--')
plt.plot(xx,0*xx-1,'k--')
plt.legend(loc='best')
plt.grid()
plt.show()
```
```python
```
|
(*******************************************************************)
(* This is part of RelationAlgebra, it is distributed under the *)
(* terms of the GNU Lesser General Public License version 3 *)
(* (see file LICENSE for more details) *)
(* *)
(* Copyright 2012: Damien Pous. (CNRS, LIP - ENS Lyon, UMR 5668) *)
(*******************************************************************)
(** * positives: basic facts about binary positive numbers *)
Require Export BinNums.
Require Import comparisons.
(** positives as a [cmpType] *)
Fixpoint eqb_pos i j :=
match i,j with
| xH,xH => true
| xI i,xI j | xO i, xO j => eqb_pos i j
| _,_ => false
end.
Lemma eqb_pos_spec: forall i j, reflect (i=j) (eqb_pos i j).
Proof. induction i; intros [j|j|]; simpl; (try case IHi); constructor; congruence. Qed.
Fixpoint pos_compare i j :=
match i,j with
| xH, xH => Eq
| xO i, xO j | xI i, xI j => pos_compare i j
| xH, _ => Lt
| _, xH => Gt
| xO _, _ => Lt
| _,_ => Gt
end.
Lemma pos_compare_spec: forall i j, compare_spec (i=j) (pos_compare i j).
Proof. induction i; destruct j; simpl; try case IHi; try constructor; congruence. Qed.
Canonical Structure cmp_pos := mk_cmp _ eqb_pos_spec _ pos_compare_spec.
(** positive maps (for making environments) *)
(** we redefine such trees here rather than importing them from the standard library:
since we do not need any proofs about them, this saves us a heavy Require Import *)
Section e.
Variable A: Type.
Inductive sigma := sigma_empty | N(l: sigma)(o: option A)(r: sigma).
Fixpoint sigma_get default m i :=
match m with
| N l o r =>
match i with
| xH => match o with None => default | Some a => a end
| xO i => sigma_get default l i
| xI i => sigma_get default r i
end
| _ => default
end.
Fixpoint sigma_add i v m :=
match m with
| sigma_empty =>
match i with
| xH => N sigma_empty (Some v) sigma_empty
| xO i => N (sigma_add i v sigma_empty) None sigma_empty
| xI i => N sigma_empty None (sigma_add i v sigma_empty)
end
| N l o r =>
match i with
| xH => N l (Some v) r
| xO i => N (sigma_add i v l) o r
| xI i => N l o (sigma_add i v r)
end
end.
End e.
|
# -*- coding: utf-8 -*-
import numpy as np


class EuropeanCallOption:
    def __init__(self, tau, strike):
        # tau: time to expiry; strike: strike price of the option
        self.tau = tau
        self.strike = strike

    def __call__(self, forward):
        # Payoff of a European call: max(forward - strike, 0)
        return np.maximum(forward - self.strike, 0)

    def __str__(self):
        out_str = f"tau: {self.tau}\n\r" +\
                  f"strike: {self.strike}\n\r"
        return out_str
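
# A minimal usage sketch (hypothetical numbers, for illustration only):
if __name__ == "__main__":
    option = EuropeanCallOption(tau=1.0, strike=100.0)
    print(option(np.array([90.0, 100.0, 110.0])))  # -> [ 0.  0. 10.]
    print(option)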
|
Ramon isn't currently collecting unemployment benefits. Ann didn't see Murph anymore. Did you leave a tip? Duane smells horrible. Do we need more inflation? Look out for my friend Rolfe. Abel was killed by Cain. I refused her invitation to dinner. I thought Thomas would get fired. Were you the one who advised Al to go to the police?
He is all but dead. I need coloured pencils. Caroline is on the basketball team. Your tea will get cold if you don't drink it soon. In those days, he lived in the house alone. Jacobson is breathing hard. Wait till Tao gets here. The fawn bolted from its hiding place. Louiqa banged his head. Most people have ups and downs in their marriages.
A swine sees no sky. Success is never blamed. Angela begged me to help him. What is all that? Sit down for a second. Why am I up? I'll buy that back from you if that's what you want. Caroline became calm. I went for a swim. Don't tell me what I'm supposed to do.
Making model spaceships is interesting. Anton picked up takeout on his way home. One of my six Facebook accounts was suspended. Aaron opened the door and turned on the light. This dress shrank, and what's more it faded. Chuck tore up the letter he got from Sigurd. I think we were lied to. Don't you have a driver's license? Alberto asked the DJ for a slow song. Just follow my lead.
We're chopping off their head. You should learn Esperanto. Whatever! Knapper couldn't completely rule out the possibility that he might be laid off from work. You have to pay taxes. I went to elementary school in Nagoya. Sherri ambushed a policeman and killed him with an ax. I sometimes get scared. He is still angry with you for your conduct. I had to tell him about us.
Your plan sounds great. What a bad film! Who's your favorite horror movie character? I hope you can still look at yourself in the mirror my friend. Juan is in great shape. You can have it for nothing. She went with him. It's not safe at night around here. They say Pontus did it. The leaves blew off.
It's probably healthier to eat popcorn than it is to eat potato chips. She is more beautiful than you think. We should do this more often. He looks young. He cannot be older than I. I was really, really disappointed. This city is called the Japanese Denmark. Can you put the children to bed? You're going to love it. Once again she was lived in and taken care of. The doctor painted Gale's throat with iodine.
We all love you. Pat leaned back and smiled. Outside, the storm was rumbling. Lucy witnessed a murder in the main square of Florence. How could anything be worse than this? Wendy seems to be afraid of something. He has a son. Tolerant saw Kamel coming towards him. How long have you guys been standing there? He's partially right.
I told you I needed some air. Many of the students were tired. I did everything in my power to protect her from you. It will never happen again. You'd be stupid to trust him. Who's had enough? Bill used to mow lawns as a part time job. Who should we give it to? I'm not asking for money. It is time to empty the garbage.
He has got it. I want to eat Chinese food. Would you accept a Brazilian coffee? She was very surprised at the sight. We're always learning. There's no leash law here. Over 68 percent of Earth's freshwater is locked up in ice and glaciers; and another 30 percent is in groundwater. He likes sweets. Gordon is the last person I want to see now. Ken is too busy to come. |
######################################################################
# pairs(A) is the set of all pairs (a,b) in A^2 with a <> b
`is_element/pairs` := (A::set) -> proc(ab)
type(ab,list) and nops(ab) = 2 and
member(ab[1],A) and member(ab[2],A) and ab[1] <> ab[2];
end;
`is_equal/pairs` := (A::set) -> (ab,cd) -> evalb(ab = cd):
`is_leq/pairs` := NULL;
`random_element/pairs` := (A::set) -> proc()
local n,r,i,j;
n := nops(A);
if n < 2 then return FAIL; fi;
i := rand(1..n)();
j := rand(1..n-1)();
if j >= i then j := j+1; fi;
return [A[i],A[j]];
end;
`list_elements/pairs` := proc(A::set)
local n,i,j;
n := nops(A);
[seq(op([seq([A[i],A[j]],j=1..i-1),seq([A[i],A[j]],j=i+1..n)]),i=1..n)];
end:
`count_elements/pairs` := (A::set) -> nops(A) * (nops(A)-1);
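# Illustrative example: for A = {1,2,3},
#   `list_elements/pairs`(A) = [[1,2],[1,3],[2,1],[2,3],[3,1],[3,2]]
#   `count_elements/pairs`(A) = 6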
|
The norm of a complex number is nonnegative. |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <functional>
#include <iostream>
#include <memory>
#include <thread>
#include <vector>
#include <boost/asio.hpp>
#include <boost/beast/http.hpp>
#include "context.h"
#include "session.h"
#include "listener.h"
#include "http_server.h"
namespace http = boost::beast::http; // from <boost/beast/http.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
namespace onnxruntime {
namespace server {
App::App() {
http_details.address = boost::asio::ip::make_address_v4("0.0.0.0");
http_details.port = 8001;
http_details.threads = std::thread::hardware_concurrency();
}
App& App::Bind(net::ip::address address, unsigned short port) {
http_details.address = std::move(address);
http_details.port = port;
return *this;
}
App& App::NumThreads(int threads) {
http_details.threads = threads;
return *this;
}
App& App::RegisterStartup(const StartFn& on_start) {
on_start_ = on_start;
return *this;
}
App& App::RegisterPost(const std::string& route, const HandlerFn& fn) {
routes_.RegisterController(http::verb::post, route, fn);
return *this;
}
App& App::RegisterError(const ErrorFn& fn) {
routes_.RegisterErrorCallback(fn);
return *this;
}
App& App::Run() {
net::io_context ioc{http_details.threads};
// Create and launch a listening port
auto listener = std::make_shared<Listener>(routes_, ioc, tcp::endpoint{http_details.address, http_details.port});
auto initialized = listener->Init();
if (!initialized) {
exit(EXIT_FAILURE);
}
auto started = listener->Run();
if (!started) {
exit(EXIT_FAILURE);
}
// Run user on_start function
on_start_(http_details);
// Run the I/O service on the requested number of threads
std::vector<std::thread> v;
v.reserve(http_details.threads - 1);
for (auto i = http_details.threads - 1; i > 0; --i) {
v.emplace_back(
[&ioc] {
ioc.run();
});
}
ioc.run();
return *this;
}
} // namespace server
} // namespace onnxruntime
|
/-
2006 STEP 3 Question 8
-/
import data.polynomial
data.real.basic
tactic
open polynomial
/-
Δ is a function that takes polynomials in x to polynomials in x; that is, given
any polynomial h(x), there is a polynomial called Δh(x) which is obtained from
h(x) using the rules that define Δ.
-/
variable Δ : polynomial ℝ → polynomial ℝ
include Δ
/-
These rules are as follows
-/
variable Δ1 : Δ X = C 1
variable Δ2 : ∀ (f g : polynomial ℝ), Δ(f + g) = Δ f + Δ g
variable Δ3 : ∀ (k : ℝ) (f : polynomial ℝ), Δ(C k * f) = C k * Δ f
variable Δ4 : ∀ (f g : polynomial ℝ), Δ(f * g) = f * Δ g + g * Δ f
include Δ1 Δ2 Δ3 Δ4
/-
Using these rules show that, if f(x) is a polynomial of degree zero (that is, a
constant), then Δ f(x) = 0.
-/
-- First, show that Δ 1 is 0
lemma Δ_one : Δ (C 1) = 0 := begin
-- If we can prove that Δ 1 + Δ 1 = Δ 1, then Δ 1 = 0 follows
-- Therefore we have a proof of Δ 1 = 0 if we have a proof of Δ 1 + Δ 1 = Δ 1
suffices H : Δ 1 + Δ 1 = Δ 1,
rwa add_left_eq_self at H,
-- Δ 1 + Δ 1 = Δ (1 * 1)
conv begin to_rhs,
rw (show (1 : polynomial ℝ) = 1 * 1, by ring),
end,
-- By rule 4, this expands
rw Δ4,
-- This then simplifies down to our desired result
ring,
end
-- Having shown that Δ 1 is 0, it follows that Δ c (for c ∈ ℝ) is also 0
lemma Δ_const (a : ℝ) : Δ (C a) = 0 := begin
-- Δ c = Δ (c * 1)
rw (show (C a) = (C a) * (C 1), by rw [<-C_mul,mul_one]),
-- By rule 3 we can expand this
rw Δ3,
-- We know from above that Δ 1 is 0
rw Δ_one Δ, simp,
-- Therefore Δ c must be 0
repeat {assumption}
end
/-
Calculate Δx^2 and Δx^3
-/
lemma ΔXsquared : Δ (X^2) = 2*X := begin
-- Δ (x^2) = Δ (x*x)
rw pow_two,
-- By rule 4, this is the same as X * Δ X + X * Δ X
rw Δ4,
-- By rule 1, this is just X * 1 + X * 1
rw Δ1,
-- Which is 2*X
rw mul_comm,
rw <-add_mul,
norm_num,
end
lemma ΔXcubed : Δ (X^3) = 3*X^2 := begin
-- Δ (x^3) = Δ (x * x^2)
rw (show (X:polynomial ℝ)^3 = X * X^2, by ring),
-- Use rule 4 to expand this
rw Δ4,
-- Then use the result for Δx^2
rw ΔXsquared Δ, rw <-mul_assoc, rw Δ1, rw mul_comm, rw <-mul_assoc,
rw <-pow_two, rw <-mul_add, rw mul_comm, refl,
repeat {assumption},
end
/-
Prove that Δh(x) ≡ dh(x)/dx for any polynomial h(x). You should make it clear
whenever you use one of the above rules in your proof.
-/
lemma ΔXn (n : ℕ) : Δ (X^(n+1)) = C (n+1)*X^n := begin
induction n with d hd,
{ -- base case
rw [pow_one, Δ1, nat.cast_zero],
simp,
},
rw [nat.succ_eq_add_one, pow_succ, Δ4, Δ1, hd, pow_succ,
nat.cast_add, nat.cast_one],
rw (show C (1:ℝ) = 1, by simp),
simp,
ring,
end
lemma Δ_is_derivative (p : polynomial ℝ) : Δ p = derivative p :=
begin
apply p.induction_on,
{ intro a, rw Δ_const Δ, simp, repeat {assumption}},
{ intros p q hp hq, rw Δ2, rw hp, rw hq, simp,},
intros a n IH,
rw [Δ4, Δ_const Δ, ΔXn Δ, mul_zero, add_zero, derivative_monomial, <-mul_assoc],
simp,
repeat {assumption},
end |
\section{Further work}
Extend the raytracer with intersection programs for geometry that is better suited to ray tracing than rasterization. Examples:
\begin{itemize}
\item NURBS
\item Meta shapes
\item Distance fields
\end{itemize} |
/**
* @file libhades.c
* @author Michael Hartmann <[email protected]>
* @date February, 2016
* @brief library to access low-level LAPACK functions
*/
#include <cblas.h>
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <libhades.h>
#include <libhades/parse_npy_dict.h>
/** \defgroup misc miscellaneous functions
* @{
*/
/** @brief malloc wrapper
*
* This function uses malloc to allocate size bytes of memory and returns a
* pointer to the memory. If an error occurs, an error message is printed to
* stderr and the program is aborted.
*
* @param [in] size amount of memory to allocate
* @retval ptr pointer to the allocated memory
*/
void *libhades_malloc(size_t size)
{
void *ptr = malloc(size);
if(ptr == NULL)
{
const int err = errno;
fprintf(stderr, "malloc can't allocate %zu bytes of memory: %s (%d)\n", size, strerror(err), err);
abort();
}
return ptr;
}
/** @brief free wrapper
*
* This function frees the memory allocated by \ref libhades_malloc or \ref
* libhades_realloc (or malloc and realloc). If the pointer given is NULL, an error is
* printed to stderr and the program is aborted.
*
* @param [in] ptr pointer to the memory that should be freed
*/
void libhades_free(void *ptr)
{
if(ptr == NULL)
{
fprintf(stderr, "Trying to free NULL pointer\n");
abort();
}
free(ptr);
}
/** @brief realloc wrapper
*
* This function changes the size of the memory block pointed to by ptr to size
* bytes. If ptr is NULL, this function behaves like \ref libhades_malloc. If
* an error occurs, an error message is printed to stderr and the program is aborted.
*
* @param [in] ptr pointer to the memory block
* @param [in] size size of the memory block
* @retval ptr_new pointer to the new memory block
*/
void *libhades_realloc(void *ptr, size_t size)
{
void *ptr_new = realloc(ptr, size);
if(ptr_new == NULL)
{
int err = errno;
fprintf(stderr, "realloc can't allocate %zu bytes of memory: %s (%d)\n", size, strerror(err), err);
abort();
}
return ptr_new;
}
static void *(*malloc_cb)(size_t) = &libhades_malloc;
static void *(*realloc_cb)(void *, size_t) = &libhades_realloc;
static void (*free_cb)(void *) = &libhades_free;
/** macro to create functions argmin, argmax, argabsmin, argabsmax */
#define ARGXXX(FUNCTION_NAME, FUNCTION, RELATION) \
size_t FUNCTION_NAME(double list[], size_t size) \
{ \
size_t index = 0; \
for(size_t i = 0; i < size; i++) \
if(FUNCTION(list[i]) RELATION FUNCTION(list[index])) \
index = i; \
return index; \
}
/** @brief Return index of smallest element in list
*
* @param [in] list
* @param [in] size elements in list
*
* @retval index
*/
ARGXXX(argmin, +, <)
/** @brief Return index of largest element in list
*
* @param [in] list
* @param [in] size elements in list
*
* @retval index
*/
ARGXXX(argmax, +, >)
/** @brief Return index of element with smallest absolute value in list
*
* @param [in] list
* @param [in] size elements in list
*
* @retval index
*/
ARGXXX(argabsmin, fabs, <)
/** @brief Return index of element with largest absolute value in list
*
* @param [in] list
* @param [in] size elements in list
*
* @retval index
*/
ARGXXX(argabsmax, fabs, >)
/** @}*/
/** \defgroup create Creating, printing and freeing matrices
* @{
*/
static inline void _swap_int(int *a, int *b);
static inline void _swap_size_t(size_t *a, size_t *b);
static inline void _swap_int(int *a, int *b)
{
int c = *a;
*a = *b;
*b = c;
}
static inline void _swap_size_t(size_t *a, size_t *b)
{
size_t c = *a;
*a = *b;
*b = c;
}
#define MATRIX_SWAP(FUNCTION_NAME, MATRIX_TYPE, TYPE) \
void FUNCTION_NAME(MATRIX_TYPE *A, MATRIX_TYPE *B) \
{ \
/* swap pointers */ \
TYPE *ptr = A->M; \
A->M = B->M; \
B->M = ptr; \
\
/* swap rows */ \
_swap_int(&A->rows, &B->rows); \
\
/* swap columns */ \
_swap_int(&A->columns, &B->columns); \
\
/* swap min */ \
_swap_int(&A->min, &B->min); \
\
/* swap size */ \
_swap_size_t(&A->size, &B->size); \
\
/* swap type */ \
_swap_int(&A->type, &B->type); \
}
/** @brief Swap matrices A and B
*
* This function swaps the matrices A and B. The former content of A will be
* the content of B and vice versa. No data is copied or moved, but the
* pointers are swapped.
*
* @param [in,out] A matrix A
* @param [in,out] B matrix B
*/
MATRIX_SWAP(matrix_swap, matrix_t, double);
/** @brief Swap matrices A and B
*
* See \ref matrix_swap.
*
* @param [in,out] A matrix A
* @param [in,out] B matrix B
*/
MATRIX_SWAP(matrix_complex_swap, matrix_complex_t, complex_t);
/** macro to create a diagonal matrix out of a vector */
#define MATRIX_DIAG(FUNCTION_NAME, MATRIX_TYPE, ZEROS) \
MATRIX_TYPE *FUNCTION_NAME(MATRIX_TYPE *v) \
{ \
int dim = v->size; \
MATRIX_TYPE *A = ZEROS(dim, dim, NULL); \
if(A == NULL) \
return NULL; \
\
for(int i = 0; i < dim; i++) \
matrix_set(A, i,i, v->M[i]); \
\
return A; \
}
/** @brief Construct a real diagonal matrix from a row or column vector
*
* @param [in] v row or column vector
*
* @retval A A = diag(v) if successful, NULL otherwise
*/
MATRIX_DIAG(matrix_diag, matrix_t, matrix_zeros)
/** @brief Construct a complex diagonal matrix from a row or column vector
*
* @param [in] v row or column vector
*
* @retval A A = diag(v) if successful, NULL otherwise
*/
MATRIX_DIAG(matrix_complex_diag, matrix_complex_t, matrix_complex_zeros)
/** @brief Set functions to allocate and free memory
*
* By default wrappers to malloc and free from <stdlib.h> are used to allocate
* and free memory. If allocation of memory fails or a NULL pointer is freed,
* the program will terminate.
*
* @param [in] _malloc_cb callback to a malloc-alike function
* @param [in] _free_cb callback to a free-alike function
*/
void matrix_set_alloc(void *(*_malloc_cb)(size_t), void (*_free_cb)(void *))
{
malloc_cb = _malloc_cb;
free_cb = _free_cb;
}
/** macro for copying matrices. */
#define MATRIX_COPY(FUNCTION_NAME, MTYPE, TYPE, ALLOC) \
MTYPE *FUNCTION_NAME(MTYPE *A, MTYPE *C) \
{ \
if(C == NULL) { \
C = ALLOC(A->rows, A->columns); \
if(C == NULL) \
return NULL; \
} \
\
C->rows = A->rows; \
C->columns = A->columns; \
C->min = A->min; \
C->size = A->size; \
C->type = A->type; \
C->view = 0; \
memcpy(C->M, A->M, C->size*sizeof(TYPE)); \
return C; \
}
/** @brief Copy real matrix A
*
* Copy matrix A into C. If C is NULL, space for the matrix C will be
* allocated.
*
* @param [in] A real matrix
* @param [in,out] C real matrix
*
* @retval C copy of A
*/
MATRIX_COPY(matrix_copy, matrix_t, double, matrix_alloc)
/** @brief Copy complex matrix A
*
* Copy matrix A into C. If C is NULL, space for the matrix C will be
* allocated.
*
* @param [in] A complex matrix
* @param [in,out] C complex matrix
*
* @retval C copy of A
*/
MATRIX_COPY(matrix_complex_copy, matrix_complex_t, complex_t, matrix_complex_alloc)
/** @brief Copy a real matrix A to a complex matrix C
*
* Copy the matrix A to a complex matrix C. The matrix elements of A and C will
* be identical. If C is NULL, memory for the matrix will be allocated.
*
* @param [in] A real matrix
*
* @retval C copy of A if successful, NULL otherwise
*/
matrix_complex_t *matrix_tocomplex(matrix_t *A, matrix_complex_t *C)
{
if(C == NULL)
{
C = matrix_complex_alloc(A->rows, A->columns);
if(C == NULL)
return NULL;
}
for(size_t i = 0; i < C->size; i++)
C->M[i] = A->M[i];
return C;
}
/** @brief Print real matrix M to stream
*
* Print the matrix A to the stream given by stream, e.g. stdout or stderr.
* The format is given by the format string format, the separator of two
* columns is sep, the separator between lines is given by sep_line. Both sep
* and sep_line may be NULL.
*
* @param [in] stream output stream
* @param [in] A real matrix
* @param [in] format output format, e.g. "%lf" or "%g"
* @param [in] sep separator between columns, e.g. "\t"
* @param [in] sep_line separator between lines, e.g. "\n"
*/
void matrix_fprintf(FILE *stream, matrix_t *A, const char *format, const char *sep, const char *sep_line)
{
const int rows = A->rows, columns = A->columns;
for(int i = 0; i < rows; i++)
{
for(int j = 0; j < columns; j++)
{
fprintf(stream, format, matrix_get(A, i,j));
if(sep != NULL)
fputs(sep, stream);
}
if(sep_line != NULL)
fputs(sep_line, stream);
}
}
/** @brief Print complex matrix A to stream
*
* See matrix_fprintf.
*
* @param [in] stream output stream
* @param [in] A complex matrix
* @param [in] format output format, e.g. "%+lf%+lfi"
* @param [in] sep separator between columns, e.g. "\t"
* @param [in] sep_line separator between lines, e.g. "\n"
*/
void matrix_complex_fprintf(FILE *stream, matrix_complex_t *A, const char *format, const char *sep, const char *sep_line)
{
const int rows = A->rows, columns = A->columns;
for(int i = 0; i < rows; i++)
{
for(int j = 0; j < columns; j++)
{
const complex_t c = matrix_get(A, i,j);
fprintf(stream, format, CREAL(c), CIMAG(c));
if(sep != NULL)
fputs(sep, stream);
}
if(sep_line != NULL)
fputs(sep_line, stream);
}
}
/** macro for matrix allocations. */
#define MATRIX_ALLOC(FUNCTION_NAME, MTYPE, TYPE) \
MTYPE *FUNCTION_NAME(int rows, int columns) \
{ \
if(rows <= 0 || columns <= 0) \
return NULL; \
\
MTYPE *A = malloc_cb(sizeof(MTYPE)); \
if(A == NULL) \
return NULL; \
\
const size_t size = (size_t)rows*(size_t)columns; \
\
A->rows = rows; \
A->columns = columns; \
A->min = MIN(rows, columns); \
A->size = size; \
A->type = 0; \
A->view = 0; \
A->M = malloc_cb(size*sizeof(TYPE)); \
if(A->M == NULL) \
{ \
free_cb(A); \
return NULL; \
} \
\
return A; \
}
/** @brief Allocate memory for real matrix of m rows and n columns
*
* This function will allocate and return a matrix of m rows and n columns.
* The function given by matrix_set_alloc will be used to allocate memory; by
* default this is malloc from <stdlib.h>.
*
* The matrix elements will be undefined. To allocate a matrix initialized with
* zeros, see matrix_zeros. To create a unity matrix, see matrix_eye.
*
* @param [in] rows rows of matrix M (rows > 0)
* @param [in] columns columns of matrix M (columns > 0)
*
* @retval A real matrix if successful, NULL otherwise
*/
MATRIX_ALLOC(matrix_alloc, matrix_t, double)
/** @brief Allocate memory for complex matrix of m rows and n columns
*
* See matrix_alloc.
*
* @param [in] rows rows of matrix M (rows > 0)
* @param [in] columns columns of matrix M (columns > 0)
*
* @retval A complex matrix if successful, otherwise NULL
*/
MATRIX_ALLOC(matrix_complex_alloc, matrix_complex_t, complex_t)
/** macro to create zero matrix */
#define MATRIX_ZEROS(FUNCTION_NAME, MTYPE, TYPE, ALLOC, SETALL) \
MTYPE *FUNCTION_NAME(int rows, int columns, MTYPE *A) \
{ \
if(A == NULL) \
{ \
A = ALLOC(rows,columns); \
if(A == NULL) \
return NULL; \
} \
\
SETALL(A, 0); \
\
return A; \
}
/** @brief Generate a real zero matrix of m rows and n columns
*
* Set every element of matrix A to 0. If A is NULL, the matrix will be
* created. In this case, you have to free the matrix yourself.
*
* @param [in] rows rows of matrix M
* @param [in] columns columns of matrix M
* @param [in,out] A: matrix
*
* @retval A real matrix 0 if successful, otherwise NULL
*/
MATRIX_ZEROS(matrix_zeros, matrix_t, double, matrix_alloc, matrix_setall)
/** @brief Generate a complex zero matrix of m rows and n columns
*
* See matrix_zeros.
*
* @param [in] rows rows of matrix M
* @param [in] columns columns of matrix M
* @param [in,out] A: matrix
*
* @retval A complex matrix 0 if successful, otherwise NULL
*/
MATRIX_ZEROS(matrix_complex_zeros, matrix_complex_t, complex_t, matrix_complex_alloc, matrix_complex_setall)
/** macro to set all matrix elements to x */
#define MATRIX_SETALL(FUNCTION_NAME, MATRIX_TYPE, TYPE) \
void FUNCTION_NAME(MATRIX_TYPE *A, TYPE x) \
{ \
const size_t size = A->size; \
TYPE *M = A->M; \
for(size_t i = 0; i < size; i++) \
*M++ = x; \
}
/** @brief Set matrix elements of A to x
*
* @param [in,out] A real matrix
* @param [in] x real number
*/
MATRIX_SETALL(matrix_setall, matrix_t, double)
/** @brief Set matrix elements of A to x
*
* @param [in,out] A complex matrix
* @param [in] x complex number
*/
MATRIX_SETALL(matrix_complex_setall, matrix_complex_t, complex_t)
/** macro to create identity matrix */
#define MATRIX_EYE(FUNCTION_NAME, MTYPE, TYPE, ALLOC, SETALL) \
MTYPE *FUNCTION_NAME(int dim, MTYPE *A) \
{ \
if(A == NULL) \
{ \
A = ALLOC(dim,dim); \
if(A == NULL) \
return NULL; \
} \
\
const int min = A->min; \
TYPE *M = A->M; \
SETALL(A,0); \
for(int i = 0; i < min; i++) \
*(M+i*(A->rows+1)) = 1; /* diagonal stride is rows+1 (column-major) */ \
\
return A; \
}
/** @brief Create real identity matrix
*
* If A == NULL, an identity matrix of dimension dim times dim is created and
* returned.
* If A != NULL, the matrix A is set to the identity matrix and the parameter
* dim is ignored. More specifically, for a general (e.g. not square) matrix A,
* the matrix elements are set to A_ij = Delta_ij.
*
* @param [in] dim dimension of identity matrix (ignored if A != NULL)
* @param [in,out] A real matrix
*
* @retval A identity matrix if successful, NULL otherwise
*/
MATRIX_EYE(matrix_eye, matrix_t, double, matrix_alloc, matrix_setall)
/** @brief Create complex identity matrix
*
* See matrix_eye.
*
* @param [in] dim dimension of identity matrix (ignored if A != NULL)
* @param [in,out] A complex matrix
*
* @retval A identity matrix if successful, NULL otherwise
*/
MATRIX_EYE(matrix_complex_eye, matrix_complex_t, complex_t, matrix_complex_alloc, matrix_complex_setall)
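/* Illustrative usage sketch (not part of the library): create and reuse an
 * identity matrix. Guarded so it is not compiled by default. */
#ifdef LIBHADES_EXAMPLES
static void example_eye(void)
{
    /* freshly allocated 4x4 identity matrix */
    matrix_t *A = matrix_eye(4, NULL);
    if(A != NULL)
    {
        matrix_setall(A, 7); /* overwrite every element */
        matrix_eye(0, A);    /* dim is ignored; A is reset to the identity */
    }
    matrix_free(A);
}
#endif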
/** macro to free matrices */
#define MATRIX_FREE(FUNCTION_NAME, MTYPE) \
void FUNCTION_NAME(MTYPE *A) \
{ \
if(A != NULL) \
{ \
if(!A->view && A->M != NULL) \
{ \
free_cb(A->M); \
A->M = NULL; \
} \
free_cb(A); \
} \
}
/** @brief Free real matrix
*
* This function will free the memory allocated for matrix M. If M is NULL this
* function will do nothing.
*
* @param [in,out] A matrix to free
*/
MATRIX_FREE(matrix_free, matrix_t)
/** @brief Free complex matrix
*
* See matrix_free.
*
* @param [in,out] A matrix to free
*/
MATRIX_FREE(matrix_complex_free, matrix_complex_t)
/** @}*/
/** \defgroup trdet trace and determinant
* @{
*/
/** macro to calculate trace of matrix */
#define MATRIX_TRACE(FUNCTION_NAME, MATRIX_TYPE, TYPE) \
TYPE FUNCTION_NAME(MATRIX_TYPE *A) \
{ \
const int min = A->min; \
const TYPE *M = A->M; \
TYPE trace = 0; \
for(int i = 0; i < min; i++) \
trace += *(M+i*(A->rows+1)); /* diagonal stride is rows+1 (column-major) */ \
\
return trace; \
}
/** @brief Calculate Tr(A) of real matrix A
*
* @param [in] A real matrix
*
* @retval x with x=Tr(A)
*/
MATRIX_TRACE(matrix_trace, matrix_t, double)
/** @brief Calculate Tr(A) of complex matrix A
*
* @param [in] A complex matrix
*
* @retval z with z=Tr(A)
*/
MATRIX_TRACE(matrix_complex_trace, matrix_complex_t, complex_t)
/** @brief Calculate Tr(A*B)
*
* This will calculate the trace of A*B: Tr(A*B). Matrix A and B must be square
* matrices of dimension dim, dim.
*
* A and B must not point to the same matrix or the behaviour will be
* undefined!
*
* @param [in] A real matrix
* @param [in] B real matrix
*
* @retval x with x=Tr(A*B)
*/
double matrix_trace_AB(matrix_t *A, matrix_t *B)
{
const int dim = A->min;
double sum = 0;
double *M1 = A->M;
double *M2 = B->M;
for(int i = 0; i < dim; i++)
sum += cblas_ddot(dim, M1+i, dim, M2+i*dim, 1);
return sum;
}
/** @brief Calculate Tr(A*B) for A,B complex
*
* See matrix_trace_AB.
*
* @param [in] A complex matrix
* @param [in] B complex matrix
*
* @retval z with z=Tr(A*B)
*/
complex_t matrix_trace_complex_AB(matrix_complex_t *A, matrix_complex_t *B)
{
const int dim = A->min;
complex_t sum = 0;
for(int i = 0; i < dim; i++)
for(int k = 0; k < dim; k++)
sum += matrix_get(A, i,k)*matrix_get(B, k,i);
return sum;
}
/** @brief Calculate Re(Tr(A*B)) for A,B complex
*
* See matrix_trace_AB.
*
* @param [in] A complex matrix
* @param [in] B complex matrix
*
* @retval x with x=Re(Tr(A*B))
*/
double matrix_trace_complex_AB_real(matrix_complex_t *A, matrix_complex_t *B)
{
const int dim = A->rows;
double sum = 0;
const complex_t *M1 = A->M;
const complex_t *M2 = B->M;
for(int i = 0; i < dim; i++)
for(int k = 0; k < dim; k++)
sum += CREAL(M1[i*dim+k]*M2[k*dim+i]);
return sum;
}
/** @}*/
/** \defgroup kron Kronecker product
* @{
*/
/** macro to create Kronecker product */
#define MATRIX_KRON(FUNCTION_NAME, MTYPE, TYPE, ALLOC, SETALL) \
MTYPE *FUNCTION_NAME(MTYPE *A, MTYPE *B, MTYPE *C) \
{ \
const int Am = A->rows, An = A->columns; \
const int Bm = B->rows, Bn = B->columns; \
if(C == NULL) \
{ \
C = ALLOC(Am*Bm, An*Bn); \
if(C == NULL) \
return NULL; \
} \
SETALL(C, 0); \
\
for(int m = 0; m < Am; m++) \
for(int n = 0; n < An; n++) \
{ \
const TYPE c = matrix_get(A,m,n); \
if(c != 0) \
{ \
for(int im = 0; im < Bm; im++) \
for(int in = 0; in < Bn; in++) \
matrix_set(C, m*Bm+im, n*Bn+in, c*matrix_get(B,im,in)); \
} \
} \
\
return C; \
}
/** @brief Calculate Kronecker product of real matrices A and B
*
* Matrix A has dimension Am,An, matrix B has dimension Bm,Bn.
*
* If C is not NULL, the Kronecker product will be stored in C. C must have
* dimension Am*Bm,An*Bn. If C is NULL, memory for the matrix will be allocated
* and the matrix will be returned. You have to free the memory for the
* returned matrix yourself.
*
* @param [in] A real matrix
* @param [in] B real matrix
* @param [in,out] C Kronecker product
*
* @retval C Kronecker product of A and B, NULL if no memory could be allocated
*/
MATRIX_KRON(matrix_kron, matrix_t, double, matrix_alloc, matrix_setall)
/** @brief Calculate Kronecker product of complex matrices A and B
*
* See matrix_kron.
*
* @param [in] A complex matrix
* @param [in] B complex matrix
* @param [in,out] C Kronecker product
*
* @retval C Kronecker product of A and B
*/
MATRIX_KRON(matrix_complex_kron, matrix_complex_t, complex_t, matrix_complex_alloc, matrix_complex_setall)
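/* Illustrative usage sketch (not part of the library): the Kronecker product
 * of a 2x2 and a 3x3 matrix is a (2*3)x(2*3) = 6x6 matrix. Guarded so it is
 * not compiled by default. */
#ifdef LIBHADES_EXAMPLES
static void example_kron(void)
{
    matrix_t *A = matrix_eye(2, NULL);      /* 2x2 identity */
    matrix_t *B = matrix_zeros(3, 3, NULL); /* 3x3 matrix */
    if(A != NULL && B != NULL)
    {
        matrix_set(B, 0,2, 4);
        /* C is allocated by matrix_kron and must be freed by the caller */
        matrix_t *C = matrix_kron(A, B, NULL);
        matrix_free(C);
    }
    matrix_free(A);
    matrix_free(B);
}
#endif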
/** @}*/
/** \defgroup addsubmult Add, subtract and multiply matrices
* @{
*/
/** macro to multiply matrix with scalar factor */
#define MATRIX_MULT_SCALAR(FUNCTION_NAME, MTYPE, TYPE) \
void FUNCTION_NAME(MTYPE *A, TYPE alpha) \
{ \
const size_t size = A->size; \
TYPE *M = A->M; \
for(size_t i = 0; i < size; i++) \
M[i] *= alpha; \
}
/** @brief Multiply real matrix with a real scalar
*
* alpha*A -> A
*
* @param [in,out] A real matrix
* @param [in] alpha real scalar
* @retval A, A=alpha*A
*/
MATRIX_MULT_SCALAR(matrix_mult_scalar, matrix_t, double)
/** @brief Multiply complex matrix with a complex scalar
*
* alpha*A -> A
*
* @param [in,out] A complex matrix
* @param [in] alpha complex scalar
* @retval A, A=alpha*A
*/
MATRIX_MULT_SCALAR(matrix_complex_mult_scalar, matrix_complex_t, complex_t)
/** @brief Multiply real matrix with a complex scalar
*
* alpha*A -> C
*
* If C is NULL, memory for the matrix C will be allocated. If C is not NULL, C
* must have the correct dimension.
*
* @param [in] A real matrix
* @param [in] alpha complex scalar
* @param [in,out] C complex matrix
*
* @retval C, C = alpha*A
*/
matrix_complex_t *matrix_mult_complex_scalar(matrix_t *A, complex_t alpha, matrix_complex_t *C)
{
const int rows = A->rows;
const int columns = A->columns;
const double *AM = A->M;
complex_t *CM;
if(C == NULL)
{
C = matrix_complex_alloc(rows, columns);
if(C == NULL)
return NULL;
}
CM = C->M;
for(size_t i = 0; i < C->size; i++)
CM[i] = alpha*AM[i];
return C;
}
/* compute alpha*A*B */
#define MATRIX_MULT(FUNCTION_NAME, MATRIX_TYPE, TYPE, ALLOC, BLAS_xGEMM, PTR) \
MATRIX_TYPE *FUNCTION_NAME(MATRIX_TYPE *A, MATRIX_TYPE *B, TYPE alpha, MATRIX_TYPE *C) \
{ \
TYPE beta = 0; \
\
if(A->columns != B->rows) \
return NULL; \
\
if(C == NULL) \
{ \
C = ALLOC(A->rows, B->columns); \
if(C == NULL) \
return NULL; \
} \
\
/* xGEMM ( TRANSA, TRANSB, M, N, K, ALPHA, A, LDA, B, LDB, BETA, C, LDC) */ \
BLAS_xGEMM(CblasColMajor, /* column order */ \
CblasNoTrans, /* don't transpose/conjugate A */ \
CblasNoTrans, /* don't transpose/conjugate B */ \
A->rows, /* M: rows of A and C */ \
B->columns, /* N: columns of B and C */ \
A->columns, /* K: columns of A and rows of B */ \
PTR alpha, /* alpha: scalar */ \
A->M, /* A: matrix A */ \
A->rows, /* LDA: leading dimension of A (rows; column-major storage) */ \
B->M, /* B: matrix B */ \
B->rows, /* LDB: leading dimension of B (rows; equals A->columns) */ \
PTR beta, /* beta: scalar */ \
C->M, /* C: matrix C */ \
C->rows /* LDC: leading dimension of C (rows) */ \
); \
\
return C; \
}
/** @brief Multiply real matrices
*
* alpha*A*B -> C
*
* If C is NULL, memory for the matrix C will be allocated.
*
* @param [in] A real matrix
* @param [in] B real matrix
* @param [in] alpha real scalar
* @param [in,out] C real matrix
*
* @retval C, C = alpha*A*B
*/
MATRIX_MULT(matrix_mult, matrix_t, double, matrix_alloc, cblas_dgemm, +)
/** @brief Multiply complex matrices
*
* alpha*A*B -> C
*
* If C is NULL, memory for the matrix C will be allocated.
*
* @param [in] A complex matrix
* @param [in] B complex matrix
* @param [in] alpha complex scalar
* @param [in,out] C complex matrix
*
* @retval C, C = alpha*A*B
*/
MATRIX_MULT(matrix_complex_mult, matrix_complex_t, complex_t, matrix_complex_alloc, cblas_zgemm, &)
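/* Illustrative usage sketch (not part of the library): multiply a 2x3 and a
 * 3x4 matrix; the 2x4 result is allocated by matrix_mult. Guarded so it is
 * not compiled by default. */
#ifdef LIBHADES_EXAMPLES
static void example_mult(void)
{
    matrix_t *A = matrix_zeros(2, 3, NULL);
    matrix_t *B = matrix_zeros(3, 4, NULL);
    if(A != NULL && B != NULL)
    {
        matrix_set(A, 0,0, 1);
        matrix_set(B, 0,1, 2);
        matrix_t *C = matrix_mult(A, B, 0.5, NULL); /* C = 0.5*A*B */
        matrix_free(C);
    }
    matrix_free(A);
    matrix_free(B);
}
#endif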
/** macro to add two matrices */
#define MATRIX_ADD(FUNCTION_NAME, TYPE1, MTYPE1, TYPE2, MTYPE2) \
int FUNCTION_NAME(MTYPE1 *A, MTYPE2 *B, TYPE1 alpha, MTYPE1 *C) \
{ \
const size_t size = A->size; \
TYPE1 *M3; \
TYPE1 *M1 = A->M; \
TYPE2 *M2 = B->M; \
\
if(C == NULL) \
M3 = A->M; \
else \
M3 = C->M; \
\
if(A->rows != B->rows || A->columns != B->columns) \
return LIBHADES_ERROR_SHAPE; \
\
for(size_t i = 0; i < size; i++) \
M3[i] = M1[i] + (alpha*M2[i]); \
\
return 0; \
}
/** @brief Add real matrices A and B
*
* Calculate A+alpha*B -> C.
*
* The result will be stored in C. If C is NULL, the result is stored in A.
*
* @param [in,out] A real matrix
* @param [in] B real matrix
* @param [in] alpha real scalar
* @param [in,out] C real matrix or NULL
*
* @retval 0 if successful
* @retval LIBHADES_ERROR_SHAPE if matrices have wrong shape
*/
MATRIX_ADD(matrix_add, double, matrix_t, double, matrix_t)
/** @brief Add complex matrices A and B
*
* Calculate A+alpha*B -> C.
*
* The result will be stored in C. If C is NULL, the result is stored in A.
*
* @param [in,out] A complex matrix
* @param [in] B complex matrix
* @param [in] alpha complex number
* @param [in,out] C complex matrix or NULL
*
* @retval 0 if successful
* @retval LIBHADES_ERROR_SHAPE if matrices have wrong shape
*/
MATRIX_ADD(matrix_complex_add, complex_t, matrix_complex_t, complex_t, matrix_complex_t)
/** @brief Add complex matrix A and real matrix B
*
* Calculate A+alpha*B -> C.
*
* The result will be stored in C. If C is NULL, the result is stored in A.
*
* @param [in,out] A complex matrix
* @param [in] B real matrix
* @param [in] alpha complex scalar
* @param [in,out] C complex matrix or NULL
*
* @retval 0 if successful
* @retval LIBHADES_ERROR_SHAPE if matrices have wrong shape
*/
MATRIX_ADD(matrix_complex_add_real, complex_t, matrix_complex_t, double, matrix_t)
/** @}*/
/** \defgroup transconj Transpose, conjugate
* @{
*/
#define MATRIX_TRANSPOSE(FUNCTION_NAME, MTYPE, TYPE) \
void FUNCTION_NAME(MTYPE *A) \
{ \
const int rows = A->rows; \
const int columns = A->columns; \
for(int im = 0; im < rows; im++) \
for(int in = im+1; in < columns; in++) \
{ \
TYPE temp = matrix_get(A, im, in); \
matrix_set(A, im, in, matrix_get(A, in, im)); \
matrix_set(A, in, im, temp); \
} \
\
A->rows = columns; \
A->columns = rows; \
}
/** @brief Transpose real matrix A
*
* The matrix is transposed in place. Note that the element-swap loop below is
* only correct for square matrices.
*
* @param [in,out] A real matrix
*/
MATRIX_TRANSPOSE(matrix_transpose, matrix_t, double)
/** @brief Transpose complex matrix A
*
* The matrix is transposed in place. As for matrix_transpose, the element swap
* is only correct for square matrices.
*
* @param [in,out] A complex matrix
*/
MATRIX_TRANSPOSE(matrix_complex_transpose, matrix_complex_t, complex_t)
/** @}*/
/** \defgroup ev Eigenvalue problems
* @{
*/
/** @brief Compute eigenvalues and optionally eigenvectors of symmetric matrix A
*
* This function computes all eigenvalues and, optionally, eigenvectors of a
* real symmetric matrix A.
*
* See dsyev.
*
* @param [in] A real matrix
* @param [in] JOBZ 'N': only eigenvalues, 'V' eigenvalues and eigenvectors
* @param [in] UPLO 'U': upper triangle part of A is stored; 'L': lower triangle part of A is stored
* @param [out] w real matrix of dimension (dim,1) (i.e. a vector); the eigenvalues will be stored in w
*
* @retval 0 on success
*/
int eig_sym(matrix_t *A, char *JOBZ, char *UPLO, matrix_t *w)
{
int info, lwork = -1, N = A->min;
double workopt;
double *work;
dsyev_(JOBZ, UPLO, &N, A->M, &N, w->M, &workopt, &lwork, &info);
if(info != 0)
return info;
lwork = workopt;
work = malloc_cb(lwork*sizeof(double));
if(work == NULL)
return LIBHADES_ERROR_OOM;
dsyev_(JOBZ, UPLO, &N, A->M, &N, w->M, work, &lwork, &info);
free_cb(work);
return info;
}
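/* Illustrative usage sketch (not part of the library): diagonalize a real
 * symmetric 2x2 matrix. The eigenvalues 1 and 3 end up in w; for JOBZ = "V"
 * the eigenvectors overwrite A. Guarded so it is not compiled by default. */
#ifdef LIBHADES_EXAMPLES
static int example_eig_sym(void)
{
    char jobz[] = "V", uplo[] = "U";
    matrix_t *A = matrix_alloc(2, 2);
    matrix_t *w = matrix_alloc(2, 1); /* eigenvalues are written here */
    int info = LIBHADES_ERROR_OOM;
    if(A != NULL && w != NULL)
    {
        /* symmetric matrix ((2,1),(1,2)) */
        matrix_set(A, 0,0, 2); matrix_set(A, 0,1, 1);
        matrix_set(A, 1,0, 1); matrix_set(A, 1,1, 2);
        info = eig_sym(A, jobz, uplo, w);
    }
    matrix_free(A);
    matrix_free(w);
    return info;
}
#endif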
/** @brief Compute eigenvalues and optionally eigenvectors of Hermitian matrix A
*
* This function computes all eigenvalues and, optionally, eigenvectors of a
* Hermitian matrix A.
*
* See zheev.
*
* @param [in] A complex matrix
* @param [in] JOBZ 'N': only eigenvalues, 'V' eigenvalues and eigenvectors
* @param [in] UPLO 'U': upper triangle part of A is stored; 'L': lower triangle part of A is stored
* @param [out] w real matrix of dimension (dim,1) (i.e. a vector); the eigenvalues will be stored in w
*
* @retval 0 on success
*/
int eig_herm(matrix_complex_t *A, char *JOBZ, char *UPLO, matrix_t *w)
{
int info, lwork = -1, N = A->min;
complex_t workopt;
complex_t *work = NULL;
double *rwork;
rwork = malloc_cb(MAX(1, 3*N-2)*sizeof(double));
if(rwork == NULL)
return LIBHADES_ERROR_OOM;
zheev_(JOBZ, UPLO, &N, A->M, &N, w->M, &workopt, &lwork, rwork, &info);
if(info != 0)
{
free_cb(rwork);
return info;
}
lwork = CREAL(workopt);
work = malloc_cb(lwork*sizeof(complex_t));
if(work == NULL)
{
free_cb(rwork);
return LIBHADES_ERROR_OOM;
}
zheev_(JOBZ, UPLO, &N, A->M, &N, w->M, work, &lwork, rwork, &info);
free_cb(rwork);
free_cb(work);
return info;
}
/** @brief Compute eigenvalues and optionally eigenvectors of matrix A
*
* Compute for an N-by-N complex nonsymmetric matrix A the eigenvalues and,
* optionally, left and/or right eigenvectors.
*
* See zgeev.
*
* @param [in] A complex matrix
* @param [out] w complex matrix of dimension (dim,1); on exit it contains the eigenvalues of A
* @param [in,out] vl if vl != NULL, left eigenvectors are computed and stored in vl; vl must be a complex matrix of dimension (dim,dim)
* @param [in,out] vr if vr != NULL, right eigenvectors are computed and stored in vr; vr must be a complex matrix of dimension (dim,dim)
*
* @retval 0 on success
*/
int eig_complex_generic(matrix_complex_t *A, matrix_complex_t *w, matrix_complex_t *vl, matrix_complex_t *vr)
{
int N = A->min;
int lwork = -1;
char jobvr = 'N';
char jobvl = 'N';
int info;
complex_t *evr = NULL;
complex_t *evl = NULL;
complex_t *work = NULL;
double *rwork = NULL;
complex_t wopt;
/* if vr is not NULL, calculate right eigenvectors */
if(vr != NULL)
{
jobvr = 'V';
evr = vr->M;
}
/* if vl is not NULL, calculate left eigenvectors */
if(vl != NULL)
{
jobvl = 'V';
evl = vl->M;
}
/* get the optimal size for workspace work */
zgeev_(&jobvl, &jobvr, &N, A->M, &N, w->M, evl, &N, evr, &N, &wopt, &lwork, rwork, &info);
if(info != 0)
return info;
lwork = CREAL(wopt);
rwork = malloc_cb(2*N*sizeof(double));
work = malloc_cb(lwork*sizeof(complex_t));
if(rwork == NULL || work == NULL)
{
if(rwork != NULL)
free_cb(rwork); /* allocated with malloc_cb, so release with free_cb */
if(work != NULL)
free_cb(work);
return LIBHADES_ERROR_OOM;
}
/* SUBROUTINE ZGEEV( JOBVL, JOBVR, N, A, LDA, W, VL, LDVL, VR, LDVR, WORK, LWORK, RWORK, INFO ) */
zgeev_(
&jobvl, /* left eigenvectors of A are not computed */
&jobvr, /* calculate/don't calculate right eigenvectors */
&N, /* order of matrix A */
A->M, /* matrix A */
&N, /* LDA - leading dimension of A */
w->M, /* eigenvalues */
evl, /* left eigenvectors */
&N, /* leading dimension of array vl */
evr, /* eigenvectors */
&N, /* leading dimension of the array VR */
work, /* COMPLEX*16 array, dimension (LWORK) */
&lwork, /* dimension of the array WORK; LWORK >= max(1,2*N) */
rwork, /* (workspace) DOUBLE PRECISION array, dimension (2*N) */
&info /* 0 == success */
);
free_cb(work);
free_cb(rwork);
return info;
}
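/** @brief Compute a right eigenvector of M for the eigenvalue lambda
*
* (Doc comment inferred from the implementation below.) The overdetermined
* system consisting of (M - lambda*Id)*x = 0 together with the normalization
* condition sum_i x_i = 1 is solved in the least-squares sense using zgels,
* and the solution is stored in vr.
*
* @param [in] M complex square matrix
* @param [in] lambda eigenvalue of M
* @param [out] vr complex matrix of dimension (rows,1) allocated by the caller
*
* @retval 0 on success
* @retval LIBHADES_ERROR_OOM if out of memory
*/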
int eig_complex_vr(matrix_complex_t *M, complex_t lambda, matrix_complex_t *vr)
{
matrix_complex_t *A = NULL, *b = NULL;
const int rows = M->rows;
const int columns = M->columns;
/* lapack */
char trans = 'N';
int info, lwork;
complex_t work_size, *work = NULL;
int m = rows+1, n = columns, nrhs = 1;
int lda = m, ldb = MAX(m,n);
if((A = matrix_complex_alloc(rows+1, columns)) == NULL)
return LIBHADES_ERROR_OOM;
if((b = matrix_complex_zeros(rows+1,1,NULL)) == NULL)
{
matrix_complex_free(A);
return LIBHADES_ERROR_OOM;
}
matrix_set(b, rows,0, 1);
for(int j = 0; j < columns; j++)
{
for(int i = 0; i < rows; i++)
matrix_set(A, i, j, matrix_get(M, i,j));
/* - lambda Id */
matrix_set(A, j,j, matrix_get(A,j,j)-lambda);
/* set normalization condition */
matrix_set(A, rows,j, 1);
}
/* determine work size */
lwork = -1;
zgels_(&trans, &m, &n, &nrhs, A->M, &lda, b->M, &ldb, &work_size, &lwork, &info);
lwork = (int)CREAL(work_size);
if((work = malloc_cb(lwork*sizeof(complex_t))) == NULL)
{
matrix_complex_free(A);
matrix_complex_free(b); /* free the helper vector b, not the caller-owned vr */
return LIBHADES_ERROR_OOM;
}
/* calculate eigenvector */
zgels_(
&trans, /* find least squares solution of overdetermined system */
&m, /* number of rows of matrix A */
&n, /* number of columns of matrix A */
&nrhs, /* number of columns of matrix B */
A->M, /* matrix A (will be overwritten) */
&lda, /* leading dimension of A, LDA >= max(1,M) */
b->M, /* on entry: right hand side vectors, on exit: solution */
&ldb, /* leading dimension of B, LDB >= MAX(1,M,N) */
work, /* workspace */
&lwork, /* dimension of work */
&info /* status */
);
free_cb(work);
matrix_complex_free(A);
for(int i = 0; i < rows; i++)
matrix_set(vr, i,0, matrix_get(b, i,0));
matrix_complex_free(b);
return info;
}
/** @}*/
/** \defgroup exp Calculate matrix exponential
* @{
*/
/** macro to calculate matrix norm */
#define MATRIX_NORM(FUNCTION_NAME, MTYPE, LAPACK_FUNC) \
int FUNCTION_NAME(MTYPE *A, char norm_type, double *norm) \
{ \
double *work = NULL; \
\
if(norm_type == 'I') \
{ \
work = malloc_cb(A->rows*sizeof(double)); \
if(work == NULL) \
return LIBHADES_ERROR_OOM; \
} \
\
*norm = LAPACK_FUNC(&norm_type, &A->rows, &A->columns, A->M, &A->rows, work); \
\
if(work != NULL) \
free_cb(work); \
\
return 0; \
}
/** @brief Compute matrix norm for real matrix
*
* See dlange.
*
* Returns
* max(abs(A(i,j))), norm_type = 'M' or 'm'
* norm1(A), norm_type = '1', 'O' or 'o'
* normI(A), norm_type = 'I' or 'i'
* normF(A), norm_type = 'F', 'f', 'E' or 'e'
*
* @param [in] A real matrix
* @param [in] norm_type type of norm, e.g. 'F' for Frobenius norm
* @param [out] norm value of norm
*
* @retval ret 0 if successful, <0 otherwise
*/
MATRIX_NORM(matrix_norm, matrix_t, dlange_);
/** @brief Compute matrix norm for complex matrix
*
* See matrix_norm.
*
* @param [in] A complex matrix
* @param [in] norm_type type of norm, e.g. 'F' for Frobenius norm
* @param [out] norm value of norm
*
* @retval ret 0 if successful, <0 otherwise
*/
MATRIX_NORM(matrix_complex_norm, matrix_complex_t, zlange_);
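/* Illustrative usage sketch (not part of the library): Frobenius norm of the
 * 3x3 identity, which is sqrt(3). Guarded so it is not compiled by default. */
#ifdef LIBHADES_EXAMPLES
static void example_norm(void)
{
    matrix_t *A = matrix_eye(3, NULL);
    if(A != NULL)
    {
        double norm = 0;
        matrix_norm(A, 'F', &norm); /* norm == sqrt(3) */
        matrix_free(A);
    }
}
#endif
/** @brief Approximate exp(A) of a square complex matrix by a truncated Taylor series
*
* (Doc comment inferred from the implementation below.) The Taylor polynomial
* of degree order, sum_{k=0}^{order} A^k/k!, is evaluated with a Horner
* scheme, B <- Id + (A/k)*B for k = order,...,1. A is assumed to be square; a
* new matrix holding the result is allocated and returned, and the caller must
* free it. Allocation failures are not checked here.
*
* @param [in] A complex square matrix
* @param [in] order degree of the Taylor polynomial (order >= 1)
*
* @retval B approximation of exp(A)
*/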
matrix_complex_t *matrix_complex_exp_taylor(matrix_complex_t *A, int order)
{
const int rows = A->min;
matrix_complex_t *C = matrix_complex_alloc(rows,rows);
/* B = E+A/order */
matrix_complex_t *B = matrix_complex_copy(A,NULL);
matrix_complex_mult_scalar(B, 1./order);
for(int i = 0; i < rows; i++)
matrix_set(B, i,i, 1+matrix_get(B,i,i));
for(int k = order-1; k > 0; k--)
{
matrix_complex_mult(A, B, 1./k, C); /* Horner step: C = (A/k)*B */
/* swap */
{
matrix_complex_t *temp;
temp = C;
C = B;
B = temp;
}
for(int i = 0; i < rows; i++)
matrix_set(B, i,i, 1+matrix_get(B,i,i));
}
matrix_complex_free(C);
return B;
}
/** @}*/
/** \defgroup LA LU decomposition, inverting
* @{
*/
#define LU_DECOMPOSITION(FUNCTION_NAME, MATRIX_TYPE, XGETRF) \
int FUNCTION_NAME(MATRIX_TYPE *A, int ipiv[]) \
{ \
int info; \
\
XGETRF( \
&A->rows, /* M number of rows of A */ \
&A->columns, /* N number of columns of A */ \
A->M, /* matrix A to be factored */ \
&A->rows, /* LDA: leading dimension of A (rows; column-major storage) */ \
ipiv, /* pivot indices of dimension (min(M,N)) */ \
&info \
); \
\
return info; \
}
/** @brief Compute LU decomposition of real matrix A
*
* See dgetrf.
*
* The factorization has the form
* A = P * L * U
* where P is a permutation matrix, L is lower triangular with unit
* diagonal elements (lower trapezoidal if rows > columns), and U is upper
* triangular (upper trapezoidal if m < n).
*
* @param [in,out] A real matrix
* @param [out] ipiv pivot indices; array of dimension MIN(rows,columns)
*
* @retval INFO
*/
LU_DECOMPOSITION(matrix_lu_decomposition, matrix_t, dgetrf_)
/** @brief Compute LU decomposition of complex matrix A
*
* See matrix_lu_decomposition.
*
* @param [in,out] A complex matrix
* @param [out] ipiv pivot indices; array of dimension MIN(rows,columns)
*
* @retval INFO
*/
LU_DECOMPOSITION(matrix_complex_lu_decomposition, matrix_complex_t, zgetrf_)
#define MATRIX_INVERT(FUNCTION_NAME, TYPE, MATRIX_TYPE, LU_DECOMPOSITION, XGETRI) \
int FUNCTION_NAME(MATRIX_TYPE *A) \
{ \
int info, lwork, dim = A->min; \
int *ipiv = NULL; \
TYPE *work = NULL; \
TYPE workopt; \
\
ipiv = malloc_cb(dim*sizeof(int)); \
if(ipiv == NULL) \
return LIBHADES_ERROR_OOM; \
\
info = LU_DECOMPOSITION(A, ipiv); \
if(info != 0) \
goto out; \
\
lwork = -1; \
XGETRI(&dim, A->M, &dim, ipiv, &workopt, &lwork, &info); \
if(info != 0) \
goto out; \
\
lwork = (int)workopt; \
work = malloc_cb(lwork*sizeof(TYPE)); \
if(work == NULL) \
{ \
info = LIBHADES_ERROR_OOM; \
goto out; \
} \
\
XGETRI( \
&dim, /* order of matrix A */ \
A->M, /* factors L and U from LU decomposition */ \
&dim, /* LDA: leading dimension of A */ \
ipiv, /* pivot indices */ \
work, /* workspace of dimension LWORK */ \
&lwork, /* length of work */ \
&info \
); \
\
out: \
free_cb(ipiv); \
if(work != NULL) \
free_cb(work); \
\
return info; \
}
/** @brief Invert real matrix A
*
* The inverse of A is computed using the LU factorization of A.
*
* @param [in] A real matrix
*
* @retval INFO
*/
MATRIX_INVERT(matrix_invert, double, matrix_t, matrix_lu_decomposition, dgetri_)
/** @brief Invert complex matrix A
*
* See matrix_invert.
*
* @param [in] A complex matrix
*
* @retval INFO
*/
MATRIX_INVERT(matrix_complex_invert, complex_t, matrix_complex_t, matrix_complex_lu_decomposition, zgetri_)
#define MATRIX_SOLVE(FUNCTION_NAME, MATRIX_TYPE, LU_DECOMPOSITION, XGETRS) \
int FUNCTION_NAME(MATRIX_TYPE *A, MATRIX_TYPE *b) \
{ \
int N = A->min; \
char trans = 'N'; \
int nrhs = b->columns; \
int info = -1; \
int *ipiv = malloc_cb(N*sizeof(int)); \
\
if(ipiv == NULL) \
return LIBHADES_ERROR_OOM; \
\
info = LU_DECOMPOSITION(A, ipiv); \
if(info == 0) \
XGETRS(&trans, &N, &nrhs, A->M, &N, ipiv, b->M, &N, &info); \
\
free_cb(ipiv); \
\
return info; \
}
/** @brief Solve system of linear equations
*
* Solve the system of linear equations:
* A*x = b
*
* @param [in,out] A matrix
* @param [in,out] b vector/matrix
*
* @retval INFO
*/
MATRIX_SOLVE(matrix_solve, matrix_t, matrix_lu_decomposition, dgetrs_);
/** @brief Solve system of linear equations
*
* Solve the system of complex linear equations:
* A*x = b
*
* @param [in,out] A complex matrix
* @param [in,out] b complex vector/matrix
*
* @retval INFO
*/
MATRIX_SOLVE(matrix_complex_solve, matrix_complex_t, matrix_complex_lu_decomposition, zgetrs_);
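/* Illustrative usage sketch (not part of the library): solve A*x = b. The
 * solution overwrites b; A is overwritten by its LU factors. Guarded so it is
 * not compiled by default. */
#ifdef LIBHADES_EXAMPLES
static int example_solve(void)
{
    matrix_t *A = matrix_eye(2, NULL);
    matrix_t *b = matrix_zeros(2, 1, NULL);
    int info = LIBHADES_ERROR_OOM;
    if(A != NULL && b != NULL)
    {
        matrix_set(A, 0,1, 1); /* A = ((1,1),(0,1)) */
        matrix_set(b, 0,0, 3);
        matrix_set(b, 1,0, 1);
        info = matrix_solve(A, b); /* b now holds x = (2,1)^T */
    }
    matrix_free(A);
    matrix_free(b);
    return info;
}
#endif
/** @}*/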
/*
static void _cblas_zaxpy(const int N, const double alpha, const void *X, const int incX, void *Y, const int incY)
{
complex_t beta = alpha;
cblas_zaxpy(N, &beta, X, incX, Y, incY);
}
*/
/** @brief Calculate dot product of vectors x and y
*
* Calculate dot product of first column of x,y. If x and y have different
* rows, the minimum is used.
*
* @param [in] x vector
* @param [in] y vector
* @retval x*y
*/
double vector_dot(matrix_t *x, matrix_t *y)
{
int incx = 1;
int incy = 1;
int N = MIN(x->rows, y->rows);
return ddot_(&N, x->M, &incx, y->M, &incy);
}
/** @brief Calculate dot product of vectors x and y
*
* Calculate dot product of first column of x,y. If x and y have different
* rows, the minimum is used.
*
* @param [in] x vector
* @param [in] y vector
* @retval x*y
*/
complex_t vector_complex_dot(matrix_complex_t *x, matrix_complex_t *y)
{
/*
int incx = 1;
int incy = 1;
int N = MIN(x->rows, y->rows);
complex_t z = 0;
zdotc_(&z, &N, x->M, &incx, y->M, &incy);
return z;
*/
int N = MIN(x->rows, y->rows);
complex_t z = 0;
for(int i = 0; i < N; i++)
z += matrix_get(x,i,0)*matrix_get(y,i,0);
return z;
}
#define MATRIX_GET_COLUMN(FUNCTION_NAME, TYPE, MATRIX_TYPE) \
MATRIX_TYPE *FUNCTION_NAME(MATRIX_TYPE *A, int i) \
{ \
const int rows = A->rows; \
MATRIX_TYPE *v = malloc_cb(sizeof(MATRIX_TYPE)); \
if(v == NULL) \
return NULL; \
\
v->rows = rows; \
v->columns = 1; \
v->min = 1; \
v->size = rows; \
v->type = 0; \
v->view = 1; \
v->M = &A->M[i*rows]; \
\
return v; \
}
/** @brief Get i-th column of matrix A
*
* The returned matrix is a view: it shares the data of A and must be freed
* with matrix_free (for views, the underlying data is not freed).
*
* @param [in] A matrix
* @param [in] i column number
* @retval v vector view of the i-th column of A, or NULL if out of memory
*/
MATRIX_GET_COLUMN(matrix_get_column, double, matrix_t);
/** @brief Get i-th column of complex matrix A
*
* See matrix_get_column.
*
* @param [in] A complex matrix
* @param [in] i column number
* @retval v vector view of the i-th column of A, or NULL if out of memory
*/
MATRIX_GET_COLUMN(matrix_complex_get_column, complex_t, matrix_complex_t);
/** \defgroup sparse Functions for sparse matrices
* @{
*/
#ifdef SUPPORT_SPARSE
/** @brief Calculate eigenvalues of a sparse complex matrix
*
* @param [in] N number of columns/rows of matrix
* @param [in] nev number of eigenvalues to compute
* @param [in] which LM (largest magnitude), SM (smallest magnitude), LR (largest real part), SR (smallest real part), LI (largest imaginary part), SI (smallest imaginary part)
* @param [in] Av callback function that implements the matrix-vector operation Av; the input vector is given as in, the vector Av must be written in out
* @param [in,out] d on exit d contains the Ritz value approximations to the eigenvalues (must be of length nev+1)
* @param [in] mxiter maximum number of Arnoldi update iterations allowed
* @param [in] tol relative accuracy of the Ritz value
* @param [in] data pointer that is given to callback function Av
*
* @retval 0 if successful
*/
int sparse_complex_eig(int N, int nev, char *which, void (*Av)(int N, complex_t *in, complex_t *out, void *data), complex_t *d, int mxiter, double tol, void *data)
{
int ret = LIBHADES_ERROR_OOM;
int info = 0;
int ido = 0;
char *bmat = "I"; /* standard eigenproblem */
int ncv = MIN(2*nev+2, N);
int ishift = 1;
int mode = 1;
int iparam[11] = { ishift, 0, mxiter, 1, 0, 0, mode, 0, 0, 0, 0 };
int ipntr[14];
int lworkl = ncv*(3*ncv + 5);
int rvec = 0;
char *howmny = "P";
complex_t *workd = NULL, *workl = NULL, *resid = NULL, *v = NULL, *workev = NULL;
int *select = NULL;
double *rwork = NULL;
/* allocate memory */
ret = LIBHADES_ERROR_OOM;
workd = malloc_cb(3*N*sizeof(complex_t));
if(workd == NULL)
goto out;
workl = malloc_cb(lworkl*sizeof(complex_t));
if(workl == NULL)
goto out;
rwork = malloc_cb(ncv*sizeof(double));
if(rwork == NULL)
goto out;
resid = malloc_cb(N*sizeof(complex_t));
if(resid == NULL)
goto out;
v = malloc_cb(N*ncv*sizeof(complex_t));
if(v == NULL)
goto out;
select = malloc_cb(ncv*sizeof(int));
if(select == NULL)
goto out;
workev = malloc_cb((2*ncv)*sizeof(complex_t));
if(workev == NULL)
goto out;
/* loop */
while(1)
{
/* http://www.caam.rice.edu/software/ARPACK/UG/node138.html */
znaupd_(
&ido, bmat, &N, which, &nev, &tol, resid, &ncv, v, &N, iparam,
ipntr, workd, workl, &lworkl, rwork, &info, strlen(bmat),
strlen(which)
);
if(ido == 1 || ido == -1)
Av(N, &workd[ipntr[0]-1], &workd[ipntr[1]-1], data);
else if(ido == 99)
break;
else
{
ret = ido;
goto out;
}
}
if(info != 0)
{
ret = info;
goto out;
}
/* http://www.mathkeisan.com/usersguide/man/zneupd.html */
zneupd_(
&rvec, howmny, select, d, NULL, &N, NULL, workev, bmat, &N, which,
&nev, &tol, resid, &ncv, v, &N, iparam, ipntr, workd, workl, &lworkl,
rwork, &info, strlen(howmny), strlen(bmat), strlen(which)
);
ret = info;
out:
if(resid != NULL)
free_cb(resid);
if(v != NULL)
free_cb(v);
if(workd != NULL)
free_cb(workd);
if(workl != NULL)
free_cb(workl);
if(rwork != NULL)
free_cb(rwork);
if(select != NULL)
free_cb(select);
if(workev != NULL)
free_cb(workev);
return ret;
}
#endif
/** @}*/
/** \defgroup io Save/load matrices
* @{
*/
#define MATRIX_LOAD_FROM_STREAM(FUNCTION_NAME, MATRIX_TYPE, TYPE, ALLOC, TRANSPOSE, IS_COMPLEX) \
MATRIX_TYPE *FUNCTION_NAME(FILE *stream, int *ret) \
{ \
MATRIX_TYPE *M; \
uint16_t len; \
int rows, columns, fortran_order, is_complex; \
char header[10] = { 0 }; \
char dict[2048] = { 0 }; \
\
if(ret != NULL) \
*ret = 0; \
\
/* read magic string, major and minor number */ \
fread(header, 8, 1, stream); \
if(memcmp(header, "\x93NUMPY\x01\x00", 8) != 0) \
{ \
if(ret != NULL) \
*ret = LIBHADES_ERROR_HEADER; \
return NULL; \
} \
\
/* read length of dict */ \
fread(&len, sizeof(uint16_t), 1, stream); \
\
if(len >= sizeof(dict)/sizeof(dict[0])) \
{ \
if(ret != NULL) \
*ret = LIBHADES_ERROR_INV_LENGTH; \
return NULL; \
} \
\
fread(dict, sizeof(char), len, stream); \
\
if(npy_dict_get_fortran_order(dict, &fortran_order) != 0) \
{ \
if(ret != NULL) \
*ret = LIBHADES_ERROR_ORDER; \
return NULL; \
} \
\
if(npy_dict_get_shape(dict, &rows, &columns) != 0) \
{ \
if(ret != NULL) \
*ret = LIBHADES_ERROR_SHAPE; \
return NULL; \
} \
\
if(npy_dict_get_descr(dict, &is_complex) != 0) \
{ \
if(ret != NULL) \
*ret = LIBHADES_ERROR_DESCR; \
return NULL; \
} \
\
if(is_complex != IS_COMPLEX) \
{ \
if(ret != NULL) \
*ret = LIBHADES_ERROR_FORMAT; \
return NULL; \
} \
\
M = ALLOC(rows,columns); \
if(M == NULL) \
{ \
if(ret != NULL) \
*ret = LIBHADES_ERROR_OOM; \
return NULL; \
} \
fread(M->M, sizeof(TYPE), rows*columns, stream); \
\
if(!fortran_order) \
TRANSPOSE(M); \
\
M->min = MIN(rows,columns); \
M->size = (size_t)rows*(size_t)columns; \
M->view = 0; \
M->type = 0; \
\
return M; \
}
/** @brief Load real matrix from stream
*
* Load real matrix A from a stream. This function will also allocate memory
* for the matrix.
*
* If error != NULL, error will be set to:
* 0 if successful
* LIBHADES_ERROR_HEADER if magic or major/minor number is invalid
* LIBHADES_ERROR_INV_LENGTH if length of dictionary is invalid (too long)
* LIBHADES_ERROR_ORDER if order is invalid (Fortran/C order)
* LIBHADES_ERROR_SHAPE if shape is invalid (rows/columns)
* LIBHADES_ERROR_DESCR if dtype is wrong/not supported
* LIBHADES_ERROR_FORMAT if wrong format (real instead of complex)
*
* @param [in] stream file handle of an opened file
* @param [out] error error code
* @retval A matrix
* @retval NULL if an error occurred
*/
MATRIX_LOAD_FROM_STREAM(matrix_load_from_stream, matrix_t, double, matrix_alloc, matrix_transpose, 0);
/** @brief Load complex matrix from stream
*
* Load complex matrix A from a stream. This function will also allocate memory
* for the matrix.
*
* If error != NULL, error will be set to:
* 0 if successful
* LIBHADES_ERROR_HEADER if magic or major/minor number is invalid
* LIBHADES_ERROR_INV_LENGTH if length of dictionary is invalid (too long)
* LIBHADES_ERROR_ORDER if order is invalid (Fortran/C order)
* LIBHADES_ERROR_SHAPE if shape is invalid (rows/columns)
* LIBHADES_ERROR_DESCR if dtype is wrong/not supported
* LIBHADES_ERROR_FORMAT if wrong format (complex instead of real)
*
* @param [in] stream file handle of an opened file
* @param [out] error error code
* @retval A matrix
* @retval NULL if an error occurred
*/
MATRIX_LOAD_FROM_STREAM(matrix_complex_load_from_stream, matrix_complex_t, complex_t, matrix_complex_alloc, matrix_complex_transpose, 1);
#define MATRIX_LOAD(FUNCTION_NAME, MATRIX_TYPE, LOAD_FUNCTION) \
MATRIX_TYPE *FUNCTION_NAME(const char *filename, int *ret) \
{ \
FILE *stream; \
MATRIX_TYPE *M; \
\
if((stream = fopen(filename, "r")) == NULL) \
{ \
if(ret != NULL) \
*ret = LIBHADES_ERROR_IO; \
return NULL; \
} \
\
M = LOAD_FUNCTION(stream, ret); \
\
fclose(stream); \
\
return M; \
}
/** @brief Load real matrix from file
*
* Load real matrix A from file given by filename. This function will also
* allocate memory for the matrix. See \ref matrix_load_from_stream for errors.
*
* @param [in] filename path to the file
* @param [out] error error code
* @retval A matrix
* @retval NULL if an error occurred
*/
MATRIX_LOAD(matrix_load, matrix_t, matrix_load_from_stream);
/** @brief Load complex matrix from file
*
* Load complex matrix A from file given by filename. This function will also
* allocate memory for the matrix. See \ref matrix_complex_load_from_stream for
* errors.
*
* @param [in] filename path to the file
* @param [out] error error code
* @retval A matrix
* @retval NULL if an error occurred
*/
MATRIX_LOAD(matrix_complex_load, matrix_complex_t, matrix_complex_load_from_stream);
#define MATRIX_SAVE_TO_STREAM(FUNCTION_NAME, TYPE, MATRIX_TYPE, DTYPE) \
void FUNCTION_NAME(MATRIX_TYPE *M, FILE *stream) \
{ \
char d_str[512] = { 0 }; \
uint16_t len = 0; \
const int rows = M->rows, columns = M->columns; \
\
/* write magic string, major number and minor number */ \
fwrite("\x93NUMPY\x01\x00", sizeof(char), 8, stream); \
\
/* write length of header and header */ \
snprintf(d_str, sizeof(d_str)/sizeof(d_str[0]), "{'descr': '%s', 'fortran_order': True, 'shape': (%d, %d), }", DTYPE, rows, columns); \
\
len = strlen(d_str); \
\
fwrite(&len, sizeof(len), 1, stream); \
fwrite(d_str, sizeof(char), len, stream); \
\
/* write matrix */ \
fwrite(M->M, sizeof(TYPE), M->size, stream); \
}
/** @brief Save real matrix A to stream
*
* Save real matrix A to a file handle given by stream. The datatype
* corresponds to Numpy's npy file format.
*
* @param [in] A matrix to be dumped to file
* @param [in] stream file handle of opened file
*/
MATRIX_SAVE_TO_STREAM(matrix_save_to_stream, double, matrix_t, "<f8");
/** @brief Save complex matrix A to stream
*
* Save complex matrix A to a file handle given by stream. The datatype
* corresponds to Numpy's npy file format.
*
* @param [in] A matrix to be dumped to file
* @param [in] stream file handle of opened file
*/
MATRIX_SAVE_TO_STREAM(matrix_complex_save_to_stream, complex_t, matrix_complex_t, "<c16");
#define MATRIX_SAVE(FUNCTION_NAME, MATRIX_TYPE, SAVE_FUNCTION) \
int FUNCTION_NAME(MATRIX_TYPE *M, const char *filename) \
{ \
FILE *stream = fopen(filename, "w"); \
if(stream == NULL) \
return LIBHADES_ERROR_IO; \
\
SAVE_FUNCTION(M, stream); \
\
fclose(stream); \
\
return 0; \
};
/** @brief Save real matrix A to file
*
* Save real matrix A to file given by filename. The datatype corresponds to
* Numpy's .npy file format.
*
* @param [in] A matrix to be dumped to file
* @param [in] filename path to the file
* @retval 0 if successful
* @retval LIBHADES_ERROR_IO if file could not be opened
*/
MATRIX_SAVE(matrix_save, matrix_t, matrix_save_to_stream);
/** @brief Save complex matrix A to file
*
* Save complex matrix A to file given by filename. The datatype corresponds to
* Numpy's .npy file format.
*
* @param [in] A matrix to be dumped to file
* @param [in] filename path to the file
* @retval 0 if successful
* @retval LIBHADES_ERROR_IO if file could not be opened
*/
MATRIX_SAVE(matrix_complex_save, matrix_complex_t, matrix_complex_save_to_stream);
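/* Illustrative usage sketch (not part of the library): round trip a matrix
 * through Numpy's .npy format. The path "/tmp/example.npy" is an arbitrary
 * choice for this sketch. Guarded so it is not compiled by default. */
#ifdef LIBHADES_EXAMPLES
static int example_save_load(void)
{
    int ret;
    matrix_t *A = matrix_eye(3, NULL);
    if(A == NULL)
        return LIBHADES_ERROR_OOM;
    ret = matrix_save(A, "/tmp/example.npy");
    matrix_free(A);
    if(ret != 0)
        return ret;
    matrix_t *B = matrix_load("/tmp/example.npy", &ret);
    matrix_free(B);
    return ret;
}
#endif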
/** @}*/
|
#pragma once
#include <cblas.h>
#include <stdlib.h>
#include <immintrin.h>
#include <faiss/IndexIVF.h>
#include <faiss/utils/Heap.h>
#ifdef OPT_DTYPE_UTILS
namespace faiss {
//==================================Conversion================================
inline const float* convert_x_T_impl (size_t, const float* x, float*) {
return x;
}
template <typename T>
const T* convert_x_T_impl (size_t d, const float* x, T*) {
T* conv_x = new T[d];
for (size_t i = 0; i < d; i++) {
conv_x[i] = static_cast<T> (x[i]);
}
return conv_x;
}
template <typename T>
inline const T* convert_x_T (size_t d, const float* x) {
return convert_x_T_impl (d, x, (T*)nullptr);
}
inline void del_converted_x_T (size_t, const float*) {
}
template <typename T>
inline void del_converted_x_T (size_t, const T* conv_x) {
delete[] conv_x;
}
template <typename T>
struct Converter_T {
const size_t d;
const T* const x;
Converter_T (size_t d, const float* x):
d (d), x (convert_x_T<T> (d, x)) {
}
~Converter_T () {
del_converted_x_T (d, x);
}
};
//==============================Distance Function=============================
template <typename Tdis, typename T>
Tdis vec_IP_ref_T (const T* x, const T* y, size_t d, Tdis sum = 0) {
for (size_t i = 0; i < d; i++) {
sum += static_cast<Tdis> (x[i]) * static_cast<Tdis> (y[i]);
}
return sum;
}
inline float vec_IP_ref_T (const float* x, const float* y, size_t d) {
return vec_IP_ref_T<float> (x, y, d);
}
inline float vec_IP_ref_T (const bfp16_t* x, const bfp16_t* y,
size_t d) {
return vec_IP_ref_T<float> (x, y, d);
}
template <typename Tdis, typename T>
Tdis vec_L2Sqr_ref_T (const T* x, const T* y, size_t d, Tdis sum = 0) {
for (size_t i = 0; i < d; i++) {
Tdis diff = static_cast<Tdis> (x[i]) - static_cast<Tdis> (y[i]);
sum += diff * diff;
}
return sum;
}
inline float vec_L2Sqr_ref_T (const float* x, const float* y, size_t d) {
return vec_L2Sqr_ref_T<float> (x, y, d);
}
inline float vec_L2Sqr_ref_T (const bfp16_t* x, const bfp16_t* y,
size_t d) {
return vec_L2Sqr_ref_T<float> (x, y, d);
}
#ifdef __SSE4_1__
#define USE_SIMD_128
inline __m128 _mm_loadu_ps_T (const float* x) {
return _mm_loadu_ps (x);
}
inline __m128 _mm_loadu_ps_T (const bfp16_t* x) {
return _mm_castsi128_ps (_mm_unpacklo_epi16 (
_mm_setzero_si128 (),
_mm_loadl_epi64 ((const __m128i*)x)));
}
template <typename T>
float vec_IP_fp_128b_T (const T* x, const T* y, size_t d,
__m128 msum = _mm_setzero_ps ()) {
while (d >= 4) {
__m128 mx = _mm_loadu_ps_T (x);
x += 4;
__m128 my = _mm_loadu_ps_T (y);
y += 4;
msum = _mm_add_ps (msum, _mm_mul_ps (mx, my));
d -= 4;
}
msum = _mm_hadd_ps (msum, msum);
msum = _mm_hadd_ps (msum, msum);
float sum = _mm_cvtss_f32 (msum);
return d == 0 ? sum : vec_IP_ref_T<float> (x, y, d, sum);
}
inline float vec_IP_128b_T (const float* x, const float* y, size_t d) {
return vec_IP_fp_128b_T (x, y, d);
}
inline float vec_IP_128b_T (const bfp16_t* x, const bfp16_t* y,
size_t d) {
return vec_IP_fp_128b_T (x, y, d);
}
template <typename T>
float vec_L2Sqr_fp_128b_T (const T* x, const T* y, size_t d,
__m128 msum = _mm_setzero_ps ()) {
while (d >= 4) {
__m128 mx = _mm_loadu_ps_T (x);
x += 4;
__m128 my = _mm_loadu_ps_T (y);
y += 4;
__m128 mdiff = _mm_sub_ps (mx, my);
msum = _mm_add_ps (msum, _mm_mul_ps (mdiff, mdiff));
d -= 4;
}
msum = _mm_hadd_ps (msum, msum);
msum = _mm_hadd_ps (msum, msum);
float sum = _mm_cvtss_f32 (msum);
return d == 0 ? sum : vec_L2Sqr_ref_T<float> (x, y, d, sum);
}
inline float vec_L2Sqr_128b_T (const float* x, const float* y, size_t d) {
return vec_L2Sqr_fp_128b_T (x, y, d);
}
inline float vec_L2Sqr_128b_T (const bfp16_t* x, const bfp16_t* y,
size_t d) {
return vec_L2Sqr_fp_128b_T (x, y, d);
}
#endif
#ifdef __AVX2__
#ifndef USE_SIMD_128
#error "SIMD 256 must have SIMD 128 enabled"
#endif
#define USE_SIMD_256
inline __m256 _mm256_loadu_ps_T (const float* x) {
return _mm256_loadu_ps (x);
}
inline __m256 _mm256_loadu_ps_T (const bfp16_t* x) {
return
_mm256_castsi256_ps (
_mm256_unpacklo_epi16 (
_mm256_setzero_si256 (),
_mm256_insertf128_si256 (
_mm256_castsi128_si256 (
_mm_loadl_epi64 ((const __m128i*)x)),
_mm_loadl_epi64 ((const __m128i*)(x + 4)),
1)));
}
template <typename T>
float vec_IP_fp_256b_T (const T* x, const T* y, size_t d,
__m256 msum = _mm256_setzero_ps ()) {
while (d >= 8) {
__m256 mx = _mm256_loadu_ps_T (x);
x += 8;
__m256 my = _mm256_loadu_ps_T (y);
y += 8;
msum = _mm256_add_ps (msum, _mm256_mul_ps (mx, my));
d -= 8;
}
__m128 msum2 = _mm256_extractf128_ps (msum, 1);
msum2 = _mm_add_ps (msum2, _mm256_extractf128_ps (msum, 0));
return vec_IP_fp_128b_T (x, y, d, msum2);
}
inline float vec_IP_256b_T (const float* x, const float* y, size_t d) {
return vec_IP_fp_256b_T (x, y, d);
}
inline float vec_IP_256b_T (const bfp16_t* x, const bfp16_t* y,
size_t d) {
return vec_IP_fp_256b_T (x, y, d);
}
template <typename T>
float vec_L2Sqr_fp_256b_T (const T* x, const T* y, size_t d,
__m256 msum = _mm256_setzero_ps ()) {
while (d >= 8) {
__m256 mx = _mm256_loadu_ps_T (x);
x += 8;
__m256 my = _mm256_loadu_ps_T (y);
y += 8;
__m256 mdiff = _mm256_sub_ps (mx, my);
msum = _mm256_add_ps (msum, _mm256_mul_ps (mdiff, mdiff));
d -= 8;
}
__m128 msum2 = _mm256_extractf128_ps (msum, 1);
msum2 = _mm_add_ps (msum2, _mm256_extractf128_ps (msum, 0));
return vec_L2Sqr_fp_128b_T (x, y, d, msum2);
}
inline float vec_L2Sqr_256b_T (const float* x, const float* y, size_t d) {
return vec_L2Sqr_fp_256b_T (x, y, d);
}
inline float vec_L2Sqr_256b_T (const bfp16_t* x, const bfp16_t* y,
size_t d) {
return vec_L2Sqr_fp_256b_T (x, y, d);
}
#endif
#if defined (__AVX512F__) && defined(__AVX512DQ__) && defined(__AVX512BW__) \
&& defined(__AVX512VL__)
#ifndef USE_SIMD_256
#error "SIMD 512 must have SIMD 256 enabled"
#endif
#define USE_SIMD_512
inline __m512 _mm512_loadu_ps_T (const float* x) {
return _mm512_loadu_ps (x);
}
inline __m512 _mm512_loadu_ps_T (const bfp16_t* x) {
return
_mm512_castsi512_ps (
_mm512_unpacklo_epi16 (
_mm512_setzero_si512 (),
_mm512_inserti64x4 (
_mm512_castsi256_si512 (
_mm256_inserti32x4 (
_mm256_castsi128_si256 (
_mm_loadl_epi64 ((const __m128i*)x)),
_mm_loadl_epi64 ((const __m128i*)(x + 4)),
1)),
_mm256_inserti32x4 (
_mm256_castsi128_si256 (
_mm_loadl_epi64 ((const __m128i*)(x + 8))),
_mm_loadl_epi64 ((const __m128i*)(x + 12)),
1),
1)));
}
template <typename T>
float vec_IP_fp_512b_T (const T* x, const T* y, size_t d,
__m512 msum = _mm512_setzero_ps ()) {
while (d >= 16) {
__m512 mx = _mm512_loadu_ps_T (x);
x += 16;
__m512 my = _mm512_loadu_ps_T (y);
y += 16;
msum = _mm512_add_ps (msum, _mm512_mul_ps (mx, my));
d -= 16;
}
__m256 msum2 = _mm512_extractf32x8_ps (msum, 1);
msum2 = _mm256_add_ps (msum2, _mm512_extractf32x8_ps (msum, 0));
return vec_IP_fp_256b_T (x, y, d, msum2);
}
inline float vec_IP_512b_T (const float* x, const float* y, size_t d) {
return vec_IP_fp_512b_T (x, y, d);
}
inline float vec_IP_512b_T (const bfp16_t* x, const bfp16_t* y,
size_t d) {
return vec_IP_fp_512b_T (x, y, d);
}
template <typename T>
float vec_L2Sqr_fp_512b_T (const T* x, const T* y, size_t d,
__m512 msum = _mm512_setzero_ps ()) {
while (d >= 16) {
__m512 mx = _mm512_loadu_ps_T (x);
x += 16;
__m512 my = _mm512_loadu_ps_T (y);
y += 16;
__m512 mdiff = _mm512_sub_ps (mx, my);
msum = _mm512_add_ps (msum, _mm512_mul_ps (mdiff, mdiff));
d -= 16;
}
__m256 msum2 = _mm512_extractf32x8_ps (msum, 1);
msum2 = _mm256_add_ps (msum2, _mm512_extractf32x8_ps (msum, 0));
return vec_L2Sqr_fp_256b_T (x, y, d, msum2);
}
inline float vec_L2Sqr_512b_T (const float* x, const float* y, size_t d) {
return vec_L2Sqr_fp_512b_T (x, y, d);
}
inline float vec_L2Sqr_512b_T (const bfp16_t* x, const bfp16_t* y,
size_t d) {
return vec_L2Sqr_fp_512b_T (x, y, d);
}
#endif
#if defined (USE_SIMD_512)
template <typename T>
inline float vec_IP_T (const T* x, const T* y, size_t d) {
return vec_IP_512b_T (x, y, d);
}
template <typename T>
inline float vec_L2Sqr_T (const T* x, const T* y, size_t d) {
return vec_L2Sqr_512b_T (x, y, d);
}
#elif defined (USE_SIMD_256)
template <typename T>
inline float vec_IP_T (const T* x, const T* y, size_t d) {
return vec_IP_256b_T (x, y, d);
}
template <typename T>
inline float vec_L2Sqr_T (const T* x, const T* y, size_t d) {
return vec_L2Sqr_256b_T (x, y, d);
}
#elif defined (USE_SIMD_128)
template <typename T>
inline float vec_IP_T (const T* x, const T* y, size_t d) {
return vec_IP_128b_T (x, y, d);
}
template <typename T>
inline float vec_L2Sqr_T (const T* x, const T* y, size_t d) {
return vec_L2Sqr_128b_T (x, y, d);
}
#else
template <typename T>
inline float vec_IP_T (const T* x, const T* y, size_t d) {
return vec_IP_ref_T (x, y, d);
}
template <typename T>
inline float vec_L2Sqr_T (const T* x, const T* y, size_t d) {
return vec_L2Sqr_ref_T (x, y, d);
}
#endif
}
#endif
#ifdef OPT_FLAT_DTYPE
#define FLAT_BATCH_THRESHOLD 4
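// Rationale (added for clarity): for at least FLAT_BATCH_THRESHOLD queries the
// knn_*_T dispatchers below delegate the distance computation to a single BLAS
// GEMM call over the whole batch; below the threshold the per-query heap scan
// avoids materializing the nx*ny distance table.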
namespace faiss {
//=================================KNN Routine================================
template <typename T, typename D>
void knn_less_better_alone_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_maxheap_array_t* res, D& distance) {
size_t k = res->k;
size_t check_period = InterruptCallback::get_period_hint (ny * d);
check_period *= omp_get_max_threads ();
for (size_t i0 = 0; i0 < nx; i0 += check_period) {
size_t i1 = std::min (i0 + check_period, nx);
#pragma omp parallel for
for (size_t i = i0; i < i1; i++) {
const T* x_i = x + i * d;
const T* y_j = y;
float* simi = res->get_val (i);
int64_t* idxi = res->get_ids (i);
maxheap_heapify (k, simi, idxi);
for (size_t j = 0; j < ny; j++) {
float dis = distance (i, j, x_i, y_j, d);
if (dis < simi[0]) {
maxheap_pop (k, simi, idxi);
maxheap_push (k, simi, idxi, dis, j);
}
y_j += d;
}
maxheap_reorder (k, simi, idxi);
}
InterruptCallback::check ();
}
}
template <typename T, typename D>
void knn_greater_better_alone_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_minheap_array_t* res, D& distance) {
size_t k = res->k;
size_t check_period = InterruptCallback::get_period_hint (ny * d);
check_period *= omp_get_max_threads ();
for (size_t i0 = 0; i0 < nx; i0 += check_period) {
size_t i1 = std::min (i0 + check_period, nx);
#pragma omp parallel for
for (size_t i = i0; i < i1; i++) {
const T* x_i = x + i * d;
const T* y_j = y;
float* simi = res->get_val (i);
int64_t* idxi = res->get_ids (i);
minheap_heapify (k, simi, idxi);
for (size_t j = 0; j < ny; j++) {
float dis = distance (i, j, x_i, y_j, d);
if (dis > simi[0]) {
minheap_pop (k, simi, idxi);
minheap_push (k, simi, idxi, dis, j);
}
y_j += d;
}
minheap_reorder (k, simi, idxi);
}
InterruptCallback::check ();
}
}
template <typename T>
inline void knn_inner_product_alone_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_minheap_array_t* res) {
struct IP {
inline float operator () (size_t /*ix*/, size_t /*jy*/,
const T* xi, const T* yj, size_t d) const {
return vec_IP_T (xi, yj, d);
}
}
distance;
knn_greater_better_alone_T (x, y, d, nx, ny, res, distance);
}
template <typename T>
inline void knn_L2Sqr_alone_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_maxheap_array_t* res) {
struct L2Sqr {
inline float operator () (size_t /*ix*/, size_t /*jy*/,
const T* xi, const T* yj, size_t d) const {
return vec_L2Sqr_T (xi, yj, d);
}
}
distance;
knn_less_better_alone_T (x, y, d, nx, ny, res, distance);
}
template <typename T>
inline void knn_L2Sqr_expand_alone_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_maxheap_array_t* res,
const float* y_norm) {
struct L2SqrExpand {
const float* y_norm_sqr;
inline float operator () (size_t /*ix*/, size_t jy,
const T* xi, const T* yj, size_t d) const {
return y_norm_sqr[jy] - 2 * vec_IP_T (xi, yj, d);
}
}
distance = {
.y_norm_sqr = y_norm,
};
knn_less_better_alone_T (x, y, d, nx, ny, res, distance);
}
template <typename T>
inline void knn_inner_product_batch_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_minheap_array_t* res) {
knn_inner_product_alone_T (x, y, d, nx, ny, res);
}
template <typename T>
inline void knn_L2Sqr_batch_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_maxheap_array_t* res) {
knn_L2Sqr_alone_T (x, y, d, nx, ny, res);
}
template <typename T>
inline void knn_L2Sqr_expand_batch_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_maxheap_array_t* res,
const float* y_norm) {
knn_L2Sqr_expand_alone_T (x, y, d, nx, ny, res, y_norm);
}
template <typename H, typename D>
void knn_batch_T (const float* x, const float* y,
size_t d, size_t nx, size_t ny, H* heap, D& distance) {
heap->heapify ();
if (nx == 0 || ny == 0) {
return;
}
float* distances = new float [nx * ny];
distance (x, y, d, nx, ny, distances);
heap->addn (ny, distances, 0, 0, nx);
delete[] distances;
InterruptCallback::check ();
heap->reorder ();
}
inline void knn_inner_product_batch_T (const float* x, const float* y,
size_t d, size_t nx, size_t ny, float_minheap_array_t* res) {
struct IP {
inline void operator () (const float* x, const float* y,
size_t d, size_t nx, size_t ny, float* distances) const {
cblas_sgemm (CblasRowMajor, CblasNoTrans, CblasTrans, nx, ny, d,
1.0f, x, d, y, d, 0.0f, distances, ny);
}
}
distance;
knn_batch_T (x, y, d, nx, ny, res, distance);
}
inline void knn_L2Sqr_expand_batch_T (const float* x, const float* y,
size_t d, size_t nx, size_t ny, float_maxheap_array_t* res,
const float* y_norm) {
struct L2SqrExpand {
const float* y_norm;
inline void operator () (const float* x, const float* y,
size_t d, size_t nx, size_t ny, float* distances) const {
float* distances_i = distances;
size_t step = ny * sizeof(float);
for (size_t i = 0; i < nx; i++) {
memcpy (distances_i, y_norm, step);
distances_i += ny;
}
cblas_sgemm (CblasRowMajor, CblasNoTrans, CblasTrans, nx, ny, d,
-2.0f, x, d, y, d, 1.0f, distances, ny);
}
}
distance = {
.y_norm = y_norm,
};
knn_batch_T (x, y, d, nx, ny, res, distance);
}
template <typename T>
inline void knn_inner_product_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_minheap_array_t* res) {
if (nx < FLAT_BATCH_THRESHOLD) {
knn_inner_product_alone_T (x, y, d, nx, ny, res);
}
else {
knn_inner_product_batch_T (x, y, d, nx, ny, res);
}
}
template <typename T>
inline void knn_L2Sqr_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_maxheap_array_t* res) {
if (nx < FLAT_BATCH_THRESHOLD) {
knn_L2Sqr_alone_T (x, y, d, nx, ny, res);
}
else {
knn_L2Sqr_batch_T (x, y, d, nx, ny, res);
}
}
template <typename T>
inline void knn_L2Sqr_expand_T (const T* x, const T* y, size_t d,
size_t nx, size_t ny, float_maxheap_array_t* res,
const float* y_norm) {
if (nx < FLAT_BATCH_THRESHOLD) {
knn_L2Sqr_expand_alone_T (x, y, d, nx, ny, res, y_norm);
}
else {
knn_L2Sqr_expand_batch_T (x, y, d, nx, ny, res, y_norm);
}
}
}
#endif
#ifdef OPT_IVFFLAT_DTYPE
#define SCANNER_USE_BATCH false
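// Note (added for clarity): when true, the scanner computes all distances of an
// inverted list in one BLAS call (BatchInvertedListScanner_T) instead of one
// vector at a time; only the float specialization has a genuine batch path,
// other element types fall back to the per-vector scanner.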
namespace faiss {
//===========================Inverted List Scanner============================
template <typename T>
class InvertedListScanner_T : public InvertedListScanner {
using idx_t = InvertedListScanner::idx_t;
protected:
size_t d;
size_t code_size;
bool store_pairs;
const T* converted_x;
idx_t list_no;
public:
InvertedListScanner_T (size_t d, bool store_pairs):
d (d), code_size (sizeof(T) * d), store_pairs (store_pairs),
converted_x(nullptr), list_no(-1) {
}
virtual ~InvertedListScanner_T () {
if (converted_x) {
del_converted_x_T (d, converted_x);
}
}
virtual void set_query (const float* query) override {
if (converted_x) {
del_converted_x_T (d, converted_x);
}
converted_x = convert_x_T<T> (d, query);
}
virtual void set_list (idx_t lidx, float) override {
list_no = lidx;
}
virtual float distance_to_code (const uint8_t*) const override {
FAISS_THROW_MSG ("not implemented");
}
virtual size_t scan_codes (size_t list_size, const uint8_t* codes,
const idx_t* ids, float* simi, idx_t* idxi, size_t k)
const = 0;
};
template <typename T, typename C, typename D>
class AloneInvertedListScanner_T : public InvertedListScanner_T<T> {
using idx_t = InvertedListScanner::idx_t;
using Scanner = InvertedListScanner_T<T>;
private:
D* distance;
public:
AloneInvertedListScanner_T (size_t d, bool store_pairs, D* distance):
Scanner (d, store_pairs), distance (distance) {
}
virtual ~AloneInvertedListScanner_T () {
delete distance;
}
virtual size_t scan_codes (size_t list_size, const uint8_t* codes,
const idx_t* ids, float* simi, idx_t* idxi,
size_t k) const override {
size_t nup = 0;
for (size_t i = 0; i < list_size; i++) {
float dis = (*distance) (Scanner::list_no, i,Scanner::converted_x,
(const T*)codes, Scanner::d);
codes += Scanner::code_size;
if (C::cmp (simi[0], dis)) {
heap_pop<C> (k, simi, idxi);
int64_t id = Scanner::store_pairs ?
lo_build (Scanner::list_no, i) :
ids[i];
heap_push<C> (k, simi, idxi, dis, id);
nup++;
}
}
return nup;
}
};
template <typename T>
InvertedListScanner* get_IP_alone_scanner_T (size_t d, bool store_pairs) {
struct IP {
inline float operator () (size_t /*ilist*/, size_t /*jy*/,
const T* x, const T* yj, size_t d) const {
return vec_IP_T (x, yj, d);
}
}
*distance = new IP;
return new AloneInvertedListScanner_T<T, CMin<float, int64_t>, IP> (d,
store_pairs, distance);
}
template <typename T>
InvertedListScanner* get_L2Sqr_alone_scanner_T (size_t d, bool store_pairs) {
struct L2Sqr {
inline float operator () (size_t /*ilist*/, size_t /*jy*/,
const T* x, const T* yj, size_t d) const {
return vec_L2Sqr_T (x, yj, d);
}
}
*distance = new L2Sqr;
return new AloneInvertedListScanner_T<T, CMax<float, int64_t>, L2Sqr> (d,
store_pairs, distance);
}
template <typename T, typename TNorm>
InvertedListScanner* get_L2Sqr_expand_alone_scanner_T (size_t d,
bool store_pairs, const TNorm y_norm) {
struct L2SqrExpand {
const TNorm y_norm;
inline float operator () (size_t ilist, size_t jy,
const T* x, const T* yj, size_t d) {
return y_norm [ilist] [jy] - 2 * vec_IP_T (x, yj, d);
}
}
*distance = new L2SqrExpand {
.y_norm = y_norm,
};
return new AloneInvertedListScanner_T<T, CMax<float, int64_t>,
L2SqrExpand> (d, store_pairs, distance);
}
template <typename T, typename C, typename D>
class BatchInvertedListScanner_T : public InvertedListScanner_T<T> {
using idx_t = InvertedListScanner::idx_t;
using Scanner = InvertedListScanner_T<T>;
private:
D* distance;
public:
BatchInvertedListScanner_T (size_t d, bool store_pairs, D* distance):
Scanner (d, store_pairs), distance (distance) {
}
virtual ~BatchInvertedListScanner_T () {
delete distance;
}
virtual size_t scan_codes (size_t list_size, const uint8_t* codes,
const idx_t* ids, float* simi, idx_t* idxi,
size_t k) const override {
float* distances = new float [list_size];
(*distance) (Scanner::converted_x, Scanner::list_no, list_size,
(const T*)codes, Scanner::d, distances);
size_t nup = 0;
for (size_t i = 0; i < list_size; i++) {
float dis = distances [i];
if (C::cmp (simi[0], dis)) {
heap_pop<C> (k, simi, idxi);
int64_t id = Scanner::store_pairs ?
lo_build (Scanner::list_no, i) : ids[i];
heap_push<C> (k, simi, idxi, dis, id);
nup++;
}
}
delete[] distances;
return nup;
}
};
template <typename T>
inline InvertedListScanner* get_IP_batch_scanner_T (size_t d, bool store_pairs,
T*) {
return get_IP_alone_scanner_T<T> (d, store_pairs);
}
inline InvertedListScanner* get_IP_batch_scanner_T (size_t d, bool store_pairs,
float*) {
struct IP {
inline void operator () (const float* x, size_t /*ilist*/,
size_t list_size, const float* y, size_t d, float* distances)
const {
cblas_sgemv (CblasRowMajor, CblasNoTrans, list_size, d, 1.0f,
y, d, x, 1, 0.0f, distances, 1);
}
}
*distance = new IP;
return new BatchInvertedListScanner_T<float, CMin<float, int64_t>, IP> (d,
store_pairs, distance);
}
template <typename T>
inline InvertedListScanner* get_L2Sqr_batch_scanner_T (size_t d,
bool store_pairs, T*) {
return get_L2Sqr_alone_scanner_T<T> (d, store_pairs);
}
template <typename T, typename TNorm>
inline InvertedListScanner* get_L2Sqr_expand_batch_scanner_T (size_t d,
bool store_pairs, const TNorm y_norm, T*) {
return get_L2Sqr_expand_alone_scanner_T<T> (d, store_pairs,
y_norm);
}
template <typename TNorm>
inline InvertedListScanner* get_L2Sqr_expand_batch_scanner_T (size_t d,
bool store_pairs, const TNorm y_norm, float*) {
struct L2SqrExpand {
const TNorm y_norm;
inline void operator () (const float* x, size_t ilist,
size_t list_size, const float* y, size_t d, float* distances)
const {
memcpy (distances, &(y_norm [ilist] [0]),
list_size * sizeof(float));
cblas_sgemv (CblasRowMajor, CblasNoTrans, list_size, d, -2.0f,
y, d, x, 1, 1.0f, distances, 1);
}
}
*distance = new L2SqrExpand {
.y_norm = y_norm,
};
return new BatchInvertedListScanner_T<float, CMax<float, int64_t>,
L2SqrExpand> (d, store_pairs, distance);
}
template <typename T>
inline InvertedListScanner* get_IP_scanner_T (size_t d, bool store_pairs) {
if (!SCANNER_USE_BATCH) {
return get_IP_alone_scanner_T<T> (d, store_pairs);
}
else {
return get_IP_batch_scanner_T (d, store_pairs, (T*)nullptr);
}
}
template <typename T>
inline InvertedListScanner* get_L2Sqr_scanner_T (size_t d, bool store_pairs) {
if (!SCANNER_USE_BATCH) {
return get_L2Sqr_alone_scanner_T<T> (d, store_pairs);
}
else {
return get_L2Sqr_batch_scanner_T (d, store_pairs, (T*)nullptr);
}
}
template <typename T, typename TNorm>
inline InvertedListScanner* get_L2Sqr_expand_scanner_T (size_t d,
bool store_pairs, const TNorm y_norm) {
if (!SCANNER_USE_BATCH) {
return get_L2Sqr_expand_alone_scanner_T<T> (d, store_pairs, y_norm);
}
else {
return get_L2Sqr_expand_batch_scanner_T (d, store_pairs, y_norm,
(T*)nullptr);
}
}
}
#endif |
%!TEX root = ../thesis.tex
%*******************************************************************************
%*********************************** Analysis Preservation *********
%*******************************************************************************
\chapter{Preservation and reusability}\label{ch:preservation}
\graphicspath{{chapter-preservation/Figs/Vector/}{chapter-preservation/Figs/}}
Particle physics experiments such as the \gls{lhc} experiments are designed to collect physics data over several decades and operate at scales and complexities that make an independent and complete replication unfeasible or even a futile endeavour~\cite{open_is_not_enough}.
Due to their uniqueness, the data taken at these experiments and the physics results derived are highly valuable and challenge the scientific method from a reproducibility and reusability point of view~\cite{open_is_not_enough}.
In the following, reusability problems directly related to the computational analysis of a given dataset\footnote{This is in contrast to also considering the actual collection of data. As such, the implementation of a computational analysis of a dataset can, in the following, be seen as an experimental setup that needs to be preserved and reusable.} are discussed, and approaches taken in view of analysis preservation and reusability are presented.
This chapter starts with a brief motivation for \textit{reinterpretations}, \ie reusing an analysis in light of additional signal models, which is followed by a description of the main ingredients required.
The remaining sections discuss three separate efforts aiming to improve the reusability of the \onelepton analysis. All three efforts are not only relevant in the scope of this thesis, but also for reinterpretation activities currently ongoing within ATLAS.
\section{The case for reinterpretations}\label{sec:reinterpretations}
\subsection{Motivation}
Designing and performing searches for \gls{bsm} physics requires a substantial amount of person-power and computing resources. As laid out in detail in \cref{part:simplified_model_analysis} of this thesis, an analysis generally aims to design signal regions in which a given \gls{bsm} signal can be efficiently discriminated against \gls{sm} background. Although the careful design of such regions already requires a significant amount of resources, it constitutes only a fraction of the work necessary for concluding the search.
Contributions in the signal regions from \gls{sm} processes need to be estimated, usually requiring expensive \gls{mc} simulations and the development of background estimation strategies. Systematic uncertainties arising from numerous sources need to be considered and their impact estimated.
For the \gls{bsm} signal, a similar processing pipeline involving \gls{mc} simulation, event reconstruction and event selection including uncertainties needs to be executed.
Furthermore, recorded data also has to be reconstructed and processed through the analysis-specific event selection.
Only after the expected and observed event rates in all regions are known, the statistical evaluation can be performed, and the final analysis results, \eg, quantifying excesses in data or setting limits on model parameters, can be determined.
\Cref{fig:pipeline_analysis} illustrates the formal structure of such an analysis, consisting of three main processing pipelines (a \textit{background pipeline}, a \textit{signal pipeline} and a \textit{data pipeline}), followed by the \textit{statistical inference}.
Due to the substantial amount of resources necessary for developing and performing an analysis, it is not feasible to develop dedicated searches for every possible \gls{bsm} scenario.
Instead, analyses are typically only interpreted in a finite set of models with a small number of free parameters that need to be varied.
Still, it is likely that a given analysis is sensitive to a variety of different \gls{bsm} scenarios not considered in the original publication.
Consequently, it is not surprising that there is significant interest in the \gls{hep} community in reinterpreting \gls{bsm} searches in different signal models; such reinterpretations of ATLAS searches for \gls{susy} are routinely performed by a number of dedicated efforts.
In the context of direct constraints on \gls{bsm} physics\footnote{As discussed to some extent in \cref{sec:shortcomings_sm}, indirect constraints on \gls{bsm} models can also come from \gls{sm} precision measurements.}, the search results published by the experimental collaborations represent the only windows into the \gls{lhc} data that are available to the wider \gls{hep} community.
Reinterpretations of \gls{bsm} searches are thus the only possibility to determine the direct implications of \gls{lhc} data for a broad range of models~\cite{reinterpretation_workshop}.
As will be discussed in detail in \cref{ch:pmssm}, reinterpretations are not only of interest for the wider \gls{hep} community, but also for the experimental collaborations themselves. Within the ATLAS Collaboration, reinterpretations of \gls{susy} searches in complete \gls{susy} models can, for example, serve as powerful tools for providing a comprehensive summary of the overall sensitivity to more realistic supersymmetric models. As such, the efforts discussed in the remainder of this chapter, as well as in \cref{ch:simplify,ch:pmssm}, are relevant not only for the work presented in this thesis, but also for reinterpretation efforts currently ongoing within the ATLAS Collaboration.
\begin{figure}
\centering\includegraphics[width=\textwidth]{pipeline}
\caption{Full analysis workflow including the three main processing pipelines for deriving background and signal estimates as well as observed data counts. The outputs of the three processing pipelines are combined into a likelihood forming the basis for the statistical inference. In a \textsc{Recast} setup (details in the text), the estimated background rates and observed data counts are archived, and the signal pipeline is fully preserved, such that it can be re-executed with different inputs at any time. Figure created by the author but based on \mbox{\reference\cite{ATL-PHYS-PUB-2019-032}}.}
\label{fig:pipeline_analysis}
\end{figure}
\subsection{Approaches for reinterpretations}
As the event selection of an analysis is fixed, the \textit{pre-fit} background estimates (\ie the estimated background rates before the background-only fit described in \cref{sec:results_background_only}) and observed data counts in the regions of interest of the analysis do not change.
The data and background pipelines shown in~\cref{fig:pipeline_analysis}, entering the statistical inference of the analysis only by means of event rates, can therefore be archived in a format that is significantly smaller than the original input data.
Hence, reinterpreting a search in the light of a new signal model requires the re-execution of only two of the main analysis ingredients with (partially) new inputs: the signal pipeline and the statistical inference.
Recently, it has become possible to preserve the partial analysis likelihood\footnote{As before, this only refers to likelihoods built using the \textsc{HistFactory} template.} built from the background estimates and observed data in a pure-text format~\cite{ATL-PHYS-PUB-2019-029}, including all nuisance parameters and auxiliary data. Once the signal estimates are known, a new full analysis likelihood can be built, and the viability of the new signal model can be tested with respect to the analysis in question. In \cref{fig:pipeline_analysis}, the preserved partial likelihood is indicated through a red rectangle. The pure-text format of the likelihood readily lends itself to publication of the likelihood, an effort that is further discussed in~\cref{sec:full_likelihood}.
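The mechanics of this patching can be illustrated with a minimal sketch based on \texttt{pyhf} and the \texttt{jsonpatch} package (the file names are hypothetical, and the patch is assumed to follow the published \texttt{JSON} patch conventions):
\begin{verbatim}
import json
import jsonpatch  # applies RFC 6902 JSON patches
import pyhf

# Preserved background-only likelihood and a signal patch for one
# model (hypothetical file names).
with open("BkgOnly.json") as f:
    bkg_spec = json.load(f)
with open("signal_patch.json") as f:
    patch = json.load(f)

# Patch in the signal sample to obtain a full analysis likelihood.
workspace = pyhf.Workspace(jsonpatch.apply_patch(bkg_spec, patch))
model = workspace.model()
data = workspace.data(model)

# Observed CLs value for the nominal signal strength.
cls_obs = pyhf.infer.hypotest(1.0, data, model, test_stat="qtilde")
\end{verbatim}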
Different approaches can be taken for rendering the signal pipeline reusable to the extent that event rate estimates for new \gls{bsm} scenarios of interest can be derived.
Whilst analysis efforts typically maintain a comprehensive documentation of the methods and algorithms developed, the complexity of the software implementations may hide minute but crucial details, which can lead to a loss of knowledge concerning how the analysis results were derived.
Manifestly the most precise approach thus involves executing the original analysis software, but using a different \gls{bsm} model as input.
This requires the preservation of the entirety of the original software environment, including the exact workflows in a parameterised form, and therefore constitutes the most technologically demanding and involved approach. A framework designed to facilitate such an effort, called \textsc{Recast}, was originally proposed in \reference\cite{RECAST_cranmer} and is currently under development. It aims to provide the cyber-infrastructure needed for offering \textit{reinterpretations as a service}.
Physicists, wishing to reinterpret a search with \textsc{Recast}, would provide an alternative \gls{bsm} model through a web interface and trigger an ATLAS-internal computational workflow that would re-execute the original analysis using the new signal inputs, ultimately delivering the \textit{recasted} results. An attempt to fully preserve the \onelepton search using the \textsc{Recast} paradigm is discussed in \cref{sec:recast_implementation}.
As the details of the existing \textsc{Recast} implementations of ATLAS searches for \gls{susy} are not publicly available and are only meant to be interacted with through a formal \textsc{Recast} request, the exact implementation of the analysis selection is generally not available outside the ATLAS Collaboration\footnote{As a matter of fact, the exact implementation is often not fully re-executable outside the small original analysis team to begin with. Recastable analyses are therefore already important for various efforts (some of which are discussed in the following) within the ATLAS Collaboration itself.}.
For this reason, a number of public tools aiming to reimplement an approximated version of the event selections of a number of \gls{bsm} searches at the \gls{lhc} are available.
Prominent examples include \textsc{CheckMate}~\cite{Checkmate2:2016npn,Checkmate:2013wra} and \textsc{MadAnalysis5}~\cite{MadAnalysis:2012fm}.
ATLAS has internally maintained a similar catalogue of its \gls{susy} analyses and is publishing event selection snippets in \Cpp for many \gls{susy} searches on \textsc{HEPData}~\cite{HEPData:2017ypu}, a repository for high energy physics data.
Recently, this package maintained by ATLAS, called \textsc{SimpleAnalysis}~\cite{simpleanalysis}, has been made publicly available, allowing the \Cpp snippets to be executed outside the collaboration.
A crucial step, necessary for achieving a reliable reimplementation of the signal pipeline, is the detector simulation.
Executing the full detector simulation requires access to the collaboration's detector description and is computationally expensive, disfavouring\footnote{This is especially true for reinterpretation efforts outside the ATLAS Collaboration, which, for reasons not discussed herein, cannot make use of the collaboration's detector description.} its usage in the context of large-scale reinterpretations over a large set of models.
For this reason, it is often approximated using simplified detector geometries and granularities.
The most common package for a fast detector simulation outside of the ATLAS Collaboration is \textsc{Delphes}~\cite{Delphes:2009tx}, which is used in, \eg, \textsc{CheckMate} and \textsc{MadAnalysis5}.
Other packages, such as \textsc{Rivet}~\cite{Rivet1:2010ar,Rivet2:2019stt}, approximate the detector response using dedicated four-vector smearing techniques, assuming that the detector response roughly factorises into the responses of single particles.
Internally, the ATLAS Collaboration also maintains a dedicated framework for four-vector smearing, used in scenarios where other fast simulation techniques are still too expensive.
As they will be heavily exploited in \cref{ch:pmssm}, these dedicated smearing functions are further discussed in \cref{sec:truth_smearing}.
Finally, instead of attempting to estimate the signal rates of a new model using \gls{mc} simulation and (reimplemented) analysis event selections, some reinterpretation efforts, as for example \textsc{SModelS}~\cite{SModelS1:2013mwa,SModelS2:2017neo}, use \textit{efficiency maps} encoding the selection and acceptance efficiencies of the analysis as a function of the model parameters (typically the sparticle masses in the case of \gls{susy} searches) and analysis selections.
Such efficiency maps are routinely published on \textsc{HEPData} by ATLAS searches for \gls{susy}, and allow for efficient reinterpretations, as long as the signal efficiencies mostly depend on the signal kinematics and are largely independent from the specific details of the signal model~\cite{SModelS1:2013mwa}. For the \onelepton search presented herein, the efficiency maps, including additional analysis data products, are available at \reference\cite{HEPdata_1Lbb}.
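Schematically, and assuming the analysis response indeed factorises into the published maps, the expected signal rate in a given region follows from
\begin{equation}
N_\mathrm{sig} = \sigma \cdot \mathcal{L}_\mathrm{int} \cdot (A \times \epsilon),
\end{equation}
where $\sigma$ denotes the signal cross section, $\mathcal{L}_\mathrm{int}$ the integrated luminosity, and $A \times \epsilon$ the acceptance times efficiency interpolated from the published map.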
\section{Public full likelihood}\label{sec:full_likelihood}
The likelihood is arguably one of the most information-dense and important data products of an analysis.
If the exact likelihood function of the original analysis is not known in reinterpretation efforts\footnote{Up until recently, the exact likelihood function was not part of the data products published by ATLAS searches for \gls{susy}, hence approximations of the statistical models were naturally a crucial part of most reinterpretation efforts outside the collaboration.}, approximations need to be made for the statistical inference, \eg, in terms of the correlations between event rate estimates as well as the treatment of uncertainties.
Recently, ATLAS has started to publish full analysis likelihoods built using the \textsc{HistFactory} \gls{pdf} template~\cite{ATL-PHYS-PUB-2019-029}.
This effort has been facilitated by the development of \texttt{pyhf}~\cite{pyhf_joss,pyhf} (cf. \cref{sec:likelihood_function}), in conjunction with the introduction of a \texttt{JSON} specification fully describing the \textsc{HistFactory} template.
As a pure-text format, the \texttt{JSON} likelihoods are human- and machine-readable, highly compressible and easily put under version control, all properties that make them well suited for long-term preservation, which in turn is a crucial precondition for reinterpretations.
The full likelihood of the \onelepton search is publicly available at \reference\cite{fullLH_1Lbb} and is not only heavily used in the following chapters, but also in various analysis reinterpretation and combination efforts currently ongoing in the ATLAS Collaboration.
Several efforts outside of the ATLAS Collaboration have already included the analysis likelihood into their reinterpretations, and the \textsc{SModelS} and \textsc{MadAnalysis5} Collaborations have both reported significant precision improvements through its use~\cite{SModelS_pyhf:2020grj,Goodsell:2020ddr,Fuks:2021wpe}. Furthermore, the full likelihood of the search presented herein has recently been used to demonstrate the concept of scalable, distributed statistical inference on high-performance computers~\cite{Feickert:2021sua}.
Through the \texttt{funcX} package~\cite{chard20funcx}, \texttt{pyhf} is leveraged as a highly scalable \textit{function as a service} to fit the entire \onelepton signal grid of 125 signal points with a wall time of $\SI{156}{\second}$ using 85 available worker nodes\footnote{These benchmarks use \texttt{pyhf}'s \textsc{NumPy} backend and \textsc{SciPy} optimiser, a combination that has a slower log-likelihood minimisation time than \eg \textsc{PyTorch} coupled with \textsc{SciPy}, as will be shown in \cref{sec:cpu_performance}. In that sense, the performance quoted in \reference\cite{Feickert:2021sua} is slightly conservative.}.
\section{Full analysis preservation using containerised workflows}\label{sec:recast_implementation}
For an analysis to be fully reusable under the \textsc{Recast} paradigm, the signal pipeline of the original analysis (cf. \cref{fig:pipeline_analysis}) needs to be preserved such that it can be re-executed on new inputs.
As typically only the processing steps after the event reconstruction are analysis-specific, it is sufficient to preserve this part of the signal pipeline.
Processing steps including and preceding the event reconstruction only involve the central ATLAS production system, introduced in \cref{sec:mc_simulation}, and result in an ATLAS-internal data format serving as input for physics analyses. These processing steps are preserved using centrally provided ATLAS infrastructure and thus do not need to be within the scope of the preservation discussed in the following.
In the following, the term \textit{signal analysis} will, as indicated in \cref{fig:pipeline_analysis}, refer to the analysis-specific processing steps that are not handled by the central ATLAS production system, typically starting with the selection of events that have passed the reconstruction step and are provided in the aforementioned internal data format. Preserving the signal analysis requires not only the preservation of the full software environment needed for the different processing steps, but also knowledge of the correct usage of the software through parameterised job templates, together with a workflow graph connecting the different processing steps. A graph representation of the entire analysis, implemented in \textsc{Recast}, is shown in \cref{fig:recast_workflow}.
\subsection{Software preservation}
As much of the software is only tested, validated and deployed on a narrow set of architectures and platforms, the full software environment defining an analysis pipeline not only includes the original analysis-specific code used for object definitions, calibrations, event selection and statistical inference, but also the operating system used, and a number of low-level system libraries that the applications depend upon.
Preserving the full software environment can be achieved through the use of \textit{Docker containers}~\cite{docker,Binet:2134524}, a technology that---except for the operating system kernel---packages the full software environment into a portable data format, including a layered file system, the operating system as well as the actual application and all of its dependencies.
As opposed to full virtualisation, Docker containers do not rely on actual hardware virtualisation but share the operating system kernel with the host, \ie the computing system that the containers are run on. As such, they only interact with the host through system calls to the Linux kernel~\cite{Binet:2134524}, offering a highly stable interface. This makes Docker containers a well-suited, lightweight solution for deploying isolated applications on a heterogeneous computing infrastructure.
Due to the specific software structure of the \onelepton search, a containerisation requires a total of three container images.
Two images contain the software necessary for performing the physics object calibrations and event selection, as well as the conversion of the information into a format that can be used by the downstream steps. The third image contains the software necessary for the statistical inference, relying on the \texttt{pyhf} implementation of the \textsc{HistFactory} models in order to benefit from the possibility of using a partial \texttt{JSON} likelihood to preserve background and data rates.
The Docker images are built from suitable base images containing the software environment used for deriving the published \onelepton search results, expanded with the relevant analysis software. All Docker images are subject to version control and continuous integration, such that changes to the underlying software environment can be automatically tracked and tagged. This enables a consistent preservation of multiple versions of the analysis pipeline.
\subsection{Processing steps preservation}
Preserving the software environment is not sufficient, as detailed instructions on how to use it have to be given. This is achieved through parameterised job templates that specify the precise commands and arguments required to re-execute the analysis code for specific processing steps. As re-executing the analysis pipeline using different signal models involves varying input parameters, all job template parameters are exposed to the user. In \cref{fig:recast_workflow}, the parameterised job templates are shown as blue rectangles, while their input arguments and outputs are illustrated as red oval nodes.
The event selection and physics object calibration step requires the actual reconstructed events in the aforementioned ATLAS-internal format as input, as well as corresponding inputs necessary for the pile-up correction. The cross section of the signal process in question needs to be provided together with any generator-level efficiencies. Furthermore, a separate input file containing the theory uncertainties on the expected signal rates needs to be given. Finally, the statistical inference step, generating a new full analysis likelihood and performing the necessary hypothesis tests, requires the previously discussed partial likelihood as input.
\subsection{Workflow preservation}
\begin{sidewaysfigure}
\centering\includegraphics[width=0.95\textwidth]{yadage_workflow_instance}
\caption{Graph of the workflow as specified for the analysis pipeline. The containerised processing steps are represented as blue rectangular nodes, while input parameters, input files and outputs are shown as red oval nodes. The workflow comprises four processing steps: \texttt{signal\_analysis\_stage\_mc16(a,d,e)}, \texttt{merging\_stage}, \texttt{workspace\_creation\_stage} and \texttt{statistical\_inference\_stage}. The first two steps perform the object calibration, event selection and merging of the three \gls{mc} datasets representing the three data-taking periods 2015--2016, 2017 and 2018. The latter two steps implement the patching of the partial likelihood with the expected signal rates, as well as the final statistical inference. Compared to \cref{fig:pipeline_analysis}, the first two steps implement the \textit{signal analysis} part, while the latter two steps implement the \textit{statistical inference} deriving the final results. Figure created using \textsc{GraphViz}~\cite{Gansner00anopen,neato}.}
\label{fig:recast_workflow}
\end{sidewaysfigure}
Finally, the preserved processing steps need to be linked together, creating a parameterised workflow completely defining the analysis pipeline, starting from centrally produced \gls{mc} datasets up to the statistical inference results. Within \textsc{Recast}, this is achieved using the workflow description language \texttt{yadage}~\cite{yadage:2017frf}, capturing the full workflow in \texttt{YAML} format. The workflow connects the job templates and defines their processing order and dependencies. In \cref{fig:recast_workflow}, it is indicated through the black arrows connecting the nodes of the graph.
The \textsc{Recast} implementation of the analysis presented in this work has been validated against original analysis inputs. The expected and observed CL$_s$ values derived in the original analysis were successfully re-derived using the containerised workflow implementation. On a non-isolated CPU, the full preserved analysis pipeline for a single signal model can be executed with a wall time of about $\SI{50}{\minute}$. Due to the highly portable nature of the containerised workflow, the pipeline can easily be run in a distributed setup, allowing scalable reinterpretations at full analysis precision. Although not explicitly used in the remainder of this thesis, the \textsc{Recast} implementation of the \onelepton search is crucial for the large-scale reinterpretation efforts in the \gls{pmssm} currently ongoing in ATLAS (and discussed to some extent in \cref{ch:pmssm}). In these efforts, the \textsc{Recast} implementation allows the systematic reinterpretation of the \onelepton search in any \gls{pmssm} model of interest using the full analysis precision.
%\FloatBarrier
\section{Truth-level analysis}\label{sec:truth_analysis}
\graphicspath{{chapter-pmssm/Figs/Vector/}{chapter-pmssm/Figs/}}
A full preservation of the entire analysis pipeline, as discussed in the previous section, is highly desirable, since it allows for a maximum precision reinterpretation of the original analysis using a new \gls{bsm} model.
As the full detector simulation needs a significant amount of computing resources in addition to the non-negligible wall time of the actual preserved analysis pipeline, this approach can only be used on a limited set of models.
In large-scale reinterpretations over high-dimensional parameter spaces, the amount of models that need to be sampled and investigated using the analysis is too large to run the fully preserved analysis pipeline in every case.
In order to significantly reduce the number of models that need to be passed through the full analysis pipeline, a pre-sorting using a simplified analysis implementation can be exploited. Models that can be safely considered to be (non-)excluded based on this simplified analysis implementation consequently do not need to be evaluated at the full analysis precision, potentially saving a significant amount of computing resources.
In the following, two complementary approaches to analysis simplifications are discussed, targeting both the \textit{signal pipeline} as well as the \textit{statistical inference} blocks in~\cref{fig:pipeline_analysis}.
This section discusses the \textsc{SimpleAnalysis} implementation of the analysis, an approach implementing the signal pipeline at \textit{truth-level}, \ie using the generator-level objects without running a dedicated detector simulation. An approximation of the detector response using four-vector smearing techniques is discussed.
The second simplification is discussed in \cref{ch:simplify}, introducing a procedure for building simplified likelihoods from the full likelihoods of ATLAS searches for \gls{susy}, allowing a significant decrease of the wall time needed for the statistical inference.
In \cref{ch:pmssm}, both approximations are combined and applied, in the context of the \onelepton search, on a set of \gls{susy} models sampled from the \gls{pmssm}.
\subsection{Truth-level selection}\label{sec:truth_selection}
\begin{figure}
\centering
\begin{subfigure}[b]{0.49\linewidth}
\centering\includegraphics[width=\textwidth]{20210324_noLabel_noOR/700_150/lep1Pt_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.49\linewidth}
\centering\includegraphics[width=\textwidth]{20210324_noLabel_noOR/700_150/jet1Pt_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\caption{Impact of the overlap removal (OR) procedure at truth-level illustrated in the lepton and leading jet transverse momenta distributions. The truth-distributions with (blue) and without (green) overlap removal are compared with a reconstruction-level (orange) distribution. The representative benchmark signal point with \mbox{$m(\charg$/$\neutr), m(\lsp) = 700, \SI{150}{\GeV}$} is shown in both plots. Both truth-level distributions are shown after smearing. All distributions are shown in a loose preselection requiring an electron or a muon, $\met>\SI{50}{\GeV}$, $\mt > \SI{50}{\GeV}$, and 2--3 jets, two of which need to be \textit{b}-tagged.}
\label{fig:overlap_removal_truth}
\end{figure}
All signal and control regions considered in the original \onelepton search are implemented at truth-level using the \textsc{SimpleAnalysis} framework.
The exact implementation has been published, together with the previously discussed efficiency maps and analysis likelihood, as part of the auxiliary analysis data at \reference\cite{HEPdata_1Lbb}.
In fact, the \textsc{SimpleAnalysis} implementation of the search was already used in~\cref{ch:uncertainties} for the derivation of some of the theory uncertainties.
The truth-level implementation explicitly specifies all object definitions introduced in~\cref{sec:object_definitions}, even though some of them, like the lepton isolation, are technically not well-defined at truth-level.
The four-vector smearing described in the following is, however, in many cases implemented as a function of said object definitions and hence still allows them to be considered to some extent.
Additionally, as discussed in~\cref{sec:reinterpretations}, the full specification of the original analysis event selection, including all object definitions, allows for more straightforward reinterpretations by efforts outside of the ATLAS Collaboration that generally do not have access to the original analysis software.
Following the object definitions, an overlap removal procedure adhering to the same prescription as the reconstruction-level\footnote{The term \textit{reconstruction-level} here refers to distributions obtained with \gls{mc} simulated datasets for which either the full detector simulation using \textsc{Geant4}, or the \textsc{ATLFAST-II} fast simulation have been run with subsequent object reconstruction.} analysis (cf.~\cref{sec:overlap_removal}) is performed. In particular, the truth-level overlap removal relies on the same shrinking cone definitions used at reconstruction-level.
Since tracking information is not available at truth-level, the overlap removal step removing electrons sharing a track with a muon is approximated by using a distance parameter of $\upDelta R = 0.01$ between the objects.
Although often neglected\footnote{The overlap removal procedures in ATLAS \gls{susy} searches tend to be quite intricate, rendering them non-trivial to re-implement without ATLAS and analysis-specific knowledge.} in reinterpretation efforts outside of the collaboration, the correct implementation of the overlap removal procedure employed in the original analysis is crucial to reproduce the signal estimates of the original analysis.
\Cref{fig:overlap_removal_truth} illustrates this by showing the lepton and leading jet $\pt$ distributions of a representative signal point in configurations with and without overlap removal at truth-level, and comparing it with the distributions obtained at reconstruction-level.
Not implementing the overlap removal procedure of the original \onelepton search results in many truth-level events not passing the analysis selections. This is due to additional truth-level objects in the final state that would otherwise have been removed by the overlap removal.
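For illustration, the electron--muon step above can be sketched as follows (a minimal sketch; the object containers and attribute names are hypothetical):
\begin{verbatim}
import math

def delta_r(a, b):
    # Angular distance between two objects with eta/phi attributes.
    deta = a.eta - b.eta
    dphi = math.remainder(a.phi - b.phi, 2 * math.pi)  # wrap to [-pi, pi]
    return math.hypot(deta, dphi)

def remove_shared_track_electrons(electrons, muons, dr_max=0.01):
    # Truth-level stand-in for the reconstruction-level "shared track"
    # removal: drop electrons within dR < 0.01 of any muon.
    return [e for e in electrons
            if all(delta_r(e, mu) >= dr_max for mu in muons)]
\end{verbatim}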
Finally, the exact implementation of all analysis observables is explicitly given, followed by the definition of all control and signal regions.
\subsection{Truth smearing}\label{sec:truth_smearing}
The general assumption of the truth smearing discussed herein is that the detector response roughly factorises into the responses of single particles.
This makes it possible to use the ATLAS detector performance results for constructing detector response maps parameterised in different observables for each physics object.
Detector response maps include object reconstruction and identification efficiencies as well as scale factors to correct for differences between \gls{mc} simulation and observed data.
Likewise, effects from the finite resolution of energy measurements in the detector are modelled through energy or momentum resolution maps. In the following, the four-vector components of electrons, muons, jets and $\etmiss$ are smeared.
In the case of truth electrons, the identification efficiencies considered are parameterised in $\vert\eta\vert$ and $\pt$~\cite{PERF-2017-01}. In $\vert\eta\vert$, nine fixed-width bins are used to parameterise the identification efficiency. In $\pt$, six bins are implemented and a linear interpolation between two adjacent $\pt$-bins is employed to get the efficiency for the $\pt$ of each truth electron.
Different efficiency maps exist for the different working points of the likelihood-based identification discriminant introduced in \cref{sec:reco_electrons}~\cite{PERF-2017-01}.
The probability of finding a fake electron in a truth jet is estimated through a similar two-dimensional map depending on the truth jet $\eta$ and $\pt$, again relying on fixed-width bins in $\vert\eta\vert$ and a linear interpolation in $\pt$. %~\cite{PERF-2017-01}
The range of the $\pt$ interpolation for identification efficiencies and fake rates extends from $\SI{7}{\GeV}$ to $\SI{120}{\GeV}$, covering the majority of all electrons in the analysis.
If the truth $\pt$ of the electron is outside of this range, the identification efficiency and fake rate from the respective bound of the corresponding $\vert\eta\vert$-bin are taken.
The probability of misidentifying an electron as a photon is estimated with different fixed values for the barrel and end-cap regions~\cite{PERF-2017-02}.
Finally, the transverse energy of the electron is smeared with a random number drawn from a Gaussian distribution with standard deviation corresponding to the $\vert\eta\vert$- and $\pt$-dependent energy resolution, measured in $Z\to ee$ and $J/\psi\to ee$ events~\cite{PERF-2017-03}.
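A minimal sketch of such a map lookup, assuming nine fixed-width $\vert\eta\vert$ bins and linear interpolation between $\pt$ nodes (all bin edges and efficiency values below are placeholders, not the measured ones):
\begin{verbatim}
import numpy as np

PT_NODES = np.array([7., 15., 30., 50., 80., 120.])  # GeV
ETA_EDGES = np.linspace(0.0, 2.47, 10)               # nine |eta| bins
EFF = np.full((9, 6), 0.8)                           # placeholder map

def electron_id_efficiency(eta, pt):
    ieta = int(np.clip(np.digitize(abs(eta), ETA_EDGES) - 1, 0, 8))
    # np.interp clamps to the first/last node outside [7, 120] GeV,
    # matching the boundary treatment described in the text.
    return float(np.interp(pt, PT_NODES, EFF[ieta]))
\end{verbatim}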
For truth muons, the identification efficiencies are also parameterised in $\vert\eta\vert$ and $\pt$~\cite{Aad:2020gmm}. Different efficiency maps exist again for the different identification working points (cf. \cref{sec:reco_muon})~\cite{Aad:2020gmm}. Similar to truth electrons, the $\pt$ of the muon is smeared using a Gaussian distribution with standard deviation corresponding to the momentum resolution. The momentum resolution of combined truth muons ($\sigma_\mathrm{CB}$) is computed from the resolutions in the inner detector ($\sigma_\mathrm{ID}$) and the muon spectrometer ($\sigma_\mathrm{MS}$) as
\begin{equation}
\sigma_\mathrm{CB} = \frac{\sigma_\mathrm{ID}\sigma_\mathrm{MS}}{\sqrt{\sigma_\mathrm{ID}^2 + \sigma_\mathrm{MS}^2}},
\end{equation}
where $\sigma_\mathrm{ID}$ and $\sigma_\mathrm{MS}$ are parameterised in $\vert\eta\vert$ and $\pt$ and measured in $Z\to \mu\mu$ and $J/\psi\to \mu\mu$ events~\cite{PERF-2015-10}.
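In code, this combination is a one-liner (a trivial sketch; the inputs are the $\vert\eta\vert$- and $\pt$-interpolated map values):
\begin{verbatim}
import math

def sigma_cb(sigma_id, sigma_ms):
    # Combined resolution from inner-detector and muon-spectrometer terms.
    return sigma_id * sigma_ms / math.hypot(sigma_id, sigma_ms)
\end{verbatim}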
The transverse momentum of truth jets is smeared using a Gaussian with standard deviation equal to the \gls{jer}, provided in a map binned in $\vert\eta\vert$, ranging from $\vert\eta\vert = 0$ to $\vert\eta\vert = 4.5$. The jet energy resolutions are measured in dijet events~\cite{Aad:2020flx} and provided as parameterisations of a noise $N$, stochastic $S$ and constant $C$ term for each $\vert\eta\vert$-bin, such that the resolution can be computed as
\begin{equation}
\frac{\sigma(\pt)}{\pt} = \frac{N}{\pt}\oplus\frac{S}{\sqrt{\pt}}\oplus C.
\end{equation}
Only truth jets with $\SI{10}{\GeV} < \pt < \SI{1.5}{\TeV}$ are smeared. For truth jets with $\pt > \SI{20}{\GeV}$, the flavour tagging efficiency is considered through efficiencies parameterised in $\vert\eta\vert$ and $\pt$. Different flavour tagging efficiency maps are available for the different \textsc{MV2c10} efficiency working points (introduced in~\cref{sec:object_definitions}). All flavour tagging efficiencies are measured in fully reconstructed simulated $\ttbar$ events~\cite{FTAG-2018-01}.
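A minimal sketch of this jet smearing step, with placeholder $N$, $S$ and $C$ values rather than the measured ones:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng()

def jet_pt_resolution(pt, n=3.0, s=0.7, c=0.03):
    # sigma(pT)/pT = N/pT (+) S/sqrt(pT) (+) C, added in quadrature;
    # pT in GeV; n, s, c are placeholder values.
    return np.sqrt((n / pt) ** 2 + s ** 2 / pt + c ** 2)

def smear_jet_pt(pt):
    # Only jets with 10 GeV < pT < 1.5 TeV are smeared.
    if not (10.0 < pt < 1500.0):
        return pt
    return pt * (1.0 + rng.normal(0.0, jet_pt_resolution(pt)))
\end{verbatim}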
Finally, the smeared missing transverse energy is computed by considering the transverse momenta of all smeared truth objects in the event. An approximation for the track soft term is estimated through resolution measurements from $Z\rightarrow \ell\ell$ events~\cite{ATLAS-CONF-2018-023}, from which a distribution of the mean soft term projected onto the direction longitudinal to the total transverse momentum of all hard objects in an event, $\makemebold{p}_\mathrm{T}^\mathrm{hard}$, can be inferred. The measured resolutions parallel and perpendicular to $\makemebold{p}_\mathrm{T}^\mathrm{hard}$ are then used to smear the nominal soft track value.
\begin{figure}
\centering
\begin{subfigure}[b]{0.47\linewidth}
\centering\includegraphics[width=\textwidth]{20210324/700_150/met_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.47\linewidth}
\centering\includegraphics[width=\textwidth]{20210324/700_150/mt_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.47\linewidth}
\centering\includegraphics[width=\textwidth]{20210324/700_150/mct_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.47\linewidth}
\centering\includegraphics[width=\textwidth]{20210324/700_150/mbb_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.47\linewidth}
\centering\includegraphics[width=\textwidth]{20210324/700_150/lep1Pt_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.47\linewidth}
\centering\includegraphics[width=\textwidth]{20210324/700_150/jet1Pt_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.47\linewidth}
\centering\includegraphics[width=\textwidth]{20210324/700_150/mlb1_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.47\linewidth}
\centering\includegraphics[width=\textwidth]{20210324/700_150/nBJet30_C1N2_Wh_hbb_700p0_150p0_smeared.pdf}
\end{subfigure}\hfill
\caption{Comparisons of the kinematic distributions of relevant observables at (smeared) truth- and reconstruction-level. A representative benchmark signal point with electroweakino mass parameters \mbox{$m(\charg$/$\neutr), m(\lsp) = 700, \SI{150}{\GeV}$} is shown. The ratio pad shows the ratio of smeared and unsmeared truth-level distributions (blue and green) to reconstruction-level distributions (orange). Only \gls{mc} statistical uncertainties are included in the error bars. All distributions are shown in a loose preselection requiring exactly one electron or muon, $\met>\SI{50}{\GeV}$, $\mt > \SI{50}{\GeV}$, and 2--3 jets, two of which need to be \textit{b}-tagged. The latter requirement is dropped for the \textit{b}-jet multiplicity distribution.}
\label{fig:smearing_preselection}
\end{figure}
\begin{figure}
\centering
\begin{subfigure}[b]{0.49\linewidth}
\centering\includegraphics[width=\textwidth]{yields_SR-LM_unsmeared}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.49\linewidth}
\centering\includegraphics[width=\textwidth]{yields_SR-LM_smeared}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.49\linewidth}
\centering\includegraphics[width=\textwidth]{yields_SR-MM_unsmeared}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.49\linewidth}
\centering\includegraphics[width=\textwidth]{yields_SR-MM_smeared}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.49\linewidth}
\centering\includegraphics[width=\textwidth]{yields_SR-HM_unsmeared}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.49\linewidth}
\centering\includegraphics[width=\textwidth]{yields_SR-HM_smeared}
\end{subfigure}
\caption{Comparison of the expected event rates at truth- and reconstruction-level before (left) and after (right) truth smearing. From top to bottom, the SR-LM, SR-MM and SR-HM signal regions are shown, with cumulative (integrated) $\mct$ bins. Every single point in the scatter plots represents a single signal model considered in the \onelepton search. Uncertainty bars include \gls{mc} statistical uncertainties.}
\label{fig:smearing_signal_regions}
\end{figure}
\section{Validation of the truth-level analysis}
\subsection{Validation in the loose preselection}
The performance of the truth smearing is illustrated in~\cref{fig:smearing_preselection} in a loose preselection for a representative benchmark signal point.
The loose preselection applied requires a final state with an electron or muon, $\met>\SI{50}{\GeV}$, $\mt > \SI{50}{\GeV}$, and 2--3 jets, two of which need to be \textit{b}-tagged.
The reconstruction-level distributions are compared with the truth-level distributions before and after truth smearing. It can be observed that the truth smearing noticeably improves the agreement between the truth- and reconstruction-level distributions.
While the lepton and jet reconstruction and identification efficiencies are---due to their dependence on $\eta$, $\pt$ and individual identification and isolation working points---crucial for the overall agreement in shape, especially at low $\pt$, the inclusion of flavour-tagging efficiencies significantly improves the overall agreement in normalisation.
Although some minor differences remain, a good agreement is observed across the relevant kinematic distributions at loose preselection level.
Most of the differences remaining between smeared truth-level and reconstruction-level distributions in individual bins are well within the \gls{mc} statistical uncertainties, arising from the relatively limited \gls{mc} statistics available.
\subsection{Validation in the signal regions}
As the expected signal rates in the signal regions are ultimately what is entering the statistical inference, it is important that the good agreement observed at preselection is still present in the kinematically tighter selections of the signal regions.
Additionally, it is worth investigating the agreement across all signal points considered in the original analysis, as opposed to only validating specific benchmark models.
A comparison of the reconstruction-level and truth-level event rates before and after smearing in the signal regions SR-LM, SR-MM and SR-HM for all signal models considered in the \onelepton search is shown in~\cref{fig:smearing_signal_regions}.
For the sake of conciseness, only the cumulative $\mct$ bins are shown in each signal region in~\cref{fig:smearing_signal_regions}.
The agreement in the individual $\mct$ bins of SR-LM, SR-MM and SR-HM is provided in~\cref{fig:smearing_signal_regions_1,fig:smearing_signal_regions_2,fig:smearing_signal_regions_3}.
The truth smearing drastically improves the agreement in event rate estimates at truth- and reconstruction-level across all \gls{sr} bins.
While, compared to reconstruction-level, the event rates are generally overestimated at truth-level before smearing, both tend to agree within statistical uncertainties after smearing.
\begin{figure}
\floatbox[{\capbeside\thisfloatsetup{capbesideposition={right,center},capbesidewidth=0.35\textwidth}}]{figure}[\FBwidth]
{\caption{Expected and observed exclusion contours obtained with the full likelihood using reconstruction-level inputs (orange) as well as truth-level inputs before (purple) and after (green) smearing. Uncertainties include all statistical and systematic uncertainties on the background and signal for the reconstruction-level contours, but only statistical and systematic uncertainties on the background for truth-level signal inputs.}\label{fig:simplified_likelihood_after_smearing}}
{\includegraphics[width=0.60\textwidth]{exclusion_1Lbb_truthInput_compareReco_BkgOnly_noLabel}}
\end{figure}
\subsection{Validation using the likelihood}
Using the nominal expected event rates at (smeared) truth-level for every signal model in the original signal grid considered in the \onelepton search, expected and observed CL$_s$ values can be computed and exclusion contours can be derived.
\Cref{fig:simplified_likelihood_after_smearing} compares the expected and observed exclusion contours obtained using the full likelihood and reconstruction-level signal inputs with those obtained using the full likelihood and truth-level signal inputs before and after truth smearing.
While all systematic uncertainties on the signal are included in the reconstruction-level contours, no signal uncertainties are considered when obtaining both the smeared and unsmeared truth-level contours. The full treatment of the systematic uncertainties on the background estimates is performed in both cases.
As expected from the previous validation steps in the signal regions, the sensitivity using unsmeared truth-level signal inputs is significantly overestimated compared to the published analysis exclusion limit using reconstruction-level inputs.
The smeared truth-level inputs, however, yield exclusion contours with an acceptable match compared to the reconstruction-level results.
In summary, the above validation process, performed at multiple selection levels of the analysis, shows that the truth-level analysis with dedicated smearing functions yields a reasonable approximation of the signal pipeline.
For signal models producing final states with kinematics close to those of the scenarios validated in the previous sections, this approach allows the event rate estimates to be determined with high computational efficiency.
In large-scale reinterpretations, the smeared truth-level analysis can be used as a basis for an efficient classification of models into two categories: models that are safely excluded (or not excluded) based on truth-level analysis only, and models where (non-)exclusion is in doubt and instead the precision of the full analysis pipeline using \textsc{Recast} is required.
|
\section{xlate}
\index{xlate}
\begin{shaded}
\begin{alltt}
/** xlate
<---------------------+
>>--+-XLATE-----+--+----------------------+---+-------------------++---->
+-TRANSlate-+ +-inputRange-----------+ +-+ default-table +-+
| <------------------+ |
+---(--inputRange--)-+-+
<------------------+
>---+----------------++------------------------------------------------><
+-xrange--xrange-+
default-table:
+--+-UPper--------------------------+-----------------------------------+
+-LOWer--------------------------+
+-INput--------------------------+
{ +-OUTput-------------------------+ }
{ +-+-TO---+--+----------+--number-+ }
{ +-FROM-+ +-CODEPAGE-+ }
{ }
{ Not yet in njPipes }
XLATE Contributed by Rene Jansen
\end{alltt}
\end{shaded}
|
function [d,g,rr,ss]=v_sigalign(s,r,maxd,m,fs)
%V_SIGALIGN align a clean reference with a noisy signal [d,g,rr,ss]=(s,r,maxd,m,fs)
% Inputs:
%         s    test signal
%         r    reference signal
%         maxd [+-max] or [min max] delay allowed in samples or fractions of length(r)
%              default is maximum that ensures at least 50% of r or s in the overlap
%         m    mode
%                u = unity gain
%                g = find optimal gain [default]
%                a = A-weight the signals
%                b = weight signals by BS-468
%                s = find delay to maximize the correlation coefficient between r and s [default]
%                S = find delay to maximize the energy of the component of r in s
%                p = plot result
%         fs   sample frequency (only used for filtering and plotting)
%
% Outputs:
% d = optimum delay to apply to r
% g = optimal gain to apply to r
% rr = g*r(* -d) [zero padded to match s if ss output is not given]
% ss = s truncated if necessary to match the length of rr
% Copyright (C) Mike Brookes 2011
% Version: $Id: v_sigalign.m 10865 2018-09-21 17:22:45Z dmb $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Bugs/Suggestions
% 1. add option to calculate a DC offset
% 2. optionally find optimal fractional time shift
% 3. split long signals into chunks to reduce memory requirements
ns=length(s);
nr=length(r);
if numel(s)~=ns || numel(r)~=nr
error('Inputs cannot be matrices');
end
s=s(:);
r=r(:);
if nargin<3
maxd=[];
end
switch numel(maxd)
case 0
if nr<ns
lmm=[-0.25*nr ns-0.75*nr];
else
lmm=[-0.25*ns nr-0.75*ns];
end
case 1
lmm=[-maxd maxd];
otherwise
lmm=maxd(1:2);
end
lmm=round(lmm.*(1+(nr-1)*(abs(lmm)<1))); % convert fractions of nr to samples
lmin=lmm(1);
lmax=lmm(2);
lags=lmax-lmin+1;
if lags<=0
error('Invalid lag limits');
end
if nargin<4 || ~numel(m)
m='gs';
end
if nargin<5 || ~numel(fs)
fs=[];
else
if any(m=='a')
[b,a]=v_stdspectrum(2,'z',fs);
s=filter(b,a,s);
r=filter(b,a,r);
elseif any(m=='b')
[b,a]=v_stdspectrum(8,'z',fs);
s=filter(b,a,s);
r=filter(b,a,r);
end
end
% now do cross correlation
rxi=max(1,1-lmin); % first reference sample needed
rxj=min(nr,ns-lmax); % last reference sample needed
nrx=rxj-rxi+1; % length of reference segment
if nrx<1
error('Reference signal too short');
end
fl=2^nextpow2(lmax-lmin+nrx);
sxi=max(1,rxi+lmin); % first signal sample needed
sxj=min(ns,rxj+lmax); % last signal sample needed
rs=v_irfft(v_rfft([s(sxi:sxj); zeros(fl-sxj+sxi-1,1)]).*conj(v_rfft([r(rxi:rxj); zeros(fl-rxj+rxi-1,1)])));
rsu=rs(1:lags);
ssq=cumsum(s(sxi:sxj).^2);
ssqd=[ssq(nrx); ssq(nrx+1:lmax-lmin+nrx)-ssq(1:lmax-lmin)];
if any (m=='S') % maximize energy of common component
[cmx,icx]=max(abs(rsu)); % maximize cross correlation
else
[cmx,icx]=max(rsu.^2./ssqd); % maximize correlation coefficient
end
d=icx-1+lmin;
ia=max(1,d+1); % first sample of s in common region
ja=min(ns,d+nr); % last sample of s in common region
ija=ia:ja;
ijad=ija-d;
rr=r(ijad);
ss=s(ija);
if any (m=='u')
g=1;
else
g=sum(rr.*ss)/sum(rr.^2); % gain to apply to r
end
rr=rr*g;
if ~nargout || any(m=='p')
xco=sum(rr.*ss)/sqrt(sum(rr.^2)*sum(ss.^2));
snr=sum(rr.^2)/sum((rr-ss).^2);
if numel(fs)==1
tun='s';
else
tun='samples';
fs=1;
end
subplot(311);
plot(ija/fs,rr);
pm='+-';
title(sprintf('Ref delay = %.2g %s, %cGain = %.2g dB, Xcorr = %.2g, SNR = %.2g dB',d/fs,tun,pm(1+(g<0)),20*log10(abs(g)),xco,10*log10(snr))); % abs() since the sign of g is printed separately
ylabel('Reference');
set(gca,'XLim',ija([1 end])/fs);
axh(2)=gca;
subplot(312);
plot(ija/fs,ss);
ylabel('Signal');
set(gca,'XLim',ija([1 end])/fs);
axh(1)=gca;
subplot(313);
plot(ija/fs,ss-rr);
ylabel('Residual');
xlabel(sprintf('Time (%s)',tun));
set(gca,'XLim',ija([1 end])/fs);
axh(3)=gca;
linkaxes(axh(1:3),'x');
end
if nargout==3
rr=[zeros(ia-1,1); rr; zeros(ns-ja,1)]; % force to be the size of s
end
|
If $f$ is continuous on the interval $[a,b]$, then $f$ is integrable on $[a,b]$. |
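A standard argument via uniform continuity makes this precise: since $[a,b]$ is compact, $f$ is uniformly continuous, so for every $\varepsilon > 0$ there is a $\delta > 0$ such that $|f(x) - f(y)| < \varepsilon/(b-a)$ whenever $|x - y| < \delta$. For any partition $P$ of $[a,b]$ with mesh less than $\delta$, the upper and lower Darboux sums then satisfy $U(f,P) - L(f,P) \le \sum_i \frac{\varepsilon}{b-a}\,\Delta x_i = \varepsilon$, so $f$ satisfies the Riemann criterion and is integrable on $[a,b]$.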
# GraphHopper Directions API
#
# You use the GraphHopper Directions API to add route planning, navigation and route optimization to your software. E.g. the Routing API has turn instructions and elevation data and the Route Optimization API solves your logistic problems and supports various constraints like time window and capacity restrictions. Also it is possible to get all distances between all locations with our fast Matrix API.
#
# OpenAPI spec version: 1.0.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' TimeWindow Class
#'
#' @field earliest numeric. Earliest allowed start of the time window.
#' @field latest numeric. Latest allowed end of the time window.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
TimeWindow <- R6::R6Class(
'TimeWindow',
public = list(
`earliest` = NULL,
`latest` = NULL,
initialize = function(`earliest`, `latest`){
if (!missing(`earliest`)) {
stopifnot(is.numeric(`earliest`), length(`earliest`) == 1)
self$`earliest` <- `earliest`
}
if (!missing(`latest`)) {
stopifnot(is.numeric(`latest`), length(`latest`) == 1)
self$`latest` <- `latest`
}
},
toJSON = function() {
TimeWindowObject <- list()
if (!is.null(self$`earliest`)) {
TimeWindowObject[['earliest']] <- self$`earliest`
}
if (!is.null(self$`latest`)) {
TimeWindowObject[['latest']] <- self$`latest`
}
TimeWindowObject
},
fromJSON = function(TimeWindowJson) {
TimeWindowObject <- jsonlite::fromJSON(TimeWindowJson)
if (!is.null(TimeWindowObject$`earliest`)) {
self$`earliest` <- TimeWindowObject$`earliest`
}
if (!is.null(TimeWindowObject$`latest`)) {
self$`latest` <- TimeWindowObject$`latest`
}
},
toJSONString = function() {
sprintf(
'{
"earliest": %d,
"latest": %d
}',
self$`earliest`,
self$`latest`
)
},
fromJSONString = function(TimeWindowJson) {
TimeWindowObject <- jsonlite::fromJSON(TimeWindowJson)
self$`earliest` <- TimeWindowObject$`earliest`
self$`latest` <- TimeWindowObject$`latest`
}
)
)
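
# Minimal usage sketch (hypothetical values, not part of the generated code):
# construct a window, serialize it, and parse it back.
tw <- TimeWindow$new(earliest = 28800, latest = 36000)
tw_json <- tw$toJSONString()
tw2 <- TimeWindow$new()
tw2$fromJSONString(tw_json)
stopifnot(tw2$earliest == 28800, tw2$latest == 36000)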
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Natural numbers represented in binary.
------------------------------------------------------------------------
-- This module aims to create an alternative formulation of ℕ that is
-- still reasonably computationally efficient without having to call out
-- to Haskell.
{-# OPTIONS --without-K --safe #-}
module Data.Nat.Binary.Base where
open import Algebra.Core using (Op₂)
open import Data.Nat.Base as ℕ using (ℕ)
open import Data.Sum.Base using (_⊎_)
open import Function using (_on_)
open import Level using (0ℓ)
open import Relation.Binary using (Rel)
open import Relation.Binary.PropositionalEquality using (_≡_)
open import Relation.Nullary using (¬_)
------------------------------------------------------------------------
-- Definition
data ℕᵇ : Set where
zero : ℕᵇ
2[1+_] : ℕᵇ → ℕᵇ -- n → 2*(1+n) = nonzero even numbers
1+[2_] : ℕᵇ → ℕᵇ -- n → 1 + 2*n = odd numbers
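-- Every natural number has a unique representative, e.g.
--   5 = 1 + 2*2   = 1+[2 2[1+ zero ] ]
--   6 = 2*(1 + 2) = 2[1+ 2[1+ zero ] ]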
------------------------------------------------------------------------
-- Ordering relations
infix 4 _<_ _>_ _≤_ _≮_ _≯_ _≰_ _≱_
data _<_ : Rel ℕᵇ 0ℓ where
0<even : ∀ {x} → zero < 2[1+ x ]
0<odd : ∀ {x} → zero < 1+[2 x ]
even<even : ∀ {x y} → x < y → 2[1+ x ] < 2[1+ y ]
even<odd : ∀ {x y} → x < y → 2[1+ x ] < 1+[2 y ]
odd<even : ∀ {x y} → x < y ⊎ x ≡ y → 1+[2 x ] < 2[1+ y ]
odd<odd : ∀ {x y} → x < y → 1+[2 x ] < 1+[2 y ]
-- In these constructors "even" stands for nonzero even.
_>_ : Rel ℕᵇ 0ℓ
x > y = y < x
_≤_ : Rel ℕᵇ 0ℓ
x ≤ y = x < y ⊎ x ≡ y
_≥_ : Rel ℕᵇ 0ℓ
x ≥ y = y ≤ x
_≮_ : Rel ℕᵇ 0ℓ
x ≮ y = ¬ (x < y)
_≯_ : Rel ℕᵇ 0ℓ
x ≯ y = ¬ (x > y)
_≰_ : Rel ℕᵇ 0ℓ
x ≰ y = ¬ (x ≤ y)
_≱_ : Rel ℕᵇ 0ℓ
x ≱ y = ¬ (x ≥ y)
------------------------------------------------------------------------
-- Basic operations
double : ℕᵇ → ℕᵇ
double zero = zero
double 2[1+ x ] = 2[1+ 1+[2 x ] ]
double 1+[2 x ] = 2[1+ (double x) ]
suc : ℕᵇ → ℕᵇ
suc zero = 1+[2 zero ]
suc 2[1+ x ] = 1+[2 (suc x) ]
suc 1+[2 x ] = 2[1+ x ]
pred : ℕᵇ → ℕᵇ
pred zero = zero
pred 2[1+ x ] = 1+[2 x ]
pred 1+[2 x ] = double x
------------------------------------------------------------------------
-- Addition, multiplication and certain related functions
infixl 6 _+_
infixl 7 _*_
_+_ : Op₂ ℕᵇ
zero + y = y
x + zero = x
2[1+ x ] + 2[1+ y ] = 2[1+ suc (x + y) ]
2[1+ x ] + 1+[2 y ] = suc 2[1+ (x + y) ]
1+[2 x ] + 2[1+ y ] = suc 2[1+ (x + y) ]
1+[2 x ] + 1+[2 y ] = suc 1+[2 (x + y) ]
_*_ : Op₂ ℕᵇ
zero * _ = zero
_ * zero = zero
2[1+ x ] * 2[1+ y ] = double 2[1+ x + (y + x * y) ]
2[1+ x ] * 1+[2 y ] = 2[1+ x + y * 2[1+ x ] ]
1+[2 x ] * 2[1+ y ] = 2[1+ y + x * 2[1+ y ] ]
1+[2 x ] * 1+[2 y ] = 1+[2 x + y * 1+[2 x ] ]
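-- Correctness sketch for the even-even case:
-- 2(1+x) * 2(1+y) = 4(1 + x + y + xy) = 2 * 2(1 + (x + (y + x*y)))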
------------------------------------------------------------------------
-- Conversion between ℕᵇ and ℕ
toℕ : ℕᵇ → ℕ
toℕ zero = 0
toℕ 2[1+ x ] = 2 ℕ.* (ℕ.suc (toℕ x))
toℕ 1+[2 x ] = ℕ.suc (2 ℕ.* (toℕ x))
-- Costs O(n), could be improved using `_/_` and `_%_`
fromℕ : ℕ → ℕᵇ
fromℕ 0 = zero
fromℕ (ℕ.suc n) = suc (fromℕ n)
-- An alternative ordering lifted from ℕ
infix 4 _<ℕ_
_<ℕ_ : Rel ℕᵇ 0ℓ
_<ℕ_ = ℕ._<_ on toℕ
------------------------------------------------------------------------
-- Other functions
-- Useful in some termination proofs.
size : ℕᵇ → ℕ
size zero = 0
size 2[1+ x ] = ℕ.suc (size x)
size 1+[2 x ] = ℕ.suc (size x)
------------------------------------------------------------------------
-- Constants
0ᵇ = zero
1ᵇ = suc 0ᵇ
2ᵇ = suc 1ᵇ
3ᵇ = suc 2ᵇ
4ᵇ = suc 3ᵇ
5ᵇ = suc 4ᵇ
6ᵇ = suc 5ᵇ
7ᵇ = suc 6ᵇ
8ᵇ = suc 7ᵇ
9ᵇ = suc 8ᵇ
|
function [mus,sigmas,R] = restore_from_projection(mus_p,sigmas_p,R_p,c,E)
% RESTORE_FROM_PROJECTION restores means, covariances and data from a PCA projection.
% input:
% mus_p - 1 by M cell array (each cell is a K_j by d matrix of projected means)
% sigmas_p - 1 by M cell array (each cell is a d by d by K_j array of projected covariances)
% R_p - M by d projected data, or M by d by N projected data
% c - 1 by B center of the PCA
% E - B by d projection matrix
% output:
% mus, sigmas, R - restored (B-dimensional) versions of mus_p, sigmas_p and R_p
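% Minimal usage sketch (hypothetical variables X, mus_p, sigmas_p, R_p):
%   c = mean(X,1);                % 1 x B center of the data
%   [E,~] = eigs(cov(X),d);       % B x d PCA basis
%   [mus,sigmas,R] = restore_from_projection(mus_p,sigmas_p,R_p,c,E);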
if ~isempty(mus_p) && ~isempty(sigmas_p)
M = length(mus_p);
elseif ~isempty(R_p)
M = size(R_p,1);
end
mus = cell(1,M);
sigmas = cell(1,M);
B = size(E,1);
R = zeros(M,B);
if ~isempty(mus_p) && ~isempty(sigmas_p)
for j = 1:M
K_j = size(mus_p{j},1);
mus{j} = zeros(K_j,B);
sigmas{j} = zeros(B,B,K_j);
for k = 1:size(mus_p{j},1)
% mus{j}(k,:) = solve_linsys_mu(E,mus_p{j}(k,:)')' + c;
% sigmas{j}(:,:,k) = solve_linsys_sigma(E, sigmas_p{j}(:,:,k));
mus{j}(k,:) = recover_mu(E,c,mus_p{j}(k,:));
sigmas{j}(:,:,k) = recover_sigma(E,sigmas_p{j}(:,:,k));
end
end
end
if ~isempty(R_p)
if ndims(R_p) == 2
R = recover_mu(E,c,R_p);
elseif ndims(R_p) == 3
R = zeros(M,B,size(R_p,3));
for i = 1:size(R_p,3)
R(:,:,i) = recover_mu(E,c,R_p(:,:,i));
end
end
end
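% lift a projected mean back to the original space: mu = mu0*E' + c (row form)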
function mu = recover_mu(E,c,mu0)
mu = E*mu0' + repmat(c',[1, size(mu0,1)]);
mu = mu';
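% lift a projected covariance back: sigma = E*sigma0*E', with a small diagonal
% jitter to keep the result positive definite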
function sigma = recover_sigma(E, sigma0)
sigma = E*sigma0*E' + 1e-9*eye(size(E,1));
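% The two solvers below are least-squares alternatives referenced only in the
% commented-out lines above; they are retained for reference.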
function x = solve_linsys_mu(E,mu0)
EE1 = E*E';
B = size(EE1,1);
EE1 = EE1 + 1e-9*eye(B);
x = EE1 \ (E* mu0);
function sigma = solve_linsys_sigma(E,sigma0)
[B,d] = size(E);
w = 1e-9;
EE = kron(E,E);
tmp1 = EE * sigma0(:);
tmp2 = (eye(d^2) + (1/w)*EE'*EE) \ (EE' * tmp1);
sigma = (1/w) * tmp1 - (1/w^2) * EE * tmp2;
sigma = reshape(sigma,B,B);
|
[STATEMENT]
lemma (in encoding) symm_closure_of_indRelLT:
fixes TRel :: "('procT \<times> 'procT) set"
assumes refl: "refl TRel"
and symm: "sym TRel"
shows "indRelT TRel = symcl (indRelLT TRel)"
and "indRelTEQ TRel = (symcl ((indRelLT TRel)\<^sup>=))\<^sup>+"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. indRelT TRel = symcl (indRelLT TRel) &&& indRelTEQ TRel = (symcl ((indRelLT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. indRelT TRel = symcl (indRelLT TRel)
2. indRelTEQ TRel = (symcl ((indRelLT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
show "indRelT TRel = symcl (indRelLT TRel)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. indRelT TRel = symcl (indRelLT TRel)
[PROOF STEP]
proof auto
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a b. a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b \<Longrightarrow> (a, b) \<in> symcl (indRelLT TRel)
2. \<And>a b. (a, b) \<in> symcl (indRelLT TRel) \<Longrightarrow> a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b
[PROOF STEP]
fix P Q
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a b. a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b \<Longrightarrow> (a, b) \<in> symcl (indRelLT TRel)
2. \<And>a b. (a, b) \<in> symcl (indRelLT TRel) \<Longrightarrow> a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b
[PROOF STEP]
assume "P \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> Q"
[PROOF STATE]
proof (state)
this:
P \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> Q
goal (2 subgoals):
1. \<And>a b. a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b \<Longrightarrow> (a, b) \<in> symcl (indRelLT TRel)
2. \<And>a b. (a, b) \<in> symcl (indRelLT TRel) \<Longrightarrow> a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b
[PROOF STEP]
thus "(P, Q) \<in> symcl (indRelLT TRel)"
[PROOF STATE]
proof (prove)
using this:
P \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> Q
goal (1 subgoal):
1. (P, Q) \<in> symcl (indRelLT TRel)
[PROOF STEP]
by (induct, simp_all add: symcl_def indRelLT.encL indRelLT.target)
[PROOF STATE]
proof (state)
this:
(P, Q) \<in> symcl (indRelLT TRel)
goal (1 subgoal):
1. \<And>a b. (a, b) \<in> symcl (indRelLT TRel) \<Longrightarrow> a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a b. (a, b) \<in> symcl (indRelLT TRel) \<Longrightarrow> a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b
[PROOF STEP]
fix P Q
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a b. (a, b) \<in> symcl (indRelLT TRel) \<Longrightarrow> a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b
[PROOF STEP]
assume "(P, Q) \<in> symcl (indRelLT TRel)"
[PROOF STATE]
proof (state)
this:
(P, Q) \<in> symcl (indRelLT TRel)
goal (1 subgoal):
1. \<And>a b. (a, b) \<in> symcl (indRelLT TRel) \<Longrightarrow> a \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> b
[PROOF STEP]
thus "P \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> Q"
[PROOF STATE]
proof (prove)
using this:
(P, Q) \<in> symcl (indRelLT TRel)
goal (1 subgoal):
1. P \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> Q
[PROOF STEP]
proof (auto simp add: symcl_def indRelLT.simps)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>S. S \<in>S Q \<and> \<lbrakk>S\<rbrakk> \<in>T P \<Longrightarrow> TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
2. \<And>T1 T2. T1 \<in>T P \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T Q \<Longrightarrow> TargetTerm T1 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T2
3. \<And>S. S \<in>S P \<and> \<lbrakk>S\<rbrakk> \<in>T Q \<Longrightarrow> SourceTerm S \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm (\<lbrakk>S\<rbrakk>)
4. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
fix S
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>S. S \<in>S Q \<and> \<lbrakk>S\<rbrakk> \<in>T P \<Longrightarrow> TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
2. \<And>T1 T2. T1 \<in>T P \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T Q \<Longrightarrow> TargetTerm T1 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T2
3. \<And>S. S \<in>S P \<and> \<lbrakk>S\<rbrakk> \<in>T Q \<Longrightarrow> SourceTerm S \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm (\<lbrakk>S\<rbrakk>)
4. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
show "SourceTerm S \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm (\<lbrakk>S\<rbrakk>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SourceTerm S \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm (\<lbrakk>S\<rbrakk>)
[PROOF STEP]
by (rule indRelT.encR)
[PROOF STATE]
proof (state)
this:
SourceTerm S \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm (\<lbrakk>S\<rbrakk>)
goal (3 subgoals):
1. \<And>S. S \<in>S Q \<and> \<lbrakk>S\<rbrakk> \<in>T P \<Longrightarrow> TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
2. \<And>T1 T2. T1 \<in>T P \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T Q \<Longrightarrow> TargetTerm T1 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T2
3. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>S. S \<in>S Q \<and> \<lbrakk>S\<rbrakk> \<in>T P \<Longrightarrow> TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
2. \<And>T1 T2. T1 \<in>T P \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T Q \<Longrightarrow> TargetTerm T1 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T2
3. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
fix T1 T2
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>S. S \<in>S Q \<and> \<lbrakk>S\<rbrakk> \<in>T P \<Longrightarrow> TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
2. \<And>T1 T2. T1 \<in>T P \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T Q \<Longrightarrow> TargetTerm T1 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T2
3. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
assume "(T1, T2) \<in> TRel"
[PROOF STATE]
proof (state)
this:
(T1, T2) \<in> TRel
goal (3 subgoals):
1. \<And>S. S \<in>S Q \<and> \<lbrakk>S\<rbrakk> \<in>T P \<Longrightarrow> TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
2. \<And>T1 T2. T1 \<in>T P \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T Q \<Longrightarrow> TargetTerm T1 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T2
3. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
thus "TargetTerm T1 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T2"
[PROOF STATE]
proof (prove)
using this:
(T1, T2) \<in> TRel
goal (1 subgoal):
1. TargetTerm T1 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T2
[PROOF STEP]
by (rule indRelT.target)
[PROOF STATE]
proof (state)
this:
TargetTerm T1 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T2
goal (2 subgoals):
1. \<And>S. S \<in>S Q \<and> \<lbrakk>S\<rbrakk> \<in>T P \<Longrightarrow> TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
2. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>S. S \<in>S Q \<and> \<lbrakk>S\<rbrakk> \<in>T P \<Longrightarrow> TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
2. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
fix S
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>S. S \<in>S Q \<and> \<lbrakk>S\<rbrakk> \<in>T P \<Longrightarrow> TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
2. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
show "TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
[PROOF STEP]
by (rule indRelT.encL)
[PROOF STATE]
proof (state)
this:
TargetTerm (\<lbrakk>S\<rbrakk>) \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> SourceTerm S
goal (1 subgoal):
1. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
fix T1 T2
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
assume "(T1, T2) \<in> TRel"
[PROOF STATE]
proof (state)
this:
(T1, T2) \<in> TRel
goal (1 subgoal):
1. \<And>T1 T2. T1 \<in>T Q \<and> (T1, T2) \<in> TRel \<and> T2 \<in>T P \<Longrightarrow> TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
with symm
[PROOF STATE]
proof (chain)
picking this:
sym TRel
(T1, T2) \<in> TRel
[PROOF STEP]
show "TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1"
[PROOF STATE]
proof (prove)
using this:
sym TRel
(T1, T2) \<in> TRel
goal (1 subgoal):
1. TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
unfolding sym_def
[PROOF STATE]
proof (prove)
using this:
\<forall>x y. (x, y) \<in> TRel \<longrightarrow> (y, x) \<in> TRel
(T1, T2) \<in> TRel
goal (1 subgoal):
1. TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
[PROOF STEP]
by (simp add: indRelT.target)
[PROOF STATE]
proof (state)
this:
TargetTerm T2 \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> TargetTerm T1
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
P \<R>\<lbrakk>\<cdot>\<rbrakk>T<TRel> Q
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
indRelT TRel = symcl (indRelLT TRel)
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelLT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
with refl
[PROOF STATE]
proof (chain)
picking this:
refl TRel
indRelT TRel = symcl (indRelLT TRel)
[PROOF STEP]
show "indRelTEQ TRel = (symcl ((indRelLT TRel)\<^sup>=))\<^sup>+"
[PROOF STATE]
proof (prove)
using this:
refl TRel
indRelT TRel = symcl (indRelLT TRel)
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelLT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
using refl_symm_trans_closure_is_symm_refl_trans_closure[where Rel="indRelLT TRel"]
refl_trans_closure_of_indRelT
[PROOF STATE]
proof (prove)
using this:
refl TRel
indRelT TRel = symcl (indRelLT TRel)
(symcl ((indRelLT TRel)\<^sup>=))\<^sup>+ = (symcl (indRelLT TRel))\<^sup>*
refl ?TRel \<Longrightarrow> indRelTEQ ?TRel = (indRelT ?TRel)\<^sup>*
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelLT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
indRelTEQ TRel = (symcl ((indRelLT TRel)\<^sup>=))\<^sup>+
goal:
No subgoals!
[PROOF STEP]
qed |
(* Title: HOL/Auth/n_flash_nodata_cub_lemma_on_inv__88.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_flash_nodata_cub Protocol Case Study*}
theory n_flash_nodata_cub_lemma_on_inv__88 imports n_flash_nodata_cub_base
begin
section{*All lemmas on causal relation between inv__88 and some rule r*}
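(* Every lemma below follows the same generated pattern: obtain the rule
   parameter(s) and the invariant parameter p__Inv4 from a1 and a2, case-split
   on whether they coincide, and discharge each case by auto, establishing
   ?P1, ?P2 or ?P3 as appropriate. *)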
lemma n_PI_Remote_GetVsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Remote_GetXVsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_NakVsinv__88:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__0Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__1Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__2Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__0Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__1Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false)) (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_Get))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_HeadVsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_PutVsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_DirtyVsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_NakVsinv__88:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_Nak_HomeVsinv__88:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_PutVsinv__88:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_Put_HomeVsinv__88:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__0Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__1Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__2Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__0Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__1Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_1Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_2Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_3Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_4Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_5Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_6Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_HomeVsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8Vsinv__88:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__88:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10_HomeVsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10Vsinv__88:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_11Vsinv__88:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_NakVsinv__88:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_Nak_HomeVsinv__88:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutXVsinv__88:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutX_HomeVsinv__88:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutVsinv__88:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutXVsinv__88:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Local_Get_GetVsinv__88:
assumes a1: "(r=n_PI_Local_Get_Get )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_GetX__part__0Vsinv__88:
assumes a1: "(r=n_PI_Local_GetX_GetX__part__0 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_GetX__part__1Vsinv__88:
assumes a1: "(r=n_PI_Local_GetX_GetX__part__1 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Nak_HomeVsinv__88:
assumes a1: "(r=n_NI_Nak_Home )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutVsinv__88:
assumes a1: "(r=n_NI_Local_Put )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutXAcksDoneVsinv__88:
assumes a1: "(r=n_NI_Local_PutXAcksDone )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__88 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_PutX__part__0Vsinv__88:
assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_WbVsinv__88:
assumes a1: "r=n_NI_Wb " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_3Vsinv__88:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_1Vsinv__88:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_ReplaceVsinv__88:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_ReplaceVsinv__88:
assumes a1: "r=n_PI_Local_Replace " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_existsVsinv__88:
assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_PutXVsinv__88:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvVsinv__88:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_PutXVsinv__88:
assumes a1: "r=n_PI_Local_PutX " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_PutVsinv__88:
assumes a1: "r=n_PI_Local_Get_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ShWbVsinv__88:
assumes a1: "r=n_NI_ShWb N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__88:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__0 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ReplaceVsinv__88:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__1Vsinv__88:
assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_exists_HomeVsinv__88:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Replace_HomeVsinv__88:
assumes a1: "r=n_NI_Replace_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_ClearVsinv__88:
assumes a1: "r=n_NI_Nak_Clear " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_2Vsinv__88:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__88:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__1 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_FAckVsinv__88:
assumes a1: "r=n_NI_FAck " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__88 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
Croatian and Serbian (Matic): patronymic from the personal name Matija or Matej (see Matthew), or from the pet forms Mate (Croatian) or Mato.
Where is the Matic family from?
You can see how Matic families moved over time by selecting different census years. The Matic family name was found in the USA in 1920. In 1920 there were 3 Matic families living in Illinois. This was about 21% of all recorded Matics in the USA. Illinois and 2 other states had the highest population of Matic families in 1920.
Use census records and voter lists to see where families with the Matic surname lived. Within census records, you can often find information like name of household members, ages, birthplaces, residences, and occupations.
What did your Matic ancestors do for a living?
Census records can tell you a lot of little known facts about your Matic ancestors, such as occupation. Occupation can tell you about your ancestor's social and economic status.
What Matic family records will you find?
There are 3,000 census records available for the last name Matic. Like a window into their day-to-day life, Matic census records can tell you where and how your ancestors worked, their level of education, veteran status, and more.
There are 642 immigration records available for the last name Matic. Passenger lists are your ticket to knowing when your ancestors arrived in the UK, and how they made the journey - from the ship name to ports of arrival and departure.
There are 1,000 military records available for the last name Matic. For the veterans among your Matic ancestors, military collections provide insights into where and when they served, and even physical descriptions.
You've only scratched the surface of Matic family history.
What is the average Matic lifespan?
Between 1964 and 2004, in the United States, Matic life expectancy was at its lowest point in 1964, and highest in 1985. The average life expectancy for Matic in 1964 was 41, and 70 in 2004.
An unusually short lifespan might indicate that your Matic ancestors lived in harsh conditions. A short lifespan might also indicate health problems that were once prevalent in your family. The SSDI is a searchable database of more than 70 million names. You can find birthdates, death dates, addresses and more.
A topological space $S$ is locally connected if and only if for every open set $v$ containing a point $x$, there exists an open set $u$ containing $x$ such that $u$ is connected and $u \subseteq v$.
During a trip to San Francisco we were amazed by how conveniently and quickly things happen. Whether it's taking an Uber from the airport or checking in at your hotel, everything runs smoothly. Often there's a lot of technology involved to increase convenience. But what's more important is that the human connection isn't lost in the process. This is important to keep in mind, because customers will always want to be treated as human beings first. So even though they can easily book a cab on their phone, they still want to have a pleasant conversation with the cab driver too.
Convenience and the human connection are interlinked. When you focus on extreme convenience for your customers, it also becomes extremely convenient for your employees to offer the best possible customer experience. Convenience opens up space mentally and in the timetable for your employees to forge a human connection which improves CX. And when everything happens as frictionlessly as possible, your employees will be happier in their jobs and they'll be happy to talk to their customers.
And as we all know, happy employees = happy customers!
If companies want to change for future success, they have to cater to the needs of the modern customer. Who is this modern customer, and how can companies respond to their wishes and needs?
The modern customer can't be put in a box or segmented. This is Generation C. They are breaking through barriers of demographics. Gen C isn't an age group, it's a mindset defined by creation, connection and community.
In light of this, companies need to 'unlearn' everything they have learned in the past. Since customer profiles are changing, old adages will not cut it anymore in the future.
Customer Experience is the true engine of innovation. The customer should be the starting point for any changes companies wish to make. Start from understanding and really knowing, then reverse engineer.
If they really want to understand their customers, it's important for companies to get out of the bubble. Many organisations design experience and map journeys within their bubble without speaking to a single customer. That simply doesn't make any sense, since your customers are your prime source of information when it comes to customer experience.
3. AI + Humans = recipe for success!
Artificial Intelligence still tends to scare a lot of people. But underneath the layer of 'magic' lies a system that can improve our lives drastically. More than anything AI can automate a lot of work, predict with insane precision and most importantly it can help humans in doing their work better and more efficiently.
DigitalGenius has amazing expertise on how AI is already helping organizations win in creating amazing customer service. Their AI learns from former conversations in a customer service department. This enables the platform to understand incoming chats and mails, to handle them automatically with bots (limited for the time being) and to suggest answers to the real reps.
There's an evolution going on in AI as well. It used to go from human to digital, but it's getting more human again as AI helps companies cater to the needs of their customers. It gives organizations an in-depth insight into customer conversations to improve customer experience.
Let's make one thing clear. Retail isn't dead and people will not disappear from it either. It will become more digital, but feel more human. Retailers however will have to adapt to the changing landscape if they want to stay relevant.
We see a lot of juxtapositions these days. Ecommerce versus retail, omnichannel versus multichannel. The solution is not to think in terms of digital and physical as separate markets, but to blend the two together.
Whether they buy something online or in a store, customer understanding remains key. Design your service and experience around them. Make sure you understand everything about them, what drives customers in real life? E.g. discounters need to understand that a large part of their audience is people living from paycheck to paycheck, which requires a different approach.
Keep in mind that in retail customer journeys are changing as well. In-store experience now probably constitutes only 30% of the entire journey. You have to improve the befores and afters if you want to deliver the most complete customer experience.
If you want to reach customers through advertising, there are some rules of thumb to keep in mind before you spend any budget on it. Pereira & O'Dell, a famous advertising and content agency advises to focus on compelling storytelling. Why?
People hate advertising, but they are willing to engage in amazing content.
Therefore advertising must bring value for customers as they are willing to spend their time on it.
Authenticity is probably the most important aspect of your advertising. Show the good, the bad and the ugly. When companies aren't afraid to show that they're only human as well, customers will be able to relate.
Include clever and compelling storytelling that moves away from old advertising.
If you are interested in how Hello Customer can help you transition to a truly customer-centric company, don't hesitate to reach out!
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj8eqsynthconj2 : forall (lv0 : natural) (lv1 : natural), (@eq natural (mult lv0 lv1) (plus Zero (mult lv1 lv0))).
Admitted.
QuickChick conj8eqsynthconj2.
% Ask the user whether to clear the workspace before continuing
R = input ('Clear data? [y/n] \n','s');
if strcmp(R,'y')
clear
end
module Metis
using SparseArrays
using LinearAlgebra
import LightGraphs
import TriangleMesh
using METIS_jll: libmetis
# Metis C API
include("metis_h.jl")
const options = fill(Cint(-1), METIS_NOPTIONS)
options[METIS_OPTION_NUMBERING] = 1
# Julia interface
"""
Metis.Graph
1-based CSR representation of a graph as defined in
section 5.5 "Graph data structure" in the Metis manual.
"""
struct Graph
nvtxs::idx_t
xadj::Vector{idx_t}
adjncy::Vector{idx_t}
vwgt::Vector{idx_t}
Graph(nvtxs, xadj, adjncy) = new(nvtxs, xadj, adjncy)
Graph(nvtxs, xadj, adjncy, vwgt) = new(nvtxs, xadj, adjncy, vwgt)
end
"""
Metis.Mesh
Representation of a mesh as defined in section 5.6
"Mesh data structure" in the Metis manual.
"""
struct Mesh
ne::idx_t
nn::idx_t
eptr::Vector{idx_t}
eind::Vector{idx_t}
Mesh(ne, nn, eptr, eind) = new(ne, nn, eptr, eind)
end
"""
Mesh(mesh::TriangleMesh.TriMesh)
Constructor of Metis.Mesh using a TriangleMesh.TriMesh.
"""
function Mesh(mesh::TriangleMesh.TriMesh)
ne = idx_t(mesh.n_cell)
nn = idx_t(mesh.n_point)
eptr = Vector{idx_t}(undef, ne+1)
eind = Vector{idx_t}(undef, length(mesh.cell))
nn_per_elem, _ = size(mesh.cell)
eptr[:] = [idx_t(nn_per_elem*i) for i in 0:ne]
eind[:] = reshape(mesh.cell, length(mesh.cell))
return Mesh(ne, nn, eptr, eind)
end
"""
Metis.graph(G::SparseMatrixCSC; check_hermitian=true)
Construct the 1-based CSR representation of the sparse matrix `G`.
If `check_hermitian` is `false` the matrix is not checked for being hermitian
before constructing the graph.
"""
function graph(G::SparseMatrixCSC; check_hermitian=true)
if check_hermitian
ishermitian(G) || throw(ArgumentError("matrix must be Hermitian"))
end
N = size(G, 1)
xadj = Vector{idx_t}(undef, N+1)
xadj[1] = 1
adjncy = Vector{idx_t}(undef, nnz(G))
adjncy_i = 0
@inbounds for j in 1:N
n_rows = 0
for k in G.colptr[j] : (G.colptr[j+1] - 1)
i = G.rowval[k]
if i != j # don't include diagonal elements
n_rows += 1
adjncy_i += 1
adjncy[adjncy_i] = i
end
end
xadj[j+1] = xadj[j] + n_rows
end
resize!(adjncy, adjncy_i)
return Graph(idx_t(N), xadj, adjncy)
end
"""
graph(G::LightGraphs.AbstractSimpleGraph)
Construct the 1-based CSR representation of the `LightGraphs` graph `G`.
"""
function graph(G::LightGraphs.AbstractSimpleGraph)
N = LightGraphs.nv(G)
xadj = Vector{idx_t}(undef, N+1)
xadj[1] = 1
adjncy = Vector{idx_t}(undef, 2*LightGraphs.ne(G))
adjncy_i = 0
for j in 1:N
ne = 0
for i in LightGraphs.outneighbors(G, j)
ne += 1
adjncy_i += 1
adjncy[adjncy_i] = i
end
xadj[j+1] = xadj[j] + ne
end
resize!(adjncy, adjncy_i)
return Graph(idx_t(N), xadj, adjncy)
end
"""
perm, iperm = Metis.permutation(G)
Compute the fill reducing permutation `perm`
and its inverse `iperm` of `G`.
"""
permutation(G) = permutation(graph(G))
function permutation(G::Graph)
perm = Vector{idx_t}(undef, G.nvtxs)
iperm = Vector{idx_t}(undef, G.nvtxs)
vwgt = isdefined(G, :vwgt) ? G.vwgt : C_NULL
METIS_NodeND(G.nvtxs, G.xadj, G.adjncy, vwgt, options, perm, iperm)
return perm, iperm
end
"""
Metis.partition(G, n; alg = :KWAY)
Partition the graph `G` in `n` parts.
The partition algorithm is defined by the `alg` keyword:
- :KWAY: multilevel k-way partitioning
- :RECURSIVE: multilevel recursive bisection
"""
partition(G, nparts; alg = :KWAY) = partition(graph(G), nparts, alg = alg)
function partition(G::Graph, nparts::Integer; alg = :KWAY)
part = Vector{idx_t}(undef, G.nvtxs)
vwgt = isdefined(G, :vwgt) ? G.vwgt : C_NULL
edgecut = fill(idx_t(0), 1)
if alg === :RECURSIVE
METIS_PartGraphRecursive(G.nvtxs, idx_t(1), G.xadj, G.adjncy, vwgt, C_NULL, C_NULL,
idx_t(nparts), C_NULL, C_NULL, options, edgecut, part)
elseif alg === :KWAY
METIS_PartGraphKway(G.nvtxs, idx_t(1), G.xadj, G.adjncy, vwgt, C_NULL, C_NULL,
idx_t(nparts), C_NULL, C_NULL, options, edgecut, part)
else
throw(ArgumentError("unknown algorithm $(repr(alg))"))
end
return part
end
"""
Metis.separator(G)
Compute a vertex separator of the graph `G`.
"""
separator(G) = separator(graph(G))
function separator(G::Graph)
part = Vector{idx_t}(undef, G.nvtxs)
sepsize = fill(idx_t(0), 1)
vwgt = isdefined(G, :vwgt) ? G.vwgt : C_NULL
# METIS_ComputeVertexSeparator segfaults with 1-based indexing
xadj = G.xadj .- idx_t(1)
adjncy = G.adjncy .- idx_t(1)
METIS_ComputeVertexSeparator(G.nvtxs, xadj, adjncy, vwgt, options, sepsize, part)
part .+= 1
return part
end
"""
Metis.mesh_partition(M, n; alg = :DUAL)
Partition the mesh `M` in `n` parts.
The partition algorithm is defined by the `alg` keyword:
- :DUAL: Partition of the mesh's dual graph
- :NODAL: Partition of the mesh's nodal graph
"""
function mesh_partition(mesh::TriangleMesh.TriMesh, nparts::Integer; alg = :DUAL)
M = Mesh(mesh)
epart = Vector{idx_t}(undef, M.ne)
npart = Vector{idx_t}(undef, M.nn)
objval = fill(idx_t(0), 1)
if alg === :NODAL
METIS_PartMeshNodal(M.ne, M.nn, M.eptr, M.eind, C_NULL, C_NULL, idx_t(nparts),
C_NULL, options, objval, epart, npart)
elseif alg === :DUAL
METIS_PartMeshDual(M.ne, M.nn, M.eptr, M.eind, C_NULL, C_NULL, idx_t(2), idx_t(nparts),
C_NULL, options, objval, epart, npart)
else
throw(ArgumentError("unknown algorithm $(repr(alg))"))
end
return epart, npart
end
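# Usage sketch (added for illustration; not part of the library):
#
#     using Metis, LightGraphs
#     g = LightGraphs.smallgraph(:tutte)
#     parts = Metis.partition(g, 4)        # assign each vertex to one of 4 parts
#     perm, iperm = Metis.permutation(g)   # fill-reducing node ordering
#     sep = Metis.separator(g)             # vertex labels (1, 2, or 3 = separator)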
end # module
module plot
contains
subroutine start_plot ()
implicit none
integer :: iStat, pgOpen
iStat = pgOpen ( 'p2f.ps/ps' )
end subroutine start_plot
subroutine end_plot ()
implicit none
call pgClos ()
end subroutine end_plot
subroutine plot_f_vv ()
use rzvv_grid
use constants
implicit none
integer :: nLevs, i=0
real :: tr(6)
real, allocatable :: levels(:)
call pgEnv ( -1.0, 1.0, 0.0, 1.0, 1, 1 )
call pgLab ( 'vPar [%c]', 'vPerp [%c]', 'f(vv)' )
tr = (/ ( minVal ( vPar_binCenters ) - vPar_binSize ) / c * 100.0, &
vPar_binSize / c * 100.0, &
0.0, &
( minVal ( vPerp_binCenters ) - vPerp_binSize ) / c * 100.0, &
0.0, &
vPerp_binSize / c * 100.0 /)
nLevs = 20
allocate ( levels ( nLevs ) )
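! note (added): contour levels form a geometric progression, 1e-16 * 2**i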
levels = (/ ((2.0**i)*1e-16,i=0,nLevs-1) /)
call pgCont ( transpose ( f_vv ), vPar_nBins, vPerp_nBins, &
1, vPar_nBins, 1, vPerp_nBins, &
levels, nLevs, tr )
end subroutine plot_f_vv
subroutine plot_f_rzvv ( rPt, zPt )
use rzvv_grid
use constants
implicit none
integer :: nLevs, i=0, rPt, zPt
real :: tr(6)
real, allocatable :: levels(:)
call pgEnv ( -1.0, 1.0, 0.0, 1.0, 1, 1 )
call pgLab ( 'vPar [%c]', 'vPerp [%c]', 'f(rzvv)' )
tr = (/ ( minVal ( vPar_binCenters ) - vPar_binSize ) / c * 100.0, &
vPar_binSize / c * 100.0, &
0.0, &
( minVal ( vPerp_binCenters ) - vPerp_binSize ) / c * 100.0, &
0.0, &
vPerp_binSize / c * 100.0 /)
nLevs = 20
allocate ( levels ( nLevs ) )
levels = (/ ((2.0**i)*1e-15,i=0,nLevs-1) /)
call pgCont ( transpose ( f_rzvv_(rPt,zPt,:,:) ), vPar_nBins, vPerp_nBins, &
1, vPar_nBins, 1, vPerp_nBins, &
levels, nLevs, tr )
end subroutine plot_f_rzvv
end module plot
# Spherical Mean Technique
The recently proposed Spherical Mean Technique (SMT) model *(Kaden et al. 2015)* is a spherical convolution-based technique which, instead of fitting a model directly to the multi-shell DWIs, fits the *spherical mean* of the model to the *spherical mean* of the signal per shell.
SMT observes that if the FOD is a probability density (i.e. it integrates to unity), then the spherical means of the signal and of the convolution kernel must be the same
\begin{equation}
\int_{\mathbb{S}^2}E_b(\textbf{g})d\textbf{g}=\int_{\mathbb{S}^2}(\operatorname{FOD}\,*_{\mathbb{S}^2}\,K)_b(\textbf{g})d\textbf{g}=\int_{\mathbb{S}^2}K_b(\textbf{g})d\textbf{g}=\epsilon_K(b,\lambda_\perp,\lambda_\parallel).
\end{equation}
The estimation of the multi-compartment kernel using SMT enables the characterization of per-axon micro-environments, as the effects of axon dispersion and crossings are only contained in the FOD.
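To make this rotation invariance concrete, here is a minimal numpy sketch (ours, independent of dmipy; the parameter values are illustrative): it averages a zeppelin signal over many approximately uniform gradient directions for two different fiber orientations, and the two spherical means coincide because the orientation information averages out.
```python
import numpy as np
rng = np.random.default_rng(0)

def zeppelin_signal(g, mu, b, lam_par, lam_perp):
    # axially symmetric tensor: E = exp(-b*(lam_perp + (lam_par - lam_perp)*(g.mu)^2))
    dot = g @ mu
    return np.exp(-b * (lam_perp + (lam_par - lam_perp) * dot**2))

g = rng.normal(size=(100000, 3))                 # ~uniform directions on the sphere
g /= np.linalg.norm(g, axis=1, keepdims=True)

b, lam_par, lam_perp = 1e9, 1.7e-9, 0.5e-9       # SI units (s/m^2 and m^2/s)
for mu in (np.array([0., 0., 1.]), np.array([1., 1., 0.]) / np.sqrt(2)):
    print(zeppelin_signal(g, mu, b, lam_par, lam_perp).mean())  # ~identical values
```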
Advantages:
- Insensitive to axon dispersion and crossings.
Limitations:
- Only accounts for average of tissue properties in crossing configurations, which potentially each have different properties.
# Using Dmipy to set up the SMT Model
To set up the SMT model we start by calling the regular zeppelin model.
```python
from dmipy.signal_models import gaussian_models
zeppelin = gaussian_models.G2Zeppelin()
```
To initialize a spherical mean model, instead of calling the MultiCompartmentModel, we call MultiCompartmentSphericalMeanModel.
```python
from dmipy.core import modeling_framework
smt_mod = modeling_framework.MultiCompartmentSphericalMeanModel(
models=[zeppelin])
```
Notice how the zeppelin's spherical mean representation has no orientation parameter 'mu'.
```python
smt_mod.parameter_names
```
['G2Zeppelin_1_lambda_perp', 'G2Zeppelin_1_lambda_par']
The only constraint on the Zeppelin model is that $\lambda_\parallel\geq\lambda_\perp$. We can impose this parameter constraint using the model.set_fractional_parameter() function.
```python
smt_mod.set_fractional_parameter('G2Zeppelin_1_lambda_perp', 'G2Zeppelin_1_lambda_par')
smt_mod.parameter_names
```
['G2Zeppelin_1_lambda_perp_fraction', 'G2Zeppelin_1_lambda_par']
The lambda_perp parameter has now been replaced with lambda_perp_fraction.
In essence, this function replaces the original parameter with a different optimization parameter that operates as a fraction $f$ of $\lambda_{\parallel}$, such that $\lambda_{\perp}=f \times \lambda_{\parallel}$, where $1\geq f \geq 0$. Internally, the $\lambda_{\perp}$ parameter will now always respect the "smaller than" constraint relative to $\lambda_{\parallel}$, and the true value of $\lambda_{\perp}$ can be recovered after fitting the model.
Visualize the model, notice that it's just single model:
```python
from IPython.display import Image
smt_mod.visualize_model_setup(view=False, cleanup=False)
Image('Model Setup.png')
```
## Fitting SMT to Human Connectome Project data
```python
from dmipy.data import saved_data
scheme_hcp, data_hcp = saved_data.wu_minn_hcp_coronal_slice()
sub_image = data_hcp[70:90,: , 70:90]
```
This data slice originates from Subject 100307 of the Human Connectome Project, WU-Minn Consortium (Principal Investigators: David Van Essen and Kamil Ugurbil; 1U54MH091657) funded by the 16 NIH Institutes and Centers that support the NIH Blueprint for Neuroscience Research; and by the McDonnell Center for Systems Neuroscience at Washington University.
```python
import matplotlib.pyplot as plt
import matplotlib.patches as patches
%matplotlib inline
fig, ax = plt.subplots(1)
ax.imshow(data_hcp[:, 0, :, 0].T, origin=True)
rect = patches.Rectangle((70,70),20,20,linewidth=1,edgecolor='r',facecolor='none')
ax.add_patch(rect)
ax.set_axis_off()
ax.set_title('HCP coronal slice B0 with ROI');
```
```python
# Fitting SMT is very fast, half the time is actually spent estimating the spherical mean of the data.
smt_fit_hcp = smt_mod.fit(scheme_hcp, data_hcp, Ns=30, mask=data_hcp[..., 0]>0, use_parallel_processing=False)
```
Setup brute2fine optimizer in 1.50449085236 seconds
Fitting of 8181 voxels complete in 22.991079092 seconds.
Average of 0.0028103018081 seconds per voxel.
```python
fitted_parameters = smt_fit_hcp.fitted_parameters
fig, axs = plt.subplots(1, len(fitted_parameters), figsize=[15, 15])
axs = axs.ravel()
for i, (name, values) in enumerate(fitted_parameters.items()):
cf = axs[i].imshow(values.squeeze().T, origin=True)
axs[i].set_title(name)
axs[i].set_axis_off()
fig.colorbar(cf, ax=axs[i], shrink=0.2)
```
## Estimating Parametric FODs using SMT kernel
Dmipy allows for the estimation of parametric Fiber Orientation Distributions (FODs) using the fitted spherical mean model parameters as a convolution kernel. At this time, it is possible to choose either 'watson' or 'bingham' distributions with any number of compartments.
### Watson FOD estimation
First we fit SMT on a patch where we want to estimate FODs.
```python
smt_fit_patch = smt_mod.fit(scheme_hcp, sub_image, Ns=30)
```
Using parallel processing with 8 workers.
Setup brute2fine optimizer in 0.0670688152313 seconds
Fitting of 400 voxels complete in 1.86997413635 seconds.
Average of 0.00467493534088 seconds per voxel.
Then we call the fitted SMT model to return a multi-compartment model that is designed to fit $N$ parametric FODs to the data. Note that the returned model is no different from a self-designed regular MultiCompartmentModel - it only differs in that the kernel properties are fixed to the estimated SMT parameters.
```python
parametric_fod_model= smt_fit_patch.return_parametric_fod_model(
distribution='watson', Ncompartments=1)
```
Notice that the FOD optimizer has the same design as a regular MultiCompartmentModel, and all the same solvers and options are available for FOD estimation. In this way, it is possible to have completely different approaches to estimating the kernel using a spherical mean model, and estimating the subsequent parametric FODs.
```python
smt_fod_fit = parametric_fod_model.fit(scheme_hcp, sub_image)
```
Using parallel processing with 8 workers.
Cannot estimate signal grid with voxel-dependent x0_vector.
Setup brute2fine optimizer in 0.000311851501465 seconds
Fitting of 400 voxels complete in 27.5659279823 seconds.
Average of 0.0689148199558 seconds per voxel.
Fitting an FOD optimizer returns a FittedMultiCompartmentModel, where now all the same functions are available. We will show the estimated FODs in the next example.
### Watson FOD visualization
Here we use the same Dipy procedure as in the previous examples to visualize the FODs.
```python
from dipy.data import get_sphere
from dipy.viz.actor import slicer
sphere = get_sphere(name='symmetric724').subdivide()
fods = smt_fod_fit.fod(sphere.vertices, visual_odi_lower_bound=0.1)
```
```python
import numpy as np
affine = np.eye(4)
affine[0,3] = -10
affine[1,3] = -10
# lambda_perp = lambda_perp_fraction * lambda_par
volume_res = (smt_fod_fit.fitted_parameters['SD1WatsonDistributed_1_G2Zeppelin_1_lambda_perp_fraction'] *
smt_fod_fit.fitted_parameters['SD1WatsonDistributed_1_G2Zeppelin_1_lambda_par'])
volume_im = slicer(volume_res[:, 0, :, None], interpolation='nearest', affine=affine, opacity=0.7)
```
```python
from dipy.viz import fvtk
ren = fvtk.ren()
fod_spheres = fvtk.sphere_funcs(fods, sphere, scale=1., norm=False)
fod_spheres.RotateX(90)
fod_spheres.RotateZ(180)
fod_spheres.RotateY(180)
fvtk.add(ren, fod_spheres)
fvtk.add(ren, volume_im)
fvtk.record(ren=ren, size=[700, 700])
```
```python
import matplotlib.image as mpimg
img = mpimg.imread('dipy.png')
plt.figure(figsize=[10, 10])
plt.imshow(img[100:-97, 100:-85])
plt.title('SMT Watson FODs lambda_perp background', fontsize=20)
plt.axis('off');
```
## Estimating Error Metrics: MSE and $R^2$
It is also possible to calculate the Mean Squared Error (MSE) and the $R^2$ coefficient of determination.
In MSE, the lower the better, while $R^2$ ranges between 0 and 1, with 1 being a perfect model fit.
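For reference, the $R^2$ reported here is the standard coefficient of determination (our summary; dmipy's implementation may differ in numerical details):
\begin{equation}
R^2 = 1 - \frac{\sum_i \left(S_i - \hat{S}_i\right)^2}{\sum_i \left(S_i - \bar{S}\right)^2},
\end{equation}
where $S_i$ are the measured signals, $\hat{S}_i$ the model predictions, and $\bar{S}$ the mean of the measured signals.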
```python
mse = smt_fit_hcp.mean_squared_error(data_hcp)
R2 = smt_fit_hcp.R2_coefficient_of_determination(data_hcp)
fig, axs = plt.subplots(1, 2, figsize=[15, 15])
cf = axs[0].imshow(mse.squeeze().T, origin=True, vmax=1e-3, cmap='Greys_r')
fig.colorbar(cf, ax=axs[0], shrink=0.33)
axs[0].set_title('Mean Squared Error', fontsize=20)
axs[0].set_axis_off()
cf = axs[1].imshow(R2.squeeze().T, origin=True, vmin=.98, cmap='Greys_r')
fig.colorbar(cf, ax=axs[1], shrink=0.33)
axs[1].set_title('R2 - Coefficient of Determination', fontsize=20)
axs[1].set_axis_off();
```
The MSE shows that the fitting error is overall very low, with higher errors in the CSF and the skull. The $R^2$ values agree with the MSE results, being very close to 1 overall, with lower values in the CSF and skull.
## References
- Kaden, Enrico, et al. "Multi-compartment microscopic diffusion imaging." NeuroImage 139 (2016): 346-359.
{-# OPTIONS --cubical #-}
open import Agda.Builtin.Cubical.Path
data D : Set where
c : (@0 x y : D) → x ≡ y
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import os
from pathlib import Path
from typing import Any, Optional
import numpy as np
import torch
from InnerEyeDataQuality.deep_learning.metrics.joint_metrics import JointMetrics
from InnerEyeDataQuality.deep_learning.metrics.sample_metrics import SampleMetrics
from torch.utils.data import Dataset
from torch.utils.tensorboard import SummaryWriter
class MetricTracker(object):
"""
"""
def __init__(self,
output_dir: str,
num_epochs: int,
num_samples_total: int,
num_samples_per_epoch: int,
num_classes: int,
save_tf_events: bool,
dataset: Optional[Dataset] = None,
name: str = "default_metric",
**sample_info_kwargs: Any):
"""
Class to track model training metrics.
If a co-teaching model is trained, joint model metrics are stored such as disagreement rate and kl divergence.
Similarly, it stores loss and logits values on a per sample basis for each epoch for post-training analysis.
This stored data can be utilised in data selection simulation.
"""
Path(output_dir).mkdir(parents=True, exist_ok=True)
self.output_dir = output_dir
self.name = name
self.num_classes = num_classes
self.num_samples_total = num_samples_total
self.num_samples_per_epoch = num_samples_per_epoch
clean_targets = dataset.clean_targets if hasattr(dataset, "clean_targets") else None # type: ignore
self.joint_model_metrics = JointMetrics(num_samples_total, num_epochs, dataset, **sample_info_kwargs)
self.sample_metrics = SampleMetrics(name, num_epochs, num_samples_total, num_classes,
clear_labels=clean_targets,
embeddings_size=None, **sample_info_kwargs)
self.writer = SummaryWriter(log_dir=output_dir) if save_tf_events else None
def reset(self) -> None:
self.sample_metrics.reset()
self.joint_model_metrics.reset()
def log_epoch_and_reset(self, epoch: int) -> None:
# assert np.count_nonzero(~np.isnan(self.sample_metrics.loss_per_sample[:, epoch])) == self.num_samples_per_epoch
self.sample_metrics.log_results(epoch=epoch, name=self.name, writer=self.writer)
if self.writer:
self.joint_model_metrics.log_results(self.writer, epoch, self.sample_metrics)
# Reset epoch metrics
self.reset()
def append_batch_aggregate(self, epoch: int, logits_x: torch.Tensor, logits_y: torch.Tensor,
dropped_cases: torch.Tensor, indices: torch.Tensor) -> None:
"""
Stores the disagreement stats for co-teaching models
"""
post_x = torch.softmax(logits_x, dim=-1)
post_y = torch.softmax(logits_y, dim=-1)
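# Symmetric KL divergence between the two co-teaching posteriors, computed per sample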
sym_kl_per_sample = torch.sum(post_x * torch.log(post_x / post_y) + post_y * torch.log(post_y / post_x), dim=-1)
pred_x = torch.argmax(logits_x, dim=-1)
pred_y = torch.argmax(logits_y, dim=-1)
class_pred_disagreement = pred_x != pred_y
self.joint_model_metrics.kl_divergence_symmetric[indices] = sym_kl_per_sample.cpu().numpy()
self.joint_model_metrics.prediction_disagreement[indices, epoch] = class_pred_disagreement.cpu().numpy()
self.joint_model_metrics.case_drop_histogram[dropped_cases.cpu().numpy(), epoch] = True
self.joint_model_metrics.active = True
def save_loss(self) -> None:
output_path = os.path.join(self.output_dir, f'{self.name}_training_stats.npz')
if hasattr(self.joint_model_metrics, "case_drop_histogram"):
np.savez(output_path,
loss_per_sample=self.sample_metrics.loss_per_sample,
logits_per_sample=self.sample_metrics.logits_per_sample,
dropped_cases=self.joint_model_metrics.case_drop_histogram[:, :-1])
else:
np.savez(output_path,
loss_per_sample=self.sample_metrics.loss_per_sample,
logits_per_sample=self.sample_metrics.logits_per_sample)
REAL FUNCTION EXTCOR(WAVE,EBV)
REAL WAVE, EBV
C
C MEAN GALACTIC CURVE (Seaton,M. 1979, MNRAS,187, 73p)
C THE METHOD EMPLOYED IS LINEAR INTERPOLATION.
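C NOTE (ADDED): EXTCOR RETURNS THE MULTIPLICATIVE ATTENUATION
C FACTOR 10**(-0.4*EBV*MAG1), SO AN OBSERVED FLUX MAY BE
C DEREDDENED BY DIVIDING IT BY EXTCOR(WAVE,EBV).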
REAL MAG1, M, X(18), MAG(18)
REAL DY, DX, B, Z
C Data for mean galactic with x less than 2.7
DATA X /1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2.0,2.1,2.2,
& 2.3,2.4,2.5,2.6,2.7/
DATA MAG /1.36,1.64,1.84,2.04,2.24,2.44,2.66,2.88,3.14,3.36,
& 3.56,3.77,3.96,4.15,4.26,4.40,4.52,4.64/
Z = 10000. / WAVE
C GAK modification for a Herschel 36 w/o bump from Fitzpatrick and Massa
C 1988, ApJ, 328, 734. Weak bump relative to Seaton, slightly stronger
C FUV upturn.
C mag1 = 3.14 + 1.680 + 0.055 * Z
C if ( z .ge. 5.9 )
C & mag1=mag1+0.312*(0.5392*(Z-5.9)**2+0.0564*(z-5.9)**3)
C extcor = 10. ** ( -0.4 * EBV * MAG1)
C return
C GAK modification for Theta1 Ori C from Fitzpatrick and Massa (1988).
C This star has weak bump and weak UV upturn relative to Seaton curve.
C drude = z**2 / ( (z**2 - 4.635**2)**2 + (0.846 * z)**2)
C mag1 = 3.14 + 1.251 + 0.033 * Z + 1.331 * drude
C if ( z .ge. 5.9 )
C & mag1=mag1+0.186*(0.5392*(Z-5.9)**2+0.0564*(z-5.9)**3)
C extcor = 10. ** ( -0.4 * EBV * MAG1)
C return
MAG1 = 0.0
IF( Z .LE. 1.0 ) GO TO 90
IF( Z .GE. 2.7 ) GO TO 30
DO 10 I = 1,18
10 IF (X(I).GT. Z) GOTO 20
20 DY = MAG(I) - MAG(I - 1)
DX = X(I) - X(I - 1)
M = DY / DX
B = MAG(I) - M * X(I)
MAG1 = M * Z + B
GO TO 90
30 IF( Z .GE. 3.65 ) GO TO 50
MAG1=1.56+1.048*Z+1.01/((Z-4.60)**2+0.280)
GO TO 90
50 IF( Z .GE. 7.14 ) GO TO 70
MAG1=2.29+0.848*Z+1.01/((Z-4.60)**2+0.280)
GO TO 90
70 MAG1=16.17-3.20*Z+0.2975*Z**2
GO TO 90
90 EXTCOR = 10. ** ( -.4 * MAG1 * EBV )
RETURN
END
url <- "https://ww2.amstat.org/publications/jse/datasets/fruitfly.dat.txt"
data <- read.table(url)
data <- data[, c(-1, -6)]
names(data) <- c("partners", "type", "longevity", "thorax")
par(mfrow = c(3, 2))
plot(data$thorax, data$longevity, col = data$partners, pch = data$type)
data0 <- data[data$partners == 0, ]
plot(data0$thorax, data0$longevity, col = data0$partners, pch = data0$type)
data1 <- data[data$partners == 1, ]
plot(data1$thorax, data1$longevity, col = data1$partners, pch = data1$type)
data8 <- data[data$partners == 8, ]
plot(data8$thorax, data8$longevity, col = data8$partners, pch = data8$type)
p0 <- as.integer(data$partners == 0)
p1t0 <- as.integer(data$partners == 1 & data$type == 0)
p1t1 <- as.integer(data$partners == 1 & data$type == 1)
p8t0 <- as.integer(data$partners == 8 & data$type == 0)
p8t1 <- as.integer(data$partners == 8 & data$type == 1)
data_dummy <- cbind(data, p0, p1t0, p1t1, p8t0, p8t1)
boxplot(cbind(data_dummy[data_dummy$p0 == 1, ]$longevity,
data_dummy[data_dummy$p1t0 == 1, ]$longevity,
data_dummy[data_dummy$p1t1 == 1, ]$longevity,
data_dummy[data_dummy$p8t0 == 1, ]$longevity,
data_dummy[data_dummy$p8t1 == 1, ]$longevity)
)
# use p0 as control group
fitfull <- lm(thorax ~ p1t0 + p1t1 + p8t0 + p8t1, data_dummy)
fitintercept <- lm(thorax ~ 1, data_dummy)
# not significant, so the null hypothesis that the models are the same should be accepted
print(anova(fitintercept, fitfull))
reg_wo <- lm(longevity ~ type, data1)
print(summary(reg_wo))
reg_w <- lm(longevity ~ type + thorax, data1)
print(summary(reg_w))
reg_full_fail <- lm(longevity ~ thorax
+ as.factor(partners) * as.factor(type), data)
print(summary(reg_full_fail)) # some NA here, and most var are not significant
reg_full <- lm(longevity ~ thorax + p1t0 + p1t1 + p8t0 + p8t1, data_dummy)
print(summary(reg_full))
reg_reduce <- lm(longevity ~
thorax +
I(p1t0 - p8t1) +
I(p1t1 + p8t1) +
I(p8t0 + p8t1), data_dummy)
print(summary(reg_reduce))
print(anova(reg_reduce, reg_full))
<div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
<div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
<div style="position: relative ; top: 50% ; transform: translatey(-50%)">
<div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div>
<div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">The Fourier Pseudospectral Method - Acoustic Waves in 1D</div>
</div>
</div>
</div>
Seismo-Live: http://seismo-live.org
##### Authors:
* David Vargas ([@dvargas](https://github.com/davofis))
* Heiner Igel ([@heinerigel](https://github.com/heinerigel))
---
## Basic Equations
We use the Fourier method to calculate exact n-th derivatives on a regularly spaced grid (to machine precision). This property combined with classical time extrapolation schemes results in the so-called Fourier pseudospectral method. The problem of solving the 1D acoustic wave equation in a homogeneous medium
\begin{equation}
\partial_t^2 p(x,t) = c(x)^2 \ \partial_x^2 p(x,t) + s(x,t)
\end{equation}
is covered in this notebook. We explore the benefits of calculating exact spatial derivatives (up to machine precision), examine numerical dispersion, and compare the solution with finite-difference schemes.
```python
# This is a configuration step for the exercise. Please run it before calculating the derivative!
import numpy as np
import matplotlib.pyplot as plt
from ricker import ricker
# Show the plots in the Notebook.
plt.switch_backend("nbagg")
```
### 1. Fourier derivative method
The second spatial derivative is computed by multiplying the spatial Fourier transform of the pressure field $P(k,t)$ with $ (ik)^2 $
\begin{equation}
\partial_x^2 p(x,t) = \mathscr{F}^{-1}[(ik)^{2}P(k,t)] = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} (ik)^{2} P(k,t) e^{ikx} dk
\end{equation}
where $k$ is the wavenumber and $\mathscr{F}^{-1}$ denotes the inverse Fourier transform. A function to perform this task is implemented in the next cell.
```python
def fourier_derivative_2nd(f, dx):
# Length of vector f
nx = np.size(f)
# Initialize k vector up to Nyquist wavenumber
kmax = np.pi / dx
dk = kmax / (nx / 2)
k = np.arange(float(nx))
k[: int(nx/2)] = k[: int(nx/2)] * dk
k[int(nx/2) :] = k[: int(nx/2)] - kmax
# Fourier derivative
ff = np.fft.fft(f)
ff = (1j*k)**2 * ff
df_num = np.real(np.fft.ifft(ff))
return df_num
```
### 2. Initialization of setup
```python
# Basic parameters
# ---------------------------------------------------------------
nt = 3500 # number of time steps
c = 343. # acoustic velocity [m/s]
eps = 0.2 # stability limit
isnap = 50 # snapshot frequency
isx = 1250 # source location
f0 = 60. # Frequency [Hz](div by 5)
nx = 2024 # number of grid points in x
# pressure fields Initialization
# (each field gets its own array; chained assignment like "pnew = p" would
#  alias the same NumPy array and corrupt the first time steps)
p = np.zeros(nx); pnew = np.zeros(nx); pold = np.zeros(nx); d2p = np.zeros(nx); dp = np.zeros(nx)
ap = np.zeros(nx); apnew = np.zeros(nx); apold = np.zeros(nx); ad2p = np.zeros(nx); adp = np.zeros(nx)
sp = np.zeros(nx); spnew = np.zeros(nx); spold = np.zeros(nx); sd2p = np.zeros(nx); sdp = np.zeros(nx)
dx = 1250./(nx-1) # calculate space increment
x = np.arange(0, nx)*dx # initialize space coordinates
dt = eps*dx/c; # calculate time step from stability criterion
```
### 3. Source Initialization
```python
# source time function
# ---------------------------------------------------------------
t = np.arange(1, nt+1)*dt # initialize time axis
T0 = 1./f0
tmp = ricker(dt, T0)
tmp = np.diff(tmp)
src = np.zeros(nt)
src[0:np.size(tmp)] = tmp
lam = c*T0
#spatial source function
# ---------------------------------------------------------------
sigma = 2*dx
x0 = x[isx-1]
sg = np.exp(-1/sigma**2 *(x - x0)**2); sg = sg/np.amax(sg)
```
### 4. Time Extrapolation
The final solution for our 1D acoustic wave problem after introducing a finite-difference time extrapolation scheme can be written as
\begin{equation}
p_{j}^{n+1} = dt^2c_{j}^{2} \partial_{x}^{2}p_{j}^{n} + dt^2s_{j}^{n} + 2p_{j}^{n} - p_{j}^{n-1}
\end{equation}
where the space derivative is computed with the Fourier method. In order to compare the above numerical solution, we implement a 3-point finite difference operator, as well as a 5-point finite difference operator to compute spatial derivatives. They are given as:
1.) **3-point finite difference operator**
\begin{equation}
\partial_x^2 p(x,t) = \frac{p(x+\mathrm{d}x,t) - 2 p(x,t) + p(x-\mathrm{d}x,t)}{\mathrm{d}x^2}
\end{equation}
2.) **5-point finite difference operator**
\begin{equation}
\partial_x^2 p(x,t) = \frac{-p(x+2\mathrm{d}x,t) + 16p(x+\mathrm{d}x,t) - 30p(x,t) + 16p(x-\mathrm{d}x,t) - p(x-2\mathrm{d}x,t)}{12\mathrm{d}x^2}
\end{equation}
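As a quick sanity check (not part of the original notebook; the variable names here are ours), the sketch below applies the three operators to $f(x)=\sin(kx)$ on a periodic grid, using `np.roll` for the periodic wrap, and compares them with the exact second derivative $-k^2\sin(kx)$. The Fourier derivative is accurate to machine precision for this band-limited function, while the finite-difference errors decrease with the operator order.

```python
# Verification sketch: compare the three operators against the analytic result
nx_test = 128
dx_test = 2 * np.pi / nx_test
x_test = np.arange(nx_test) * dx_test
k_test = 4.0
f = np.sin(k_test * x_test)
d2f_exact = -k_test ** 2 * f

d2f_fourier = fourier_derivative_2nd(f, dx_test)
d2f_3pt = (np.roll(f, -1) - 2 * f + np.roll(f, 1)) / dx_test ** 2
d2f_5pt = (-np.roll(f, -2) + 16 * np.roll(f, -1) - 30 * f
           + 16 * np.roll(f, 1) - np.roll(f, 2)) / (12 * dx_test ** 2)

for name, d2f in (('Fourier', d2f_fourier), ('FD-3pt', d2f_3pt), ('FD-5pt', d2f_5pt)):
    print(name, 'max abs error:', np.max(np.abs(d2f - d2f_exact)))
```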
#### Numerical dispersion
One of the most prominent characteristics of the Fourier method is its low numerical dispersion in comparison with the finite-difference method. The animation displayed below compares the effects of numerical dispersion on the solution of the 1D acoustic equation using our three different approaches.
```python
# Initialize animated plot
# ---------------------------------------------------------------
fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True, figsize=(12,7))
line1 = ax1.plot(x[isx:], p[isx:], 'k', lw=1.5, label='FD-3pt')
line2 = ax2.plot(x[isx:], ap[isx:], 'r', lw=1.5, label='FD-5pt')
line3 = ax3.plot(x[isx:], sp[isx:], 'b', lw=1.5, label='Fourier')
ax1.axis([isx*dx, nx*dx, -6E-7, 6E-7]); ax3.set_xlabel('x [m]')
ax1.legend(loc=4)
ax2.legend(loc=4)
ax3.legend(loc=4)
plt.ion() # set interactive mode
plt.show()
# ---------------------------------------------------------------
# Time extrapolation
# ---------------------------------------------------------------
for it in range(nt):
# ----------------------------------------
# Fourier Pseudospectral Method
# ----------------------------------------
sd2p = fourier_derivative_2nd(sp, dx) # 2nd space derivative
spnew = 2*sp - spold + c**2 * dt**2 * sd2p # Time Extrapolation
spnew = spnew + sg*src[it]*dt**2 # Add sources
spold, sp = sp, spnew # Time levels
sp[0] = 0; sp[nx-1] = 0 # set boundaries pressure free
# ----------------------------------------
# Finite Differences Method 3pt
# ----------------------------------------
for i in range(1, nx-1):
d2p[i] = (p[i+1] - 2*p[i] + p[i-1])/dx**2 # Space derivative
pnew = 2*p - pold + dt**2 * c**2 * d2p # Time Extrapolation
pnew = pnew + sg*src[it]*dt**2 # Add source
pold, p = p, pnew # Time levels
p[0] = 0; p[nx-1] = 0 # set boundaries pressure free
# ----------------------------------------
# Finite Differences Method 5pt
# ----------------------------------------
for i in range(2, nx-2):
ad2p[i] = (-1/12*ap[i+2] + 4/3*ap[i+1] - 5/2*ap[i] \
+ 4/3*ap[i-1] - 1/12*ap[i-2])/dx**2 # Space derivative
apnew = 2*ap - apold + dt**2 * c**2 * ad2p # Time Extrapolation
apnew = apnew + sg*src[it]*dt**2 # Add source
apold, ap = ap, apnew # Time levels
ap[0] = 0; ap[nx-1] = 0 # set boundaries pressure free
# --------------------------------------
# Animation plot. Display solution
if not it % isnap:
for l in line1:
l.remove()
del l
for l in line2:
l.remove()
del l
for l in line3:
l.remove()
del l
# --------------------------------------
# Display lines
line1 = ax1.plot(x[isx:], p[isx:], 'k', lw=1.5)
line2 = ax2.plot(x[isx:], ap[isx:], 'r', lw=1.5)
line3 = ax3.plot(x[isx:], sp[isx:], 'b', lw=1.5)
plt.gcf().canvas.draw()
```
```python
```
In 2002, Fey suggested a pilot episode for a situation comedy about a cable news network to NBC, which rejected it. The pilot was reworked to revolve around an SNL-style series, and was accepted by NBC. She signed a contract with NBC in May 2003, which allowed her to remain in her SNL head writer position at least through the 2004–2005 television season. As part of the contract, Fey was to develop a prime-time project to be produced by Broadway Video and NBC Universal. The pilot, directed by Adam Bernstein, centered on Liz Lemon, the head writer of a variety show on NBC, and how she managed her relationships with the show's volatile stars and the new head of the network. In October 2006, the pilot aired on NBC as 30 Rock. Although the episode received generally favorable reviews, it finished third in its timeslot.
(* Title: Terminated coinductive list
Author: Andreas Lochbihler
Maintainer: Andreas Lochbihler
*)
header {* Terminated coinductive lists and their operations *}
theory TLList imports
Coinductive_List
begin
text {*
Terminated coinductive lists @{text "('a, 'b) tllist"} are the codatatype defined by the constructors
@{text "TNil"} of type @{text "'b \<Rightarrow> ('a, 'b) tllist"} and
@{text "TCons"} of type @{text "'a \<Rightarrow> ('a, 'b) tllist \<Rightarrow> ('a, 'b) tllist"}.
*}
subsection {* Auxiliary lemmas *}
lemma split_fst: "R (fst p) = (\<forall>x y. p = (x, y) \<longrightarrow> R x)"
by(cases p) simp
lemma split_fst_asm: "R (fst p) \<longleftrightarrow> (\<not> (\<exists>x y. p = (x, y) \<and> \<not> R x))"
by(cases p) simp
subsection {* Type definition *}
consts terminal0 :: "'a"
codatatype (tset: 'a, 'b) tllist =
TNil (terminal : 'b)
| TCons (thd : 'a) (ttl : "('a, 'b) tllist")
for
map: tmap
rel: tllist_all2
where
"thd (TNil _) = undefined"
| "ttl (TNil b) = TNil b"
| "terminal (TCons _ xs) = terminal0 xs"
overloading
terminal0 == "terminal0::('a, 'b) tllist \<Rightarrow> 'b"
begin
partial_function (tailrec) terminal0
where "terminal0 xs = (if is_TNil xs then case_tllist id undefined xs else terminal0 (ttl xs))"
end
lemma terminal0_terminal [simp]: "terminal0 = terminal"
apply(rule ext)
apply(subst terminal0.simps)
apply(case_tac x)
apply(simp_all add: terminal_def)
done
lemmas terminal_TNil [code, nitpick_simp] = tllist.sel(1)
lemma terminal_TCons [simp, code, nitpick_simp]: "terminal (TCons x xs) = terminal xs"
by simp
declare tllist.sel(2) [simp del]
primcorec unfold_tllist :: "('a \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> ('a \<Rightarrow> 'c) \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> ('c, 'b) tllist" where
"p a \<Longrightarrow> unfold_tllist p g1 g21 g22 a = TNil (g1 a)" |
"_ \<Longrightarrow> unfold_tllist p g1 g21 g22 a =
TCons (g21 a) (unfold_tllist p g1 g21 g22 (g22 a))"
declare
unfold_tllist.ctr(1) [simp]
tllist.corec(1) [simp]
subsection {* Code generator setup *}
text {* Test quickcheck setup *}
lemma "xs = TNil x"
quickcheck[random, expect=counterexample]
quickcheck[exhaustive, expect=counterexample]
oops
lemma "TCons x xs = TCons x xs"
quickcheck[narrowing, expect=no_counterexample]
oops
text {* More lemmas about generated constants *}
lemma is_TNil_ttl [simp]: "is_TNil xs \<Longrightarrow> is_TNil (ttl xs)"
by(cases xs) simp_all
lemma terminal_ttl [simp]: "terminal (ttl xs) = terminal xs"
by(cases xs) simp_all
lemma unfold_tllist_eq_TNil [simp]:
"unfold_tllist IS_TNIL TNIL THD TTL a = TNil b \<longleftrightarrow> IS_TNIL a \<and> b = TNIL a"
by(auto simp add: unfold_tllist.code)
lemma TNil_eq_unfold_tllist [simp]:
"TNil b = unfold_tllist IS_TNIL TNIL THD TTL a \<longleftrightarrow> IS_TNIL a \<and> b = TNIL a"
by(auto simp add: unfold_tllist.code)
lemma tmap_is_TNil: "is_TNil xs \<Longrightarrow> tmap f g xs = TNil (g (terminal xs))"
by(clarsimp simp add: is_TNil_def)
declare tllist.map_sel(2)[simp]
lemma ttl_tmap [simp]: "ttl (tmap f g xs) = tmap f g (ttl xs)"
by(cases xs) simp_all
lemma TNil_eq_tmap_conv:
"TNil y = tmap f g xs \<longleftrightarrow> (\<exists>y'. xs = TNil y' \<and> g y' = y)"
by(cases xs) auto
declare tllist.set_sel(1)[simp]
lemma tset_ttl: "tset (ttl xs) \<subseteq> tset xs"
by(cases xs) auto
lemma in_tset_ttlD: "x \<in> tset (ttl xs) \<Longrightarrow> x \<in> tset xs"
using tset_ttl[of xs] by auto
theorem tllist_set_induct[consumes 1, case_names find step]:
assumes "x \<in> tset xs" and "\<And>xs. \<not> is_TNil xs \<Longrightarrow> P (thd xs) xs"
and "\<And>xs y. \<lbrakk>\<not> is_TNil xs; y \<in> tset (ttl xs); P y (ttl xs)\<rbrakk> \<Longrightarrow> P y xs"
shows "P x xs"
using assms by(induct)(fastforce simp del: tllist.disc(2) iff: tllist.disc(2), auto)
theorem set2_tllist_induct[consumes 1, case_names find step]:
assumes "x \<in> set2_tllist xs" and "\<And>xs. is_TNil xs \<Longrightarrow> P (terminal xs) xs"
and "\<And>xs y. \<lbrakk>\<not> is_TNil xs; y \<in> set2_tllist (ttl xs); P y (ttl xs)\<rbrakk> \<Longrightarrow> P y xs"
shows "P x xs"
using assms by(induct)(fastforce simp del: tllist.disc(1) iff: tllist.disc(1), auto)
subsection {* Connection with @{typ "'a llist"} *}
context fixes b :: 'b begin
primcorec tllist_of_llist :: "'a llist \<Rightarrow> ('a, 'b) tllist" where
"tllist_of_llist xs = (case xs of LNil \<Rightarrow> TNil b | LCons x xs' \<Rightarrow> TCons x (tllist_of_llist xs'))"
end
primcorec llist_of_tllist :: "('a, 'b) tllist \<Rightarrow> 'a llist"
where "llist_of_tllist xs = (case xs of TNil _ \<Rightarrow> LNil | TCons x xs' \<Rightarrow> LCons x (llist_of_tllist xs'))"
simps_of_case tllist_of_llist_simps [simp, code, nitpick_simp]: tllist_of_llist.code
lemmas tllist_of_llist_LNil = tllist_of_llist_simps(1)
and tllist_of_llist_LCons = tllist_of_llist_simps(2)
lemma terminal_tllist_of_llist_lnull [simp]:
"lnull xs \<Longrightarrow> terminal (tllist_of_llist b xs) = b"
unfolding lnull_def by simp
declare tllist_of_llist.sel(1)[simp del]
lemma lhd_LNil: "lhd LNil = undefined"
by(simp add: lhd_def)
lemma thd_TNil: "thd (TNil b) = undefined"
by(simp add: thd_def)
lemma ttl_tllist_of_llist [simp]: "ttl (tllist_of_llist b xs) = tllist_of_llist b (ltl xs)"
by(cases xs) simp_all
lemma llist_of_tllist_eq_LNil:
"llist_of_tllist xs = LNil \<longleftrightarrow> is_TNil xs"
using llist_of_tllist.disc_iff(1) unfolding lnull_def .
simps_of_case llist_of_tllist_simps [simp, code, nitpick_simp]: llist_of_tllist.code
lemmas llist_of_tllist_TNil = llist_of_tllist_simps(1)
and llist_of_tllist_TCons = llist_of_tllist_simps(2)
declare llist_of_tllist.sel [simp del]
lemma lhd_llist_of_tllist [simp]: "\<not> is_TNil xs \<Longrightarrow> lhd (llist_of_tllist xs) = thd xs"
by(cases xs) simp_all
lemma ltl_llist_of_tllist [simp]:
"ltl (llist_of_tllist xs) = llist_of_tllist (ttl xs)"
by(cases xs) simp_all
lemma tllist_of_llist_cong [cong]:
assumes "xs = xs'" "lfinite xs' \<Longrightarrow> b = b'"
shows "tllist_of_llist b xs = tllist_of_llist b' xs'"
proof(unfold `xs = xs'`)
from assms have "lfinite xs' \<longrightarrow> b = b'" by simp
thus "tllist_of_llist b xs' = tllist_of_llist b' xs'"
by(coinduction arbitrary: xs') auto
qed
lemma llist_of_tllist_inverse [simp]:
"tllist_of_llist (terminal b) (llist_of_tllist b) = b"
by(coinduction arbitrary: b) simp_all
lemma tllist_of_llist_eq [simp]: "tllist_of_llist b' xs = TNil b \<longleftrightarrow> b = b' \<and> xs = LNil"
by(cases xs) auto
lemma TNil_eq_tllist_of_llist [simp]: "TNil b = tllist_of_llist b' xs \<longleftrightarrow> b = b' \<and> xs = LNil"
by(cases xs) auto
lemma tllist_of_llist_inject [simp]:
"tllist_of_llist b xs = tllist_of_llist c ys \<longleftrightarrow> xs = ys \<and> (lfinite ys \<longrightarrow> b = c)"
(is "?lhs \<longleftrightarrow> ?rhs")
proof(intro iffI conjI impI)
assume ?rhs
thus ?lhs by(auto intro: tllist_of_llist_cong)
next
assume ?lhs
thus "xs = ys"
by(coinduction arbitrary: xs ys)(auto simp add: lnull_def neq_LNil_conv)
assume "lfinite ys"
thus "b = c" using `?lhs`
unfolding `xs = ys` by(induct) simp_all
qed
lemma tllist_of_llist_inverse [simp]:
"llist_of_tllist (tllist_of_llist b xs) = xs"
by(coinduction arbitrary: xs) auto
definition cr_tllist :: "('a llist \<times> 'b) \<Rightarrow> ('a, 'b) tllist \<Rightarrow> bool"
where "cr_tllist \<equiv> (\<lambda>(xs, b) ys. tllist_of_llist b xs = ys)"
lemma Quotient_tllist:
"Quotient (\<lambda>(xs, a) (ys, b). xs = ys \<and> (lfinite ys \<longrightarrow> a = b))
(\<lambda>(xs, a). tllist_of_llist a xs) (\<lambda>ys. (llist_of_tllist ys, terminal ys)) cr_tllist"
unfolding Quotient_alt_def cr_tllist_def by(auto intro: tllist_of_llist_cong)
lemma reflp_tllist: "reflp (\<lambda>(xs, a) (ys, b). xs = ys \<and> (lfinite ys \<longrightarrow> a = b))"
by(simp add: reflp_def)
setup_lifting (no_code) Quotient_tllist reflp_tllist
context
begin
interpretation lifting_syntax .
lemma TNil_transfer [transfer_rule]:
"(B ===> pcr_tllist A B) (Pair LNil) TNil"
by(auto simp add: pcr_tllist_def cr_tllist_def intro!: rel_funI relcomppI)
lemma TCons_transfer [transfer_rule]:
"(A ===> pcr_tllist A B ===> pcr_tllist A B) (apfst \<circ> LCons) TCons"
by(auto 4 3 intro!: rel_funI relcomppI simp add: pcr_tllist_def rel_prod_def llist_all2_LCons1 cr_tllist_def)
lemma tmap_tllist_of_llist:
"tmap f g (tllist_of_llist b xs) = tllist_of_llist (g b) (lmap f xs)"
by(coinduction arbitrary: xs)(auto simp add: tmap_is_TNil)
lemma tmap_transfer [transfer_rule]:
"(op = ===> op = ===> pcr_tllist op = op = ===> pcr_tllist op = op =) (map_prod \<circ> lmap) tmap"
by(auto intro!: rel_funI simp add: cr_tllist_def tllist.pcr_cr_eq tmap_tllist_of_llist)
lemma lset_llist_of_tllist [simp]:
"lset (llist_of_tllist xs) = tset xs" (is "?lhs = ?rhs")
proof(intro set_eqI iffI)
fix x
assume "x \<in> ?lhs"
thus "x \<in> ?rhs"
by(induct "llist_of_tllist xs" arbitrary: xs rule: llist_set_induct)(auto simp: tllist.set_sel(2))
next
fix x
assume "x \<in> ?rhs"
thus "x \<in> ?lhs"
proof(induct rule: tllist_set_induct)
case (find xs)
thus ?case by(cases xs) auto
next
case step
thus ?case
by(auto simp add: ltl_llist_of_tllist[symmetric] simp del: ltl_llist_of_tllist dest: in_lset_ltlD)
qed
qed
lemma tset_tllist_of_llist [simp]:
"tset (tllist_of_llist b xs) = lset xs"
by(simp add: lset_llist_of_tllist[symmetric] del: lset_llist_of_tllist)
lemma tset_transfer [transfer_rule]:
"(pcr_tllist op = op = ===> op =) (lset \<circ> fst) tset"
by(auto simp add: cr_tllist_def tllist.pcr_cr_eq)
lemma is_TNil_transfer [transfer_rule]:
"(pcr_tllist op = op = ===> op =) (\<lambda>(xs, b). lnull xs) is_TNil"
by(auto simp add: tllist.pcr_cr_eq cr_tllist_def)
lemma thd_transfer [transfer_rule]:
"(pcr_tllist op = op = ===> op =) (lhd \<circ> fst) thd"
by(auto simp add: cr_tllist_def tllist.pcr_cr_eq)
lemma ttl_transfer [transfer_rule]:
"(pcr_tllist A B ===> pcr_tllist A B) (apfst ltl) ttl"
by(auto simp add: pcr_tllist_def cr_tllist_def rel_prod_def intro!: rel_funI relcomppI intro: llist_all2_ltlI)
lemma llist_of_tllist_transfer [transfer_rule]:
"(pcr_tllist op = B ===> op =) fst llist_of_tllist"
by(auto simp add: pcr_tllist_def cr_tllist_def llist.rel_eq)
lemma tllist_of_llist_transfer [transfer_rule]:
"(op = ===> op = ===> pcr_tllist op = op =) (\<lambda>b xs. (xs, b)) tllist_of_llist"
by(auto simp add: tllist.pcr_cr_eq cr_tllist_def)
lemma terminal_tllist_of_llist_lfinite [simp]:
"lfinite xs \<Longrightarrow> terminal (tllist_of_llist b xs) = b"
by(induct rule: lfinite.induct) simp_all
lemma set2_tllist_tllist_of_llist [simp]:
"set2_tllist (tllist_of_llist b xs) = (if lfinite xs then {b} else {})"
proof(cases "lfinite xs")
case True
thus ?thesis by(induct) auto
next
case False
{ fix x
assume "x \<in> set2_tllist (tllist_of_llist b xs)"
hence False using False
by(induct "tllist_of_llist b xs" arbitrary: xs rule: set2_tllist_induct) fastforce+ }
thus ?thesis using False by auto
qed
lemma set2_tllist_transfer [transfer_rule]:
"(pcr_tllist A B ===> rel_set B) (\<lambda>(xs, b). if lfinite xs then {b} else {}) set2_tllist"
by(auto 4 4 simp add: pcr_tllist_def cr_tllist_def dest: llist_all2_lfiniteD intro: rel_setI)
lemma tllist_all2_transfer [transfer_rule]:
"(op = ===> op = ===> pcr_tllist op = op = ===> pcr_tllist op = op = ===> op =)
(\<lambda>P Q (xs, b) (ys, b'). llist_all2 P xs ys \<and> (lfinite xs \<longrightarrow> Q b b')) tllist_all2"
unfolding tllist.pcr_cr_eq
apply(rule rel_funI)+
apply(clarsimp simp add: cr_tllist_def llist_all2_def tllist_all2_def)
apply(safe elim!: GrpE)
apply simp_all
apply(rule_tac b="tllist_of_llist (b, ba) bb" in relcomppI)
apply(auto intro!: GrpI simp add: tmap_tllist_of_llist)[2]
apply(rule_tac b="tllist_of_llist (b, ba) bb" in relcomppI)
apply(auto simp add: tmap_tllist_of_llist intro!: GrpI split: split_if_asm)[2]
apply(rule_tac b="llist_of_tllist bb" in relcomppI)
apply(auto intro!: GrpI)
apply(transfer, auto intro: GrpI split: split_if_asm)+
done
subsection {* Library function definitions *}
text {*
We lift the constants from @{typ "'a llist"} to @{typ "('a, 'b) tllist"} using the lifting package.
This way, many results are transferred easily.
*}
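(* For example, @{term tappend} below is @{term lappend} lifted to the llist
   component: it appends the underlying lazy lists and continues with the
   terminated list built from the first list's terminal value; an infinite
   first argument absorbs the continuation (see tappend_inf below). *)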
lift_definition tappend :: "('a, 'b) tllist \<Rightarrow> ('b \<Rightarrow> ('a, 'c) tllist) \<Rightarrow> ('a, 'c) tllist"
is "\<lambda>(xs, b) f. apfst (lappend xs) (f b)"
by(auto simp add: split_def lappend_inf)
lift_definition lappendt :: "'a llist \<Rightarrow> ('a, 'b) tllist \<Rightarrow> ('a, 'b) tllist"
is "apfst \<circ> lappend"
by(simp add: split_def)
lift_definition tfilter :: "'b \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> ('a, 'b) tllist \<Rightarrow> ('a, 'b) tllist"
is "\<lambda>b P (xs, b'). (lfilter P xs, if lfinite xs then b' else b)"
by(simp add: split_beta)
lift_definition tconcat :: "'b \<Rightarrow> ('a llist, 'b) tllist \<Rightarrow> ('a, 'b) tllist"
is "\<lambda>b (xss, b'). (lconcat xss, if lfinite xss then b' else b)"
by(simp add: split_beta)
lift_definition tnth :: "('a, 'b) tllist \<Rightarrow> nat \<Rightarrow> 'a"
is "lnth \<circ> fst" by(auto)
lift_definition tlength :: "('a, 'b) tllist \<Rightarrow> enat"
is "llength \<circ> fst" by auto
lift_definition tdropn :: "nat \<Rightarrow> ('a, 'b) tllist \<Rightarrow> ('a, 'b) tllist"
is "apfst \<circ> ldropn" by auto
abbreviation tfinite :: "('a, 'b) tllist \<Rightarrow> bool"
where "tfinite xs \<equiv> lfinite (llist_of_tllist xs)"
subsection {* @{term "tfinite"} *}
lemma tfinite_induct [consumes 1, case_names TNil TCons]:
assumes "tfinite xs"
and "\<And>y. P (TNil y)"
and "\<And>x xs. \<lbrakk>tfinite xs; P xs\<rbrakk> \<Longrightarrow> P (TCons x xs)"
shows "P xs"
using assms
by transfer (clarsimp, erule lfinite.induct)
lemma is_TNil_tfinite [simp]: "is_TNil xs \<Longrightarrow> tfinite xs"
by transfer clarsimp
subsection {* The terminal element @{term "terminal"} *}
lemma terminal_tinfinite:
assumes "\<not> tfinite xs"
shows "terminal xs = undefined"
unfolding terminal0_terminal[symmetric]
using assms
apply(rule contrapos_np)
by(induct xs rule: terminal0.raw_induct[rotated 1, OF refl, consumes 1])(auto split: tllist.split_asm)
lemma terminal_tllist_of_llist:
"terminal (tllist_of_llist y xs) = (if lfinite xs then y else undefined)"
by(simp add: terminal_tinfinite)
lemma terminal_transfer [transfer_rule]:
"(pcr_tllist A op = ===> op =) (\<lambda>(xs, b). if lfinite xs then b else undefined) terminal"
by(auto simp add: cr_tllist_def pcr_tllist_def terminal_tllist_of_llist intro!: rel_funI dest: llist_all2_lfiniteD)
lemma terminal_tmap [simp]: "tfinite xs \<Longrightarrow> terminal (tmap f g xs) = g (terminal xs)"
by(induct rule: tfinite_induct) simp_all
subsection {* @{term "tmap"} *}
lemma TCons_eq_tmap_conv:
"TCons y ys = tmap f g xs \<longleftrightarrow>
(\<exists>z zs. xs = TCons z zs \<and> f z = y \<and> tmap f g zs = ys)"
by(cases xs) auto
subsection {* Appending two terminated lazy lists @{term "tappend"} *}
lemma tappend_TNil [simp, code, nitpick_simp]:
"tappend (TNil b) f = f b"
by transfer auto
lemma tappend_TCons [simp, code, nitpick_simp]:
"tappend (TCons a tr) f = TCons a (tappend tr f)"
by transfer(auto simp add: apfst_def map_prod_def split: prod.splits)
lemma tappend_TNil2 [simp]:
"tappend xs TNil = xs"
by transfer auto
lemma tappend_assoc: "tappend (tappend xs f) g = tappend xs (\<lambda>b. tappend (f b) g)"
by transfer(auto simp add: split_beta lappend_assoc)
lemma tfinite_tappend: "tfinite (tappend xs f) \<longleftrightarrow> tfinite xs \<and> tfinite (f (terminal xs))"
by transfer auto
lift_definition tcast :: "('a, 'b) tllist \<Rightarrow> ('a, 'c) tllist"
is "\<lambda>(xs, a). (xs, undefined)" by clarsimp
lemma tappend_inf: "\<not> tfinite xs \<Longrightarrow> tappend xs f = tcast xs"
by(transfer)(auto simp add: apfst_def map_prod_def split_beta lappend_inf)
text {* @{term tappend} is the monadic bind on @{typ "('a, 'b) tllist"} *}
lemmas tllist_monad = tappend_TNil tappend_TNil2 tappend_assoc
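(* the collected lemmas are the three monad laws: left unit (tappend_TNil),
   right unit (tappend_TNil2), and associativity (tappend_assoc) *)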
subsection {* Appending a terminated lazy list to a lazy list @{term "lappendt"} *}
lemma lappendt_LNil [simp, code, nitpick_simp]: "lappendt LNil tr = tr"
by transfer auto
lemma lappendt_LCons [simp, code, nitpick_simp]:
"lappendt (LCons x xs) tr = TCons x (lappendt xs tr)"
by transfer auto
lemma terminal_lappendt_lfinite [simp]:
"lfinite xs \<Longrightarrow> terminal (lappendt xs ys) = terminal ys"
by transfer auto
lemma tllist_of_llist_eq_lappendt_conv:
"tllist_of_llist a xs = lappendt ys zs \<longleftrightarrow>
(\<exists>xs' a'. xs = lappend ys xs' \<and> zs = tllist_of_llist a' xs' \<and> (lfinite ys \<longrightarrow> a = a'))"
by transfer auto
lemma tset_lappendt_lfinite [simp]:
"lfinite xs \<Longrightarrow> tset (lappendt xs ys) = lset xs \<union> tset ys"
by transfer auto
subsection {* Filtering terminated lazy lists @{term tfilter} *}
lemma tfilter_TNil [simp]:
"tfilter b' P (TNil b) = TNil b"
by transfer auto
lemma tfilter_TCons [simp]:
"tfilter b P (TCons a tr) = (if P a then TCons a (tfilter b P tr) else tfilter b P tr)"
by transfer auto
lemma is_TNil_tfilter[simp]:
"is_TNil (tfilter y P xs) \<longleftrightarrow> (\<forall>x \<in> tset xs. \<not> P x)"
by transfer auto
lemma tfilter_empty_conv:
"tfilter y P xs = TNil y' \<longleftrightarrow> (\<forall>x \<in> tset xs. \<not> P x) \<and> (if tfinite xs then terminal xs = y' else y = y')"
by transfer(clarsimp simp add: lfilter_eq_LNil)
lemma tfilter_eq_TConsD:
"tfilter a P ys = TCons x xs \<Longrightarrow>
\<exists>us vs. ys = lappendt us (TCons x vs) \<and> lfinite us \<and> (\<forall>u\<in>lset us. \<not> P u) \<and> P x \<and> xs = tfilter a P vs"
by transfer(fastforce dest: lfilter_eq_LConsD[OF sym])
text {* Use a version of @{term "tfilter"} for code generation that does not evaluate the first argument *}
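(* wrapping the default terminal value in a unit closure delays its evaluation
   in strict target languages such as SML: the closure is applied only when the
   result of the filtering is actually demanded down to a terminal value *)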
definition tfilter' :: "(unit \<Rightarrow> 'b) \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> ('a, 'b) tllist \<Rightarrow> ('a, 'b) tllist"
where [simp, code del]: "tfilter' b = tfilter (b ())"
lemma tfilter_code [code, code_unfold]:
"tfilter = (\<lambda>b. tfilter' (\<lambda>_. b))"
by simp
lemma tfilter'_code [code]:
"tfilter' b' P (TNil b) = TNil b"
"tfilter' b' P (TCons a tr) = (if P a then TCons a (tfilter' b' P tr) else tfilter' b' P tr)"
by simp_all
end
hide_const (open) tfilter'
subsection {* Concatenating a terminated lazy list of lazy lists @{term tconcat} *}
lemma tconcat_TNil [simp]: "tconcat b (TNil b') = TNil b'"
by transfer auto
lemma tconcat_TCons [simp]: "tconcat b (TCons a tr) = lappendt a (tconcat b tr)"
by transfer auto
text {* Use a version of @{term "tconcat"} for code generation that does not evaluate the first argument *}
definition tconcat' :: "(unit \<Rightarrow> 'b) \<Rightarrow> ('a llist, 'b) tllist \<Rightarrow> ('a, 'b) tllist"
where [simp, code del]: "tconcat' b = tconcat (b ())"
lemma tconcat_code [code, code_unfold]: "tconcat = (\<lambda>b. tconcat' (\<lambda>_. b))"
by simp
lemma tconcat'_code [code]:
"tconcat' b (TNil b') = TNil b'"
"tconcat' b (TCons a tr) = lappendt a (tconcat' b tr)"
by simp_all
hide_const (open) tconcat'
subsection {* @{term tllist_all2} *}
lemmas tllist_all2_TNil = tllist.rel_inject(1)
lemmas tllist_all2_TCons = tllist.rel_inject(2)
lemma tllist_all2_TNil1: "tllist_all2 P Q (TNil b) ts \<longleftrightarrow> (\<exists>b'. ts = TNil b' \<and> Q b b')"
by transfer auto
lemma tllist_all2_TNil2: "tllist_all2 P Q ts (TNil b') \<longleftrightarrow> (\<exists>b. ts = TNil b \<and> Q b b')"
by transfer auto
lemma tllist_all2_TCons1:
"tllist_all2 P Q (TCons x ts) ts' \<longleftrightarrow> (\<exists>x' ts''. ts' = TCons x' ts'' \<and> P x x' \<and> tllist_all2 P Q ts ts'')"
by transfer(fastforce simp add: llist_all2_LCons1 dest: llist_all2_lfiniteD)
lemma tllist_all2_TCons2:
"tllist_all2 P Q ts' (TCons x ts) \<longleftrightarrow> (\<exists>x' ts''. ts' = TCons x' ts'' \<and> P x' x \<and> tllist_all2 P Q ts'' ts)"
by transfer(fastforce simp add: llist_all2_LCons2 dest: llist_all2_lfiniteD)
lemma tllist_all2_coinduct [consumes 1, case_names tllist_all2, case_conclusion tllist_all2 is_TNil TNil TCons, coinduct pred: tllist_all2]:
assumes "X xs ys"
and "\<And>xs ys. X xs ys \<Longrightarrow>
(is_TNil xs \<longleftrightarrow> is_TNil ys) \<and>
(is_TNil xs \<longrightarrow> is_TNil ys \<longrightarrow> R (terminal xs) (terminal ys)) \<and>
(\<not> is_TNil xs \<longrightarrow> \<not> is_TNil ys \<longrightarrow> P (thd xs) (thd ys) \<and> (X (ttl xs) (ttl ys) \<or> tllist_all2 P R (ttl xs) (ttl ys)))"
shows "tllist_all2 P R xs ys"
using assms
apply(transfer fixing: P R)
apply clarsimp
apply(rule conjI)
apply(erule llist_all2_coinduct, blast, blast)
proof(rule impI)
case (goal1 X xs b ys c)
from `lfinite xs` `X (xs, b) (ys, c)`
show "R b c"
by(induct arbitrary: ys rule: lfinite_induct)(auto dest: goal1(2))
qed
lemma tllist_all2_cases[consumes 1, case_names TNil TCons, cases pred]:
assumes "tllist_all2 P Q xs ys"
obtains (TNil) b b' where "xs = TNil b" "ys = TNil b'" "Q b b'"
| (TCons) x xs' y ys'
where "xs = TCons x xs'" and "ys = TCons y ys'"
and "P x y" and "tllist_all2 P Q xs' ys'"
using assms
by(cases xs)(fastforce simp add: tllist_all2_TCons1 tllist_all2_TNil1)+
lemma tllist_all2_tmap1:
"tllist_all2 P Q (tmap f g xs) ys \<longleftrightarrow> tllist_all2 (\<lambda>x. P (f x)) (\<lambda>x. Q (g x)) xs ys"
by(transfer)(auto simp add: llist_all2_lmap1)
lemma tllist_all2_tmap2:
"tllist_all2 P Q xs (tmap f g ys) \<longleftrightarrow> tllist_all2 (\<lambda>x y. P x (f y)) (\<lambda>x y. Q x (g y)) xs ys"
by(transfer)(auto simp add: llist_all2_lmap2)
lemma tllist_all2_mono:
"\<lbrakk> tllist_all2 P Q xs ys; \<And>x y. P x y \<Longrightarrow> P' x y; \<And>x y. Q x y \<Longrightarrow> Q' x y \<rbrakk>
\<Longrightarrow> tllist_all2 P' Q' xs ys"
by transfer(auto elim!: llist_all2_mono)
lemma tllist_all2_tlengthD: "tllist_all2 P Q xs ys \<Longrightarrow> tlength xs = tlength ys"
by(transfer)(auto dest: llist_all2_llengthD)
lemma tllist_all2_tfiniteD: "tllist_all2 P Q xs ys \<Longrightarrow> tfinite xs = tfinite ys"
by(transfer)(auto dest: llist_all2_lfiniteD)
lemma tllist_all2_tfinite1_terminalD:
"\<lbrakk> tllist_all2 P Q xs ys; tfinite xs \<rbrakk> \<Longrightarrow> Q (terminal xs) (terminal ys)"
by(frule tllist_all2_tfiniteD)(transfer, auto)
lemma tllist_all2_tfinite2_terminalD:
"\<lbrakk> tllist_all2 P Q xs ys; tfinite ys \<rbrakk> \<Longrightarrow> Q (terminal xs) (terminal ys)"
by(metis tllist_all2_tfinite1_terminalD tllist_all2_tfiniteD)
lemma tllist_all2D_llist_all2_llist_of_tllist:
"tllist_all2 P Q xs ys \<Longrightarrow> llist_all2 P (llist_of_tllist xs) (llist_of_tllist ys)"
by(transfer) auto
lemma tllist_all2_is_TNilD:
"tllist_all2 P Q xs ys \<Longrightarrow> is_TNil xs \<longleftrightarrow> is_TNil ys"
by(cases xs)(auto simp add: tllist_all2_TNil1 tllist_all2_TCons1)
lemma tllist_all2_thdD:
"\<lbrakk> tllist_all2 P Q xs ys; \<not> is_TNil xs \<or> \<not> is_TNil ys \<rbrakk> \<Longrightarrow> P (thd xs) (thd ys)"
by(cases xs)(auto simp add: tllist_all2_TNil1 tllist_all2_TCons1)
lemma tllist_all2_ttlI:
"\<lbrakk> tllist_all2 P Q xs ys; \<not> is_TNil xs \<or> \<not> is_TNil ys \<rbrakk> \<Longrightarrow> tllist_all2 P Q (ttl xs) (ttl ys)"
by(cases xs)(auto simp add: tllist_all2_TNil1 tllist_all2_TCons1)
lemma tllist_all2_refl:
"tllist_all2 P Q xs xs \<longleftrightarrow> (\<forall>x \<in> tset xs. P x x) \<and> (tfinite xs \<longrightarrow> Q (terminal xs) (terminal xs))"
by transfer(auto)
lemma tllist_all2_reflI:
"\<lbrakk> \<And>x. x \<in> tset xs \<Longrightarrow> P x x; tfinite xs \<Longrightarrow> Q (terminal xs) (terminal xs) \<rbrakk>
\<Longrightarrow> tllist_all2 P Q xs xs"
by(simp add: tllist_all2_refl)
lemma tllist_all2_conv_all_tnth:
"tllist_all2 P Q xs ys \<longleftrightarrow>
tlength xs = tlength ys \<and>
(\<forall>n. enat n < tlength xs \<longrightarrow> P (tnth xs n) (tnth ys n)) \<and>
(tfinite xs \<longrightarrow> Q (terminal xs) (terminal ys))"
by transfer(auto 4 4 simp add: llist_all2_conv_all_lnth split: split_if_asm dest: lfinite_llength_enat not_lfinite_llength)
lemma tllist_all2_tnthD:
"\<lbrakk> tllist_all2 P Q xs ys; enat n < tlength xs \<rbrakk>
\<Longrightarrow> P (tnth xs n) (tnth ys n)"
by(simp add: tllist_all2_conv_all_tnth)
lemma tllist_all2_tnthD2:
"\<lbrakk> tllist_all2 P Q xs ys; enat n < tlength ys \<rbrakk>
\<Longrightarrow> P (tnth xs n) (tnth ys n)"
by(simp add: tllist_all2_conv_all_tnth)
lemmas tllist_all2_eq = tllist.rel_eq
lemma tmap_eq_tmap_conv_tllist_all2:
"tmap f g xs = tmap f' g' ys \<longleftrightarrow>
tllist_all2 (\<lambda>x y. f x = f' y) (\<lambda>x y. g x = g' y) xs ys"
apply transfer
apply(clarsimp simp add: lmap_eq_lmap_conv_llist_all2)
apply(auto dest: llist_all2_lfiniteD)
done
lemma tllist_all2_trans:
"\<lbrakk> tllist_all2 P Q xs ys; tllist_all2 P Q ys zs; transp P; transp Q \<rbrakk>
\<Longrightarrow> tllist_all2 P Q xs zs"
by transfer(auto elim: llist_all2_trans dest: llist_all2_lfiniteD transpD)
lemma tllist_all2_tappendI:
"\<lbrakk> tllist_all2 P Q xs ys;
\<lbrakk> tfinite xs; tfinite ys; Q (terminal xs) (terminal ys) \<rbrakk>
\<Longrightarrow> tllist_all2 P R (xs' (terminal xs)) (ys' (terminal ys)) \<rbrakk>
\<Longrightarrow> tllist_all2 P R (tappend xs xs') (tappend ys ys')"
apply transfer
apply(auto 4 3 simp add: apfst_def map_prod_def lappend_inf split: prod.split_asm dest: llist_all2_lfiniteD intro: llist_all2_lappendI)
apply(frule llist_all2_lfiniteD, simp add: lappend_inf)
done
lemma llist_all2_tllist_of_llistI:
"tllist_all2 A B xs ys \<Longrightarrow> llist_all2 A (llist_of_tllist xs) (llist_of_tllist ys)"
by(coinduction arbitrary: xs ys)(auto dest: tllist_all2_is_TNilD tllist_all2_thdD intro: tllist_all2_ttlI)
lemma tllist_all2_tllist_of_llist [simp]:
"tllist_all2 A B (tllist_of_llist b xs) (tllist_of_llist c ys) \<longleftrightarrow>
llist_all2 A xs ys \<and> (lfinite xs \<longrightarrow> B b c)"
by transfer auto
subsection {* From a terminated lazy list to a lazy list @{term llist_of_tllist} *}
lemma llist_of_tllist_tmap [simp]:
"llist_of_tllist (tmap f g xs) = lmap f (llist_of_tllist xs)"
by transfer auto
lemma llist_of_tllist_tappend:
"llist_of_tllist (tappend xs f) = lappend (llist_of_tllist xs) (llist_of_tllist (f (terminal xs)))"
by(transfer)(auto simp add: lappend_inf)
lemma llist_of_tllist_lappendt [simp]:
"llist_of_tllist (lappendt xs tr) = lappend xs (llist_of_tllist tr)"
by transfer auto
lemma llist_of_tllist_tfilter [simp]:
"llist_of_tllist (tfilter b P tr) = lfilter P (llist_of_tllist tr)"
by transfer auto
lemma llist_of_tllist_tconcat:
"llist_of_tllist (tconcat b trs) = lconcat (llist_of_tllist trs)"
by transfer auto
lemma llist_of_tllist_eq_lappend_conv:
"llist_of_tllist xs = lappend us vs \<longleftrightarrow>
(\<exists>ys. xs = lappendt us ys \<and> vs = llist_of_tllist ys \<and> terminal xs = terminal ys)"
by transfer auto
subsection {* The nth element of a terminated lazy list @{term "tnth"} *}
lemma tnth_TNil [nitpick_simp]:
"tnth (TNil b) n = undefined n"
by(transfer)(simp add: lnth_LNil)
lemma tnth_TCons:
"tnth (TCons x xs) n = (case n of 0 \<Rightarrow> x | Suc n' \<Rightarrow> tnth xs n')"
by(transfer)(auto simp add: lnth_LCons split: nat.split)
lemma lnth_llist_of_tllist [simp]:
"lnth (llist_of_tllist xs) = tnth xs"
by(transfer)(auto)
lemma tnth_tmap [simp]: "enat n < tlength xs \<Longrightarrow> tnth (tmap f g xs) n = f (tnth xs n)"
by transfer simp
subsection {* The length of a terminated lazy list @{term "tlength"} *}
lemma [simp, nitpick_simp]:
shows tlength_TNil: "tlength (TNil b) = 0"
and tlength_TCons: "tlength (TCons x xs) = eSuc (tlength xs)"
apply(transfer, simp)
apply(transfer, auto)
done
lemma llength_llist_of_tllist [simp]: "llength (llist_of_tllist xs) = tlength xs"
by transfer auto
lemma tlength_tmap [simp]: "tlength (tmap f g xs) = tlength xs"
by transfer simp
definition gen_tlength :: "nat \<Rightarrow> ('a, 'b) tllist \<Rightarrow> enat"
where "gen_tlength n xs = enat n + tlength xs"
lemma gen_tlength_code [code]:
"gen_tlength n (TNil b) = enat n"
"gen_tlength n (TCons x xs) = gen_tlength (n + 1) xs"
by(simp_all add: gen_tlength_def iadd_Suc eSuc_enat[symmetric] iadd_Suc_right)
lemma tlength_code [code]: "tlength = gen_tlength 0"
by(simp add: gen_tlength_def fun_eq_iff zero_enat_def)
subsection {* @{term "tdropn"} *}
lemma tdropn_0 [simp, code, nitpick_simp]: "tdropn 0 xs = xs"
by transfer auto
lemma tdropn_TNil [simp, code]: "tdropn n (TNil b) = (TNil b)"
by transfer(auto)
lemma tdropn_Suc_TCons [simp, code]: "tdropn (Suc n) (TCons x xs) = tdropn n xs"
by transfer(auto)
lemma tdropn_Suc [nitpick_simp]: "tdropn (Suc n) xs = (case xs of TNil b \<Rightarrow> TNil b | TCons x xs' \<Rightarrow> tdropn n xs')"
by(cases xs) simp_all
lemma lappendt_ltake_tdropn:
"lappendt (ltake (enat n) (llist_of_tllist xs)) (tdropn n xs) = xs"
by transfer (auto)
lemma llist_of_tllist_tdropn [simp]:
"llist_of_tllist (tdropn n xs) = ldropn n (llist_of_tllist xs)"
by transfer auto
lemma tdropn_Suc_conv_tdropn:
"enat n < tlength xs \<Longrightarrow> TCons (tnth xs n) (tdropn (Suc n) xs) = tdropn n xs"
by transfer(auto simp add: ldropn_Suc_conv_ldropn)
lemma tlength_tdropn [simp]: "tlength (tdropn n xs) = tlength xs - enat n"
by transfer auto
lemma tnth_tdropn [simp]: "enat (n + m) < tlength xs \<Longrightarrow> tnth (tdropn n xs) m = tnth xs (m + n)"
by transfer auto
subsection {* @{term "tset"} *}
lemma tset_induct [consumes 1, case_names find step]:
assumes "x \<in> tset xs"
and "\<And>xs. P (TCons x xs)"
and "\<And>x' xs. \<lbrakk> x \<in> tset xs; x \<noteq> x'; P xs \<rbrakk> \<Longrightarrow> P (TCons x' xs)"
shows "P xs"
using assms
by transfer(clarsimp, erule lset_induct)
lemma tset_conv_tnth: "tset xs = {tnth xs n|n . enat n < tlength xs}"
by transfer(simp add: lset_conv_lnth)
lemma in_tset_conv_tnth: "x \<in> tset xs \<longleftrightarrow> (\<exists>n. enat n < tlength xs \<and> tnth xs n = x)"
using tset_conv_tnth[of xs] by auto
subsection {* Setup for Lifting/Transfer *}
subsubsection {* Relator and predicator properties *}
abbreviation "tllist_all == pred_tllist"
subsubsection {* Transfer rules for the Transfer package *}
context
begin
interpretation lifting_syntax .
lemma set1_pre_tllist_transfer [transfer_rule]:
"(rel_pre_tllist A B C ===> rel_set A) set1_pre_tllist set1_pre_tllist"
by(auto simp add: rel_pre_tllist_def vimage2p_def rel_fun_def set1_pre_tllist_def rel_set_def collect_def sum_set_defs rel_sum_def fsts_def split: sum.split_asm)
lemma set2_pre_tllist_transfer [transfer_rule]:
"(rel_pre_tllist A B C ===> rel_set B) set2_pre_tllist set2_pre_tllist"
by(auto simp add: rel_pre_tllist_def vimage2p_def rel_fun_def set2_pre_tllist_def rel_set_def collect_def sum_set_defs snds_def rel_sum_def split: sum.split_asm)
lemma set3_pre_tllist_transfer [transfer_rule]:
"(rel_pre_tllist A B C ===> rel_set C) set3_pre_tllist set3_pre_tllist"
by(auto simp add: rel_pre_tllist_def vimage2p_def rel_fun_def set3_pre_tllist_def rel_set_def collect_def sum_set_defs snds_def rel_sum_def split: sum.split_asm)
lemma TNil_transfer2 [transfer_rule]: "(B ===> tllist_all2 A B) TNil TNil"
by auto
declare TNil_transfer [transfer_rule]
lemma TCons_transfer2 [transfer_rule]:
"(A ===> tllist_all2 A B ===> tllist_all2 A B) TCons TCons"
unfolding rel_fun_def by simp
declare TCons_transfer [transfer_rule]
lemma case_tllist_transfer [transfer_rule]:
"((B ===> C) ===> (A ===> tllist_all2 A B ===> C) ===> tllist_all2 A B ===> C)
case_tllist case_tllist"
unfolding rel_fun_def
by (simp add: tllist_all2_TNil1 tllist_all2_TNil2 split: tllist.split)
lemma unfold_tllist_transfer [transfer_rule]:
"((A ===> op =) ===> (A ===> B) ===> (A ===> C) ===> (A ===> A) ===> A ===> tllist_all2 C B) unfold_tllist unfold_tllist"
proof(rule rel_funI)+
fix IS_TNIL1 :: "'a \<Rightarrow> bool" and IS_TNIL2
TERMINAL1 TERMINAL2 THD1 THD2 TTL1 TTL2 x y
assume rel: "(A ===> op =) IS_TNIL1 IS_TNIL2" "(A ===> B) TERMINAL1 TERMINAL2"
"(A ===> C) THD1 THD2" "(A ===> A) TTL1 TTL2"
and "A x y"
show "tllist_all2 C B (unfold_tllist IS_TNIL1 TERMINAL1 THD1 TTL1 x) (unfold_tllist IS_TNIL2 TERMINAL2 THD2 TTL2 y)"
using `A x y`
apply(coinduction arbitrary: x y)
using rel by(auto 4 4 elim: rel_funE)
qed
lemma corec_tllist_transfer [transfer_rule]:
"((A ===> op =) ===> (A ===> B) ===> (A ===> C) ===> (A ===> op =) ===> (A ===> tllist_all2 C B) ===> (A ===> A) ===> A ===> tllist_all2 C B) corec_tllist corec_tllist"
proof(rule rel_funI)+
fix IS_TNIL1 MORE1 :: "'a \<Rightarrow> bool" and IS_TNIL2
TERMINAL1 TERMINAL2 THD1 THD2 MORE2 STOP1 STOP2 TTL1 TTL2 x y
assume rel: "(A ===> op =) IS_TNIL1 IS_TNIL2" "(A ===> B) TERMINAL1 TERMINAL2"
"(A ===> C) THD1 THD2" "(A ===> op =) MORE1 MORE2"
"(A ===> tllist_all2 C B) STOP1 STOP2" "(A ===> A) TTL1 TTL2"
and "A x y"
show "tllist_all2 C B (corec_tllist IS_TNIL1 TERMINAL1 THD1 MORE1 STOP1 TTL1 x) (corec_tllist IS_TNIL2 TERMINAL2 THD2 MORE2 STOP2 TTL2 y)"
using `A x y`
apply(coinduction arbitrary: x y)
using rel by(auto 4 4 elim: rel_funE)
qed
lemma ttl_transfer2 [transfer_rule]:
"(tllist_all2 A B ===> tllist_all2 A B) ttl ttl"
unfolding ttl_def[abs_def] by transfer_prover
declare ttl_transfer [transfer_rule]
lemma tset_transfer2 [transfer_rule]:
"(tllist_all2 A B ===> rel_set A) tset tset"
by (intro rel_funI rel_setI) (auto simp only: in_tset_conv_tnth tllist_all2_conv_all_tnth Bex_def)
lemma tmap_transfer2 [transfer_rule]:
"((A ===> B) ===> (C ===> D) ===> tllist_all2 A C ===> tllist_all2 B D) tmap tmap"
by(auto simp add: rel_fun_def tllist_all2_tmap1 tllist_all2_tmap2 elim: tllist_all2_mono)
declare tmap_transfer [transfer_rule]
lemma is_TNil_transfer2 [transfer_rule]:
"(tllist_all2 A B ===> op =) is_TNil is_TNil"
by(auto dest: tllist_all2_is_TNilD)
declare is_TNil_transfer [transfer_rule]
lemma tappend_transfer [transfer_rule]:
"(tllist_all2 A B ===> (B ===> tllist_all2 A C) ===> tllist_all2 A C) tappend tappend"
by(auto intro: tllist_all2_tappendI elim: rel_funE)
declare tappend.transfer [transfer_rule]
lemma lappendt_transfer [transfer_rule]:
"(llist_all2 A ===> tllist_all2 A B ===> tllist_all2 A B) lappendt lappendt"
unfolding rel_fun_def
by transfer(auto intro: llist_all2_lappendI)
declare lappendt.transfer [transfer_rule]
lemma llist_of_tllist_transfer2 [transfer_rule]:
"(tllist_all2 A B ===> llist_all2 A) llist_of_tllist llist_of_tllist"
by(auto intro: llist_all2_tllist_of_llistI)
declare llist_of_tllist_transfer [transfer_rule]
lemma tllist_of_llist_transfer2 [transfer_rule]:
"(B ===> llist_all2 A ===> tllist_all2 A B) tllist_of_llist tllist_of_llist"
by(auto intro!: rel_funI)
declare tllist_of_llist_transfer [transfer_rule]
lemma tlength_transfer [transfer_rule]:
"(tllist_all2 A B ===> op =) tlength tlength"
by(auto dest: tllist_all2_tlengthD)
declare tlength.transfer [transfer_rule]
lemma tdropn_transfer [transfer_rule]:
"(op = ===> tllist_all2 A B ===> tllist_all2 A B) tdropn tdropn"
unfolding rel_fun_def
by transfer(auto intro: llist_all2_ldropnI)
declare tdropn.transfer [transfer_rule]
lemma tfilter_transfer [transfer_rule]:
"(B ===> (A ===> op =) ===> tllist_all2 A B ===> tllist_all2 A B) tfilter tfilter"
unfolding rel_fun_def
by transfer(auto intro: llist_all2_lfilterI dest: llist_all2_lfiniteD)
declare tfilter.transfer [transfer_rule]
lemma tconcat_transfer [transfer_rule]:
"(B ===> tllist_all2 (llist_all2 A) B ===> tllist_all2 A B) tconcat tconcat"
unfolding rel_fun_def
by transfer(auto intro: llist_all2_lconcatI dest: llist_all2_lfiniteD)
declare tconcat.transfer [transfer_rule]
lemma tllist_all2_rsp:
assumes R1: "\<forall>x y. R1 x y \<longrightarrow> (\<forall>a b. R1 a b \<longrightarrow> S x a = T y b)"
and R2: "\<forall>x y. R2 x y \<longrightarrow> (\<forall>a b. R2 a b \<longrightarrow> S' x a = T' y b)"
and xsys: "tllist_all2 R1 R2 xs ys"
and xs'ys': "tllist_all2 R1 R2 xs' ys'"
shows "tllist_all2 S S' xs xs' = tllist_all2 T T' ys ys'"
proof
assume "tllist_all2 S S' xs xs'"
with xsys xs'ys' show "tllist_all2 T T' ys ys'"
proof(coinduction arbitrary: ys ys' xs xs')
case (tllist_all2 ys ys' xs xs')
thus ?case
by cases (auto 4 4 simp add: tllist_all2_TCons1 tllist_all2_TCons2 tllist_all2_TNil1 tllist_all2_TNil2 dest: R1[rule_format] R2[rule_format])
qed
next
assume "tllist_all2 T T' ys ys'"
with xsys xs'ys' show "tllist_all2 S S' xs xs'"
proof(coinduction arbitrary: xs xs' ys ys')
case (tllist_all2 xs xs' ys ys')
thus ?case
by cases(auto 4 4 simp add: tllist_all2_TCons1 tllist_all2_TCons2 tllist_all2_TNil1 tllist_all2_TNil2 dest: R1[rule_format] R2[rule_format])
qed
qed
lemma tllist_all2_transfer2 [transfer_rule]:
"((R1 ===> R1 ===> op =) ===> (R2 ===> R2 ===> op =) ===>
tllist_all2 R1 R2 ===> tllist_all2 R1 R2 ===> op =) tllist_all2 tllist_all2"
by (simp add: tllist_all2_rsp rel_fun_def)
declare tllist_all2_transfer [transfer_rule]
end
text {*
Delete lifting rules for @{typ "('a, 'b) tllist"}
because the parametricity rules take precedence over
most of the transfer rules. They can be restored by
including the bundle @{text "tllist.lifting"}.
*}
lifting_update tllist.lifting
lifting_forget tllist.lifting
end
|
The integral of the identity function over a line segment is the square of the length of the segment divided by two.
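A minimal sketch, assuming the segment is the real interval $[0,L]$ so that its length is $L$:
$$\int_0^L x \, dx = \left[\tfrac{x^2}{2}\right]_0^L = \frac{L^2}{2}.$$
|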
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
D : Type u₂
inst✝ : Category.{v₂, u₂} D
G : C ⥤ D
X Y : C
f g : X ⟶ Y
Z : C
π : Y ⟶ Z
q : IsSplitCoequalizer f g π
F : C ⥤ D
⊢ F.map f ≫ F.map π = F.map g ≫ F.map π
[PROOFSTEP]
rw [← F.map_comp, q.condition, F.map_comp]
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
D : Type u₂
inst✝ : Category.{v₂, u₂} D
G : C ⥤ D
X Y : C
f g : X ⟶ Y
Z : C
π : Y ⟶ Z
q : IsSplitCoequalizer f g π
F : C ⥤ D
⊢ F.map q.rightSection ≫ F.map π = 𝟙 (F.obj Z)
[PROOFSTEP]
rw [← F.map_comp, q.rightSection_π, F.map_id]
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
D : Type u₂
inst✝ : Category.{v₂, u₂} D
G : C ⥤ D
X Y : C
f g : X ⟶ Y
Z : C
π : Y ⟶ Z
q : IsSplitCoequalizer f g π
F : C ⥤ D
⊢ F.map q.leftSection ≫ F.map g = 𝟙 (F.obj Y)
[PROOFSTEP]
rw [← F.map_comp, q.leftSection_bottom, F.map_id]
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
D : Type u₂
inst✝ : Category.{v₂, u₂} D
G : C ⥤ D
X Y : C
f g : X ⟶ Y
Z : C
π : Y ⟶ Z
q : IsSplitCoequalizer f g π
F : C ⥤ D
⊢ F.map q.leftSection ≫ F.map f = F.map π ≫ F.map q.rightSection
[PROOFSTEP]
rw [← F.map_comp, q.leftSection_top, F.map_comp]
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
D : Type u₂
inst✝ : Category.{v₂, u₂} D
G : C ⥤ D
X Y : C
f g : X ⟶ Y
Z : C
h : Y ⟶ Z
t : IsSplitCoequalizer f g h
s : Cofork f g
⊢ Cofork.π (asCofork t) ≫ t.rightSection ≫ Cofork.π s = Cofork.π s
[PROOFSTEP]
dsimp
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
D : Type u₂
inst✝ : Category.{v₂, u₂} D
G : C ⥤ D
X Y : C
f g : X ⟶ Y
Z : C
h : Y ⟶ Z
t : IsSplitCoequalizer f g h
s : Cofork f g
⊢ h ≫ t.rightSection ≫ Cofork.π s = Cofork.π s
[PROOFSTEP]
rw [← t.leftSection_top_assoc, s.condition, t.leftSection_bottom_assoc]
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
D : Type u₂
inst✝ : Category.{v₂, u₂} D
G : C ⥤ D
X Y : C
f g : X ⟶ Y
Z : C
h : Y ⟶ Z
t : IsSplitCoequalizer f g h
s : Cofork f g
m✝ :
((Functor.const WalkingParallelPair).obj (asCofork t).pt).obj WalkingParallelPair.one ⟶
((Functor.const WalkingParallelPair).obj s.pt).obj WalkingParallelPair.one
hm : Cofork.π (asCofork t) ≫ m✝ = Cofork.π s
⊢ m✝ = t.rightSection ≫ Cofork.π s
[PROOFSTEP]
simp [← hm]
|
State Before: α : Type u_1
inst✝ : NonUnitalNonAssocRing α
k : α
h : ∀ (x : α), x * k = 0 → x = 0
⊢ IsRightRegular k
State After: α : Type u_1
inst✝ : NonUnitalNonAssocRing α
k : α
h : ∀ (x : α), x * k = 0 → x = 0
x y : α
h' : x * k = y * k
⊢ (x - y) * k = 0
Tactic: refine' fun x y (h' : x * k = y * k) => sub_eq_zero.mp (h _ _)
State Before: α : Type u_1
inst✝ : NonUnitalNonAssocRing α
k : α
h : ∀ (x : α), x * k = 0 → x = 0
x y : α
h' : x * k = y * k
⊢ (x - y) * k = 0
State After: no goals
Tactic: rw [sub_mul, sub_eq_zero, h']
|
[STATEMENT]
lemma bin_lcp_neq: "a \<noteq> b \<Longrightarrow> \<alpha> = f ([a] \<cdot> [b]) \<and>\<^sub>p f ([b] \<cdot> [a])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> \<alpha> = f ([a] \<cdot> [b]) \<and>\<^sub>p f ([b] \<cdot> [a])
[PROOF STEP]
using neq_bin_swap[of a b]
[PROOF STATE]
proof (prove)
using this:
a \<noteq> b \<Longrightarrow> b = 1 - a
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> \<alpha> = f ([a] \<cdot> [b]) \<and>\<^sub>p f ([b] \<cdot> [a])
[PROOF STEP]
unfolding bin_lcp_def'[of a]
[PROOF STATE]
proof (prove)
using this:
a \<noteq> b \<Longrightarrow> b = 1 - a
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> f ([a] \<cdot> [1 - a]) \<and>\<^sub>p f ([1 - a] \<cdot> [a]) = f ([a] \<cdot> [b]) \<and>\<^sub>p f ([b] \<cdot> [a])
[PROOF STEP]
by blast |
If $f$ and $g$ are analytic on a set $S$, then $f - g$ is analytic on $S$. |
module Conversion where
open import Agda.Builtin.Nat
nonDependent : Nat -> Nat -> Nat
nonDependent a b = a
dependent : {A : Set} -> A -> A
dependent a = a
stuff : {A : Set} -> {B : Nat} -> Nat -> Nat
stuff zero = zero
stuff (suc c) = dependent c
|
#ifndef __ENVIRE_CORE_GRAPHVIZ__
#define __ENVIRE_CORE_GRAPHVIZ__
#include <fstream> // std::ofstream
#include <iostream> // std::cout fallback in GraphViz::write
#include <envire_core/TransformTree.hpp>
#include <boost/graph/graphviz.hpp>
#include <boost/format.hpp> // boost::format used by TransformWriter
namespace envire { namespace core
{
/**@class Transform Writer
* Frame Graph Viz property writer for boost graphs
* */
template <class _Frame>
class FrameWriter
{
public:
FrameWriter(_Frame _f):f(_f){}
template <class _Vertex>
void operator()(std::ostream &out, const _Vertex& n) const
{
if(f[n].name.find("camera") != std::string::npos)
{
out << "[shape=record, label=\"<f0> " << f[n].name <<
"|<f1>" << f[n].items.size()<<"\""
<<",style=filled,fillcolor=orange]";
}
else
{
out << "[shape=record, label=\"<f0> " << f[n].name <<
"|<f1>" << f[n].items.size()<<"\""
<<",style=filled,fillcolor=lightblue]";
}
}
private:
_Frame f;
};
/**@class Transform Writer
* Transform Graph Viz Property writer for boost graphs
* */
template <class _Transform>
class TransformWriter
{
public:
TransformWriter(_Transform _tf):tf(_tf){}
template <class _Edge>
void operator()(std::ostream &out, const _Edge& e) const
{
out << "[label=\"" << tf[e].time.toString(::base::Time::Seconds) <<
boost::format("\\nt: (%.1f %.1f %.1f)\\nr: (%.1f %.1f %.1f %.1f)") % tf[e].transform.translation.x() % tf[e].transform.translation.y() % tf[e].transform.translation.z()
% tf[e].transform.orientation.w() % tf[e].transform.orientation.x() % tf[e].transform.orientation.y() % tf[e].transform.orientation.z()
<< "\""
<< ",shape=ellipse,color=red,style=filled,fillcolor=lightcoral]";
}
private:
_Transform tf;
};
/**@class Environment Writer
* Transform Graph Viz Property writer for boost graphs
* */
class GraphPropWriter
{
public:
GraphPropWriter(){}
void operator()(std::ostream &out) const
{
//out<< "graph[rankdir=LR,splines=ortho];\n";
out<< "graph[size=\"88,136\", ranksep=3.0, nodesep=2.0, fontname=\"Helvetica\", fontsize=8];\n";
}
};
/**@class GraphViz
* Class to print TransformGraphs in Graph Viz
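* Usage sketch (illustrative only; assumes an already populated TransformGraph named graph):
*   envire::core::GraphViz viz;
*   viz.write(graph, "graph.dot"); // an empty filename prints to std::cout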
* */
class GraphViz
{
protected:
inline GraphPropWriter
make_graph_writer()
{
return GraphPropWriter();
}
/**@brief Writer for Frame Node
*/
template <class _Frame>
inline FrameWriter<_Frame>
make_node_writer(_Frame frame)
{
return FrameWriter<_Frame>(frame);
}
/**@brief Writer for Frame Node
*/
template <class _Transform>
inline TransformWriter<_Transform>
make_edge_writer(_Transform tf)
{
return TransformWriter<_Transform>(tf);
}
public:
/**@brief Export to GraphViz
*
*/
void write(const TransformGraph &graph, const std::string& filename = "")
{
std::streambuf * buf;
std::ofstream of;
if(!filename.empty())
{
of.open(filename.c_str());
buf = of.rdbuf();
}
else
{
buf = std::cout.rdbuf();
}
/** Print graph **/
std::ostream out(buf);
boost::write_graphviz (out, graph,
make_node_writer(boost::get(&FrameProperty::frame, graph)),
make_edge_writer(boost::get(&TransformProperty::transform, graph)),
make_graph_writer());
}
};
}}
#endif
|
##### Chapter 3: Classification using Nearest Neighbors --------------------
pacman::p_load("class", "gmodels", "tidyverse")
## Example: Classifying Cancer Samples ----
## Step 2: Exploring and preparing the data ----
# import the CSV file
wbcd <- read.csv(".//Chapter03//wisc_bc_data.csv", stringsAsFactors = FALSE)
# examine the structure of the wbcd data frame
str(wbcd)
# drop the id feature
wbcd <- wbcd[-1]
# table of diagnosis
table(wbcd$diagnosis)
# recode diagnosis as a factor
wbcd$diagnosis <- factor(wbcd$diagnosis, levels = c("B", "M"),
labels = c("Benign", "Malignant"))
# table or proportions with more informative labels
round(prop.table(table(wbcd$diagnosis)) * 100, digits = 1)
# summarize three numeric features
summary(wbcd[c("radius_mean", "area_mean", "smoothness_mean")])
# create normalization function
normalize <- function(x) {
return ((x - min(x)) / (max(x) - min(x)))
}
# test normalization function - result should be identical
normalize(c(1, 2, 3, 4, 5))
normalize(c(10, 20, 30, 40, 50))
# normalize the wbcd data
wbcd_n <- as.data.frame(lapply(wbcd[2:31], normalize))
# confirm that normalization worked
summary(wbcd_n$area_mean)
# create training and test data
wbcd_train <- wbcd_n[1:469, ]
wbcd_test <- wbcd_n[470:569, ]
# create labels for training and test data
wbcd_train_labels <- wbcd[1:469, 1]
wbcd_test_labels <- wbcd[470:569, 1]
## Step 3: Training a model on the data ----
# load the "class" library
library(class)
wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test,
cl = wbcd_train_labels, k = 21)
## Step 4: Evaluating model performance ----
# load the "gmodels" library
library(gmodels)
# Create the cross tabulation of predicted vs. actual
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred,
prop.chisq = FALSE)
## Step 5: Improving model performance ----
# use the scale() function to z-score standardize a data frame
wbcd_z <- as.data.frame(scale(wbcd[-1]))
# confirm that the transformation was applied correctly
summary(wbcd_z$area_mean)
# create training and test datasets
wbcd_train <- wbcd_z[1:469, ]
wbcd_test <- wbcd_z[470:569, ]
# re-classify test cases
wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test,
cl = wbcd_train_labels, k = 21)
# Create the cross tabulation of predicted vs. actual
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred,
prop.chisq = FALSE)
# try several different values of k
wbcd_train <- wbcd_n[1:469, ]
wbcd_test <- wbcd_n[470:569, ]
wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test, cl = wbcd_train_labels, k=1)
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred, prop.chisq=FALSE)
wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test, cl = wbcd_train_labels, k=5)
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred, prop.chisq=FALSE)
wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test, cl = wbcd_train_labels, k=11)
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred, prop.chisq=FALSE)
wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test, cl = wbcd_train_labels, k=15)
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred, prop.chisq=FALSE)
wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test, cl = wbcd_train_labels, k=21)
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred, prop.chisq=FALSE)
wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test, cl = wbcd_train_labels, k=27)
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred, prop.chisq=FALSE)
# Reworked using tidyverse grammar
install.packages("modeldata")
pacman::p_load("dplyr", "class", "gmodels", "tidyverse", "ggplot2", "rsample", "recipes", "caret", "modeldata") # missing packages are installed automatically
library(modeldata)
options(scipen = 999)
ggplot2::theme_set(ggplot2::theme_light())
".//Chapter03//wisc_bc_data.csv" %>%
read.csv(stringsAsFactors = FALSE) %>%
as_tibble() %>%
mutate_if(is.ordered, factor, ordered = FALSE) -> wbcd
wbcd %>% # counts of benign vs. malignant cases
select(-id) %>%
glimpse() %>%
mutate(diagnosis = str_replace_all(diagnosis, "M", "Malignant")) %>%
mutate(diagnosis = str_replace_all(diagnosis, "B", "Benign")) %>%
count(diagnosis)
wbcd %>% # proportions of benign vs. malignant cases
select(-id) %>%
mutate(diagnosis = str_replace_all(diagnosis, "M", "Malignant")) %>%
mutate(diagnosis = str_replace_all(diagnosis, "B", "Benign")) %>%
count(diagnosis) %>%
mutate(prop=format(prop.table(n)*100, digits=3))
library(tidymodels) # tidy packages for machine learning
wbcd %>% # create training and test data (split 7:3!)
select(-id) %>%
mutate(diagnosis = str_replace_all(diagnosis, "M", "Malignant")) %>%
mutate(diagnosis = str_replace_all(diagnosis, "B", "Benign")) %>%
ungroup() %>%
rsample::initial_split(prop = 0.7, strata = diagnosis) -> wbcd_split
wbcd_split %>% # what does the data look like?
training() %>% # use testing() to view the test data
glimpse()
wbcd_split %>% training() -> train_df
wbcd_split %>% testing() -> test_df
# From here on, the intuitive analysis begins.
df_rec <- recipe(diagnosis ~., data = train_df) %>%
step_downsample(diagnosis) %>% # balance the training data
step_center(-diagnosis) %>% # center numeric data to mean 0
step_scale(-diagnosis) %>% # scale numeric data to standard deviation 1
prep()
df_juiced <- juice(df_rec)
df_juiced %>% pivot_longer(-diagnosis) %>%
ggplot() +
geom_histogram(aes(value, fill = diagnosis)) +
facet_wrap(~name)
baked_test <- bake(df_rec, new_data = test_df)
install.packages("kknn")
#make a knn spec
knn_spec <- nearest_neighbor() %>%
set_engine("kknn") %>%
set_mode("classification")
#make a rf spec
rf_spec <- rand_forest() %>%
set_engine("ranger") %>%
set_mode("classification")
#use the knn spec to fit the pre-processed training data
knn_fit <- knn_spec %>%
fit(diagnosis ~., data = df_juiced)
#use the rf spec to fit the pre-processed training data
rf_fit <- rf_spec %>%
fit(diagnosis ~., data = df_juiced)
rf_fit
knn_fit %>%
predict(baked_test) %>%
bind_cols(baked_test) %>%
metrics(truth = diagnosis, estimate = .pred_class)
knn_fit %>%
predict(df_juiced) %>%
bind_cols(df_juiced) %>%
metrics(truth = diagnosis, estimate = .pred_class)
rf_fit %>%
predict(df_juiced) %>%
bind_cols(df_juiced) %>%
metrics(truth = diagnosis, estimate = .pred_class)
results_train <- knn_fit %>%
predict(df_juiced) %>%
mutate(model = "knn",
truth = df_juiced$diagnosis) %>%
bind_rows(rf_fit %>%
predict(df_juiced) %>%
mutate(model = "rf",
truth = df_juiced$diagnosis)
) %>%
mutate(accuracy = if_else(.pred_class == truth, "yes", "no"))
results_test <- knn_fit %>%
predict(baked_test) %>%
mutate(model = "knn",
truth = baked_test$diagnosis) %>%
bind_rows(rf_fit %>%
predict(baked_test) %>%
mutate(model = "rf",
truth = baked_test$diagnosis)
) %>%
mutate(accuracy = if_else(.pred_class == truth, "yes", "no"))
results_train %>%
ggplot() +
geom_bar(aes(accuracy, fill = accuracy)) +
facet_wrap(~ model)
results_test %>%
ggplot() +
geom_bar(aes(accuracy, fill = accuracy)) +
facet_wrap(~ model)
|
module ClosedInt
import Bits
import Signed
import Data.ZZ
%access export
%default total
||| A value in the closed (integer) interval [n,m]
data ClosedInt : (n: ZZ) -> (m: ZZ) -> Type where
||| Built with a ZZ
CI : {n, m: ZZ} -> (x:ZZ) -> ClosedInt n m
-- Consider adding a proof that n <= x <= m using some ZZ proof types, similar
-- to Nat's LTE type. See
-- https://stackoverflow.com/questions/46066253/lte-for-integers-zz
-- for ideas
||| Show instance for printing `ClosedInt`s
Show (ClosedInt n m) where
show {n} {m} (CI x) = "CI(" ++ show n ++ "," ++ show m ++ ") " ++ show x
||| Eq instance for equality of `ClosedInt`s
Eq (ClosedInt n m) where
(==) (CI x) (CI y) = x==y
||| Ord instance for ordering of `ClosedInt`s
Ord (ClosedInt n m) where
compare (CI x) (CI y) = compare x y
||| MinBound instance for generating extremes of `ClosedInt n m`
MinBound (ClosedInt n m) where
minBound = CI n
||| MaxBound instance for generating extremes of `ClosedInt n m`
MaxBound (ClosedInt n m) where
maxBound = CI m
||| Generate a `ClosedInt n m` with zeros. The caller must ensure that `n<=0<=m`
zeros : ClosedInt n m
zeros = CI 0
||| Safely create a `ClosedInt n m` from a ZZ using saturation
saturate : ZZ -> ClosedInt n m
saturate x = let tryCI = CI {n} {m} x
in if tryCI > maxBound then maxBound else
if tryCI < minBound then minBound else tryCI
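-- For example (values illustrative): with n = -2 and m = 3,
-- saturate 7 yields CI 3 and saturate (-5) yields CI (-2)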
--------------------------------------------------------------------------------
-- ClosedInt arithmetic functions
--------------------------------------------------------------------------------
||| Helper function to find high bound of `ClosedInt` scaled by an integer
||| constant
-- public export
multHi : (n, m, a : ZZ) -> ZZ
multHi n m (Pos k) = m*(Pos k)
multHi n m (NegS k) = n*(NegS k)
||| Helper function to find low bound of `ClosedInt` scaled by an integer
||| constant
-- public export
multLo : (n, m, a : ZZ) -> ZZ
multLo n m (Pos k) = n*(Pos k)
multLo n m (NegS k) = m*(NegS k)
||| Multiply a `ClosedInt` with an integer constant
multConst : ClosedInt n m -> (a : ZZ) -> ClosedInt (multLo n m a) (multHi n m a)
multConst {n}{m} (CI x) a = CI (x*a) {n=multLo n m a} {m=multHi n m a}
||| Add two `ClosedInt`s
add : ClosedInt n m -> ClosedInt a b -> ClosedInt (n+a) (m+b)
add {n}{m}{a}{b} (CI x) (CI y) = CI (x+y) {n=(n+a)} {m=(m+b)}
-- Local Variables:
-- idris-load-packages: ("contrib")
-- End:
|
This breaks compilation on anything but Linux, BSD/FreeBSD, Mac OS X, Sun, and SysV R4. I deleted it to build for Windows.
Did you mean to put this below all the other OS definitions, guarded by a condition that excludes every OS that already has its own definition and implementation?
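Something like the following guard, perhaps (the macro names are illustrative, not taken from the project):

#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__APPLE__) && \
    !defined(__sun) && !defined(__svr4__)
/* generic fallback definition goes here */
#endif
|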
-- Andreas, 2016-12-31, issue #1975 reported by nad.
-- {-# OPTIONS -v tc.lhs.split:40 #-}
data ⊥ : Set where
record ⊤ : Set where
data Bool : Set where
true false : Bool
T : Bool → Set
T false = ⊥
T true = ⊤
module M (b : Bool) where
data D : Set where
c : T b → D
open M true
-- The following definition is rejected:
-- rejected : M.D false → ⊥
-- rejected (c x) = x
data D₂ : Set where
c : D₂
-- WAS: However, the following definition is accepted:
test : M.D false → ⊥
test (c x) = x
-- I think both definitions should be rejected.
-- NOW: Both are rejected.
|
library(tidyverse)
library(RCurl)
library(xml2)
url_root = "ftp://ftp-reg.cloud.bom.gov.au/fwo/IDV60920.xml"
dat=getURLContent(url_root,userpwd="bom893:bmaT94jN",binary=FALSE)
dat=read_xml(dat)
dl = xml_find_all(dat,".//observations")
dl = xml_find_all(dl,".//station")
st_id = xml_attr(dl,"wmo-id")
st_name = xml_attr(dl,"stn-name")
st_lat = xml_attr(dl,"lat")
st_lon = xml_attr(dl,"lon")
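# Pull one observation element (matched by its type attribute) from each
# station node; stations missing the element become NA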
get_var = function(dl,varname){
x=map(dl,xml_find_all,xpath=sprintf("period/level/element[@type='%s']",varname))
x=map(x,xml_text)
x[sapply(x, is_empty)] <- NA
as.numeric(unlist(x))
}
df = tibble(id = st_id,
name = st_name,
lat=as.numeric(st_lat),
lon=as.numeric(st_lon),
apparent_temp = get_var(dl,"apparent_temp"),
air_temperature = get_var(dl,"air_temperature"),
dew_point = get_var(dl,"dew_point"),
pres = get_var(dl,"pres"),
rel_humidity = get_var(dl,"rel-humidity"),
wind_dir_deg = get_var(dl,"wind_dir_deg"),
wind_spd_kmh = get_var(dl,"wind_spd_kmh"),
rainfall_24hr = get_var(dl,"rainfall_24hr")
)
library(tmap)
library(sf)
dat = st_as_sf(df,coords=c("lon","lat"),crs=4326)
m = tm_shape(dat) + tm_text("air_temperature")
|
{-
Half adjoint equivalences ([HAEquiv])
- Iso to HAEquiv ([iso→HAEquiv])
- Equiv to HAEquiv ([equiv→HAEquiv])
- Cong is an equivalence ([congEquiv])
-}
{-# OPTIONS --cubical --safe #-}
module Cubical.Foundations.HAEquiv where
open import Cubical.Core.Everything
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.GroupoidLaws
open import Cubical.Data.Nat
record isHAEquiv {ℓ ℓ'} {A : Type ℓ} {B : Type ℓ'} (f : A → B) : Type (ℓ-max ℓ ℓ') where
field
g : B → A
sec : ∀ a → g (f a) ≡ a
ret : ∀ b → f (g b) ≡ b
com : ∀ a → cong f (sec a) ≡ ret (f a)
-- from redtt's ha-equiv/symm
com-op : ∀ b → cong g (ret b) ≡ sec (g b)
com-op b j i = hcomp (λ k → λ { (i = i0) → sec (g b) (j ∧ (~ k))
; (j = i0) → g (ret b i)
; (j = i1) → sec (g b) (i ∨ (~ k))
; (i = i1) → g b })
(cap1 j i)
where cap0 : Square {- (i = i0) -} (λ j → f (sec (g b) j))
{- (j = i0) -} (λ i → f (g (ret b i)))
{- (j = i1) -} (λ i → ret b i)
{- (i = i1) -} (λ j → ret b j)
cap0 j i = hcomp (λ k → λ { (i = i0) → com (g b) (~ k) j
; (j = i0) → f (g (ret b i))
; (j = i1) → ret b i
; (i = i1) → ret b j })
(ret (ret b i) j)
filler : I → I → A
filler j i = hfill (λ k → λ { (i = i0) → g (ret b k)
; (i = i1) → g b })
(inS (sec (g b) i)) j
cap1 : Square {- (i = i0) -} (λ j → sec (g b) j)
{- (j = i0) -} (λ i → g (ret b i))
{- (j = i1) -} (λ i → g b)
{- (i = i1) -} (λ j → g b)
cap1 j i = hcomp (λ k → λ { (i = i0) → sec (sec (g b) j) k
; (j = i0) → sec (g (ret b i)) k
; (j = i1) → filler i k
; (i = i1) → filler j k })
(g (cap0 j i))
HAEquiv : ∀ {ℓ ℓ'} (A : Type ℓ) (B : Type ℓ') → Type (ℓ-max ℓ ℓ')
HAEquiv A B = Σ (A → B) λ f → isHAEquiv f
private
variable
ℓ ℓ' : Level
A : Type ℓ
B : Type ℓ'
iso→HAEquiv : Iso A B → HAEquiv A B
iso→HAEquiv {A = A} {B = B} (iso f g ε η) = f , (record { g = g ; sec = η ; ret = ret ; com = com })
where
sides : ∀ b i j → Partial (~ i ∨ i) B
sides b i j = λ { (i = i0) → ε (f (g b)) j
; (i = i1) → ε b j }
bot : ∀ b i → B
bot b i = cong f (η (g b)) i
ret : (b : B) → f (g b) ≡ b
ret b i = hcomp (sides b i) (bot b i)
com : (a : A) → cong f (η a) ≡ ret (f a)
com a i j = hcomp (λ k → λ { (i = i0) → ε (f (η a j)) k
; (i = i1) → hfill (sides (f a) j) (inS (bot (f a) j)) k
; (j = i0) → ε (f (g (f a))) k
; (j = i1) → ε (f a) k})
(cong (cong f) (sym (Hfa≡fHa (λ x → g (f x)) η a)) i j)
equiv→HAEquiv : A ≃ B → HAEquiv A B
equiv→HAEquiv e = iso→HAEquiv (equivToIso e)
congEquiv : ∀ {ℓ ℓ'} {A : Type ℓ} {B : Type ℓ'} {x y : A} (e : A ≃ B) → (x ≡ y) ≃ (e .fst x ≡ e .fst y)
congEquiv {A = A} {B} {x} {y} e = isoToEquiv (iso intro elim intro-elim elim-intro)
where
e' : HAEquiv A B
e' = equiv→HAEquiv e
f : A → B
f = e' .fst
g : B → A
g = isHAEquiv.g (e' .snd)
sec : ∀ a → g (f a) ≡ a
sec = isHAEquiv.sec (e' .snd)
ret : ∀ b → f (g b) ≡ b
ret = isHAEquiv.ret (e' .snd)
com : ∀ a → cong f (sec a) ≡ ret (f a)
com = isHAEquiv.com (e' .snd)
intro : x ≡ y → f x ≡ f y
intro = cong f
elim-sides : ∀ p i j → Partial (~ i ∨ i) A
elim-sides p i j = λ { (i = i0) → sec x j
; (i = i1) → sec y j }
elim-bot : ∀ p i → A
elim-bot p i = cong g p i
elim : f x ≡ f y → x ≡ y
elim p i = hcomp (elim-sides p i) (elim-bot p i)
intro-elim : ∀ p → intro (elim p) ≡ p
intro-elim p i j =
hcomp (λ k → λ { (i = i0) → f (hfill (elim-sides p j)
(inS (elim-bot p j)) k)
; (i = i1) → ret (p j) k
; (j = i0) → com x i k
; (j = i1) → com y i k })
(f (g (p j)))
elim-intro : ∀ p → elim (intro p) ≡ p
elim-intro p i j =
hcomp (λ k → λ { (i = i0) → hfill (λ l → λ { (j = i0) → secEq e x l
; (j = i1) → secEq e y l })
(inS (cong (λ z → g (f z)) p j)) k
; (i = i1) → p j
; (j = i0) → secEq e x (i ∨ k)
; (j = i1) → secEq e y (i ∨ k) })
(secEq e (p j) i)
|
import data.equiv.basic
import analysis.topology.topological_structures
def transport_topological_ring {α β : Type}
[topological_space α] [ring α] [topological_ring α] (f : α ≃ β) : @topological_ring β sorry sorry := sorry
def transport_ring {α β : Type*} [ring α] (f : α ≃ β) : ring β :=
{ add := λ x y, f (f.symm x + f.symm y),
zero := f 0,
neg := λ x, f (-f.symm x),
mul := λ x y, f (f.symm x * f.symm y),
one := f 1,
add_assoc := λ x y z, by simp; from add_assoc _ _ _,
zero_add := λ x, by simp; from (equiv.apply_eq_iff_eq_inverse_apply _ _ _).2 (zero_add _),
add_zero := λ x, by simp; from (equiv.apply_eq_iff_eq_inverse_apply _ _ _).2 (add_zero _),
add_left_neg := λ x, by simp; from add_left_neg _,
add_comm := λ x y, by simp; from add_comm _ _,
mul_assoc := λ x y z, by simp; from mul_assoc _ _ _,
one_mul := λ x, by simp; from (equiv.apply_eq_iff_eq_inverse_apply _ _ _).2 (one_mul _),
mul_one := λ x, by simp; from (equiv.apply_eq_iff_eq_inverse_apply _ _ _).2 (mul_one _),
left_distrib := λ x y z, by simp; from left_distrib _ _ _,
right_distrib := λ x y z, by simp; from right_distrib _ _ _, }
import data.equiv
--import analysis.topology.topological_structures
universes u v
def transport_ring {α : Type u} {β : Type v} [ring α] (f : α ≃ β) : ring β :=
{ add := λ x y, f (f.symm x + f.symm y),
zero := f 0,
neg := λ x, f (-f.symm x),
mul := λ x y, f (f.symm x * f.symm y),
one := f 1,
add_assoc := λ x y z, by simp; from add_assoc _ _ _,
zero_add := λ x, by simp; from (equiv.apply_eq_iff_eq_inverse_apply _ _ _).2 (zero_add _),
add_zero := λ x, by simp; from (equiv.apply_eq_iff_eq_inverse_apply _ _ _).2 (add_zero _),
add_left_neg := λ x, by simp; from add_left_neg _,
add_comm := λ x y, by simp; from add_comm _ _,
mul_assoc := λ x y z, by simp; from mul_assoc _ _ _,
one_mul := λ x, by simp; from (equiv.apply_eq_iff_eq_inverse_apply _ _ _).2 (one_mul _),
mul_one := λ x, by simp; from (equiv.apply_eq_iff_eq_inverse_apply _ _ _).2 (mul_one _),
left_distrib := λ x y z, by simp; from left_distrib _ _ _,
right_distrib := λ x y z, by simp; from right_distrib _ _ _, }
class transportable (f : Type u → Sort v) :=
(on_equiv : Π {α β : Type u} (e : equiv α β), equiv (f α) (f β))
(on_refl : Π (α : Type u), on_equiv (equiv.refl α) = equiv.refl (f α))
(on_trans : Π {α β γ : Type u} (d : equiv α β) (e : equiv β γ), on_equiv (equiv.trans d e) = equiv.trans (on_equiv d) (on_equiv e))
#print topological_ring
#print sigma
-- Our goal is an automagic proof of the following
theorem group.transportable : transportable group := sorry
theorem topological_ring.transportable : transportable
(λ R : (Σ (α : Type u), (topological_space α) × (ring α)) ,
@topological_ring R.fst (R.snd).1 (R.snd).2) := sorry
#check topological_ring
-- These we might need to define and prove by hand
def Const : Type u → Type v := λ α, punit
def Fun : Type u → Type v → Type (max u v) := λ α β, α → β
def Prod : Type u → Type v → Type (max u v) := λ α β, α × β
def Swap : Type u → Type v → Type (max u v) := λ α β, β × α
lemma Const.transportable : (transportable Const) := {
  on_equiv := λ β γ H, ⟨λ _, punit.star, λ _, punit.star, sorry, sorry⟩,
  on_refl := λ β, _,
  on_trans := λ β, _ }
lemma Fun.transportable (α : Type u) : (transportable (Fun α)) := sorry
lemma Prod.transportable (α : Type u) : (transportable (Prod α)) := sorry
lemma Swap.transportable (α : Type u) : (transportable (Swap α)) := sorry
-- And then we can define
def Hom1 (α : Type u) : Type v → Type (max u v) := λ β, α → β
def Hom2 (β : Type v) : Type u → Type (max u v) := λ α, α → β
def Aut : Type u → Type u := λ α, α → α
-- And hopefully automagically derive
lemma Hom1.transportable (α : Type u) : (transportable (Hom1 α)) := sorry
lemma Hom2.transportable (β : Type v) : (transportable (Hom1 β)) := sorry
lemma Aut.transportable (α : Type u) : (transportable Aut) := sorry
-- If we have all these in place...
-- A bit of magic might actually be able to derive `group.transportable` on line 11.
-- After all, a group just is a type plus some functions... and we can now transport functions.
universe zfc_u
structure equiv' (α : Type zfc_u) (β : Type zfc_u) :=
(i : α → β)
(j : β → α)
(ij : ∀ (x : α), j (i x) = x)
(ji : ∀ (y : β), i (j y) = y)
definition mul_is_add {α : Type zfc_u} : equiv' (has_mul α) (has_add α) :=
{ i := λ ⟨mul⟩,⟨mul⟩,
j := λ ⟨mul⟩,⟨mul⟩, -- didn't I just write that?
ij := λ ⟨x⟩,rfl,
ji := λ ⟨x⟩, rfl, -- didn't I just write that?
}
definition equiv_mul {α β : Type zfc_u} : equiv' α β → equiv' (has_mul α) (has_mul β) := λ E,
{ i := λ αmul,⟨λ b1 b2, E.i (@has_mul.mul α αmul (E.j b1) (E.j b2))⟩,
j := λ βmul,⟨λ a1 a2, E.j (@has_mul.mul β βmul (E.i a1) (E.i a2))⟩, -- didn't I just write that?
-- should we introduce E-dual?
ij := λ f, begin
cases f, -- aargh why do I struggle
suffices : (λ (a1 a2 : α), E.j (E.i (f (E.j (E.i a1)) (E.j (E.i a2))))) = (λ a1 a2, f a1 a2),
by rw this,
funext,
simp [E.ij,E.ji], -- got there in the end
end,
ji := -- I can't even do this in term mode so I just copy out the entire tactic mode proof again.
λ g, begin
cases g, -- aargh why do I struggle
suffices : (λ (b1 b2 : β), E.i (E.j (g (E.i (E.j b1)) (E.i (E.j b2))))) = (λ b1 b2, g b1 b2),
by rw this,
funext,
simp [E.ij,E.ji], -- got there in the end
end, -- didn't I just write that?
}
definition mul_to_add {α β : Type} : equiv' α β → equiv' (has_add α) (has_add β) := _ ∘ equiv_mul
|
open import Common.Reflect
open import Common.Prelude
data Z : Set where
zero : Z
foo : QName → Bool → Bool
foo (quote Nat.zero) b = {!b!}
foo _ _ = false
|
# 3.2. land use regression
mod.min <- lm(pm25 ~ 1, data=mon.data)
mod.full <- lm(pm25 ~ long + lat + log10.m.to.a1 + log10.m.to.a2 + log10.m.to.a3
+ log10.m.to.road + km.to.coast + s2000.pop.div.10000, data=mon.data)
step(mod.min, direction="both", scope=list(lower = ~1, upper = mod.full), test="F")
# step() returns the selected model; its coefficients give the covariate list used below.
mod <- lm(pm25 ~ km.to.coast + log10.m.to.a1 + log10.m.to.a2 + lat, data=mon.data)
# 3.2.1. leave-one-out cross-validation
for(i in 1:nrow(mon.data)){
train <- mon.data[-i,]
test <- mon.data[i,]
train.lm <- lm(pm25 ~ km.to.coast + log10.m.to.a1 + log10.m.to.a2 + lat, data=train)
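# Build the 1-row design matrix for the held-out monitor: intercept column plus the covariates kept in the fitted model.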
X <- cbind(1,matrix(unlist(test[,names(train.lm$model)[-1]]),nrow=1))
pred.1 <- X %*% train.lm$coef
if(i==1) pred <- pred.1 else pred <- c(pred,pred.1)
}
mse.lur <- mean( (mon.data$pm25 - pred)^2 )
r2.lur <- 1 - mse.lur/var(mon.data$pm25)
mse.lur
r2.lur
|
\section{Systematic uncertainties}
\label{sec:hmhzz_sys}
This section describes the sources and values of theoretical and experimental systematic uncertainties considered in this analysis.
%% =========================================================================================================================
\subsection{Theoretical systematics}
The theoretical modelling uncertainties include PDF variations, missing QCD higher-order corrections via variations of the factorization and renormalization scales,
and the parton showering uncertainties.
\subsubsection{Theoretical uncertainties for signal}
\label{sec:hmhzz_theo_signal}
The PDF, QCD scale and parton showering uncertainties affecting the signal acceptance, which originate from the analysis selection, are taken into account in the different categories.
The acceptance uncertainties are calculated on the acceptance factor, which extrapolates from the fiducial phase space to the full phase space via a simple ratio:
\begin{equation}
A = \frac{N_{\text{fiducial}}}{N_{\text{total}}} .
\end{equation}
For the PDF uncertainties, the standard deviation of the 100 PDF replicas of NNPDF3.0 NNLO, as well as comparisons to two external PDF sets, MMHT2014 NNLO and CT14 NNLO, are considered.
For the missing QCD higher-order corrections, the effects are studied with truth events by comparing weights corresponding to
variations of the renormalization and factorization scale factors, up and down by a factor of two, and the envelope of the different variations is used.
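Schematically, denoting by $A_i$ the acceptance obtained with scale variation $i$ and by $A_{\text{nom}}$ the nominal acceptance, the envelope prescription can be written as
\begin{equation}
\delta A_{\text{scale}} = \max_{i} \left| \frac{A_i - A_{\text{nom}}}{A_{\text{nom}}} \right| .
\end{equation}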
The parton showering uncertainties are estimated by comparing events generated with different \textsc{Pythia8} settings.
Systematic uncertainties are studied for both the cut-based and the MVA-based event categorizations:
for the cut-based analysis in two categories, the inclusive ggF-CBA-enriched and the VBF-CBA-enriched category,
and for the MVA-based analysis in three categories, ggF-MVA-high, ggF-MVA-low and VBF-MVA-enriched.
This section shows the MVA-based results as an example.
Tables~\ref{tab:acc-ggF-dnn} and~\ref{tab:acc-VBF-dnn} show the theoretical uncertainties described above for the ggF and VBF signals, respectively, in the MVA-based categorization.
\begin{table}[htbp]
\centering
\caption{Summary of acceptance uncertainties of PDF, QCD scale and parton shower variations for ggF production. The MVA-based categorization is used.}
\label{tab:acc-ggF-dnn}
\begin{spacing}{0.75}
\begin{tabular}{cccc}
\toprule
Categories & PDF & QCD Scale & Parton Shower \\
\midrule
ggF-MVA-high & 0.40\% & 0.06\% & 2.03\% \\
ggF-MVA-low & 0.56\% & 0.07\% & 4.86\% \\
VBF-MVA-enriched & 0.53\% & 0.09\% & 3.43\% \\
\bottomrule
\end{tabular}
\end{spacing}
\end{table}
\begin{table}[htbp]
\centering
\caption{Summary of acceptance uncertainties of PDF, QCD scale and parton shower variations for VBF production. The MVA-based categorization is used.}
\label{tab:acc-VBF-dnn}
\begin{spacing}{0.75}
\begin{tabular}{cccc}
\toprule
Categories & PDF & QCD Scale & Parton Shower \\
\midrule
ggF-MVA-high & 0.18\% & 1.20\% & 0.41\% \\
ggF-MVA-low & 0.43\% & 0.26\% & 0.36\% \\
VBF-MVA-enriched & 0.23\% & 3.19\% & 0.85\% \\
\bottomrule
\end{tabular}
\end{spacing}
\end{table}
%\textbf{Cut-based analysis} \\
%
%Table~\ref{tab:acc-ggF-cut} and ~\ref{tab:acc-VBF-cut} show the theoretical uncertainties for ggF and VBF signals respectively in cut-based categorization.
%The uncertainties are computed in two different categories: the inclusive ggF and VBF category.
%\begin{table}[htbp]
% \centering
% \caption{Summary of acceptance uncertainties of PDF, QCD scale and parton shower variations for ggF production. The cut-based categorization is used.}
% \label{tab:acc-ggF-cut}
% \begin{tabular}{cccc}
% \toprule
% Categories & PDF & QCD Scale & Parton Shower \\
% \midrule
% ggF & 0.44\% & 0.07\% & 0.22\% \\
% VBF & 0.61\% & 0.12\% & 3.33\% \\
% \bottomrule
% \end{tabular}
%\end{table}
%
%\begin{table}[htbp]
% \centering
% \caption{Summary of acceptance uncertainties of PDF, QCD scale and parton shower variations for VBF production. The cut-based categorization is used.}
% \label{tab:acc-VBF-cut}
% \begin{tabular}{cccc}
% \toprule
% Categories & PDF & QCD Scale & Parton Shower \\
% \midrule
% ggF & 0.18\% & 2.87\% & 0.52\% \\
% VBF & 0.08\% & 4.52\% & 0.72\% \\
% \bottomrule
% \end{tabular}
%\end{table}
\subsubsection{Theoretical uncertainties for SM background processes}
The theoretical uncertainties of the irreducible $ZZ$ backgrounds are considered in terms of both the variations of the shape of the \mfl distributions
and the acceptance variations originating from the event selection.
The PDF and QCD scale uncertainties are considered by using the same method as described for signal.
The parton showering uncertainties for the \textsc{Sherpa} samples are evaluated by varying the resummation scale by a factor of two,
changing the CKKW setting and using a different showering option, following the PMG recommendations in Ref.~\cite{twiki_pmgsyst};
the quadratic sum of the uncertainties from the different showering variations is taken as the final uncertainty.
Moreover, the shape uncertainty associated with the electroweak higher-order correction for the \qqZZ process is also taken into account.
As for the signals, these theoretical uncertainties for the irreducible backgrounds are studied for both the cut-based and the MVA-based event categorizations.
The shape uncertainties vary from less than 1\% in the low-mass region to 50\% in the high-mass tail, owing to large statistical fluctuations.
The acceptance uncertainties vary from about 1\% for the PDF variations to 40\% for the parton showering variations.
The VBF category has relatively larger uncertainties.
Table~\ref{tab:acc-all-qqZZ_MVA} summarizes the acceptance uncertainties of PDF, QCD scale, and parton showering variations for the dominant background: \qqZZ.
\begin{table}[htbp]
\centering
\caption{Summary of acceptance uncertainties of PDF, scale, and parton showering variations for QCD \qqZZ background. The MVA-based categorization is used.}
\label{tab:acc-all-qqZZ_MVA}
\begin{spacing}{0.75}
\begin{tabular}{cccc}
\toprule
Categories & PDF & QCD Scale & Parton showering \\
\midrule
ggF-MVA-high & 1.15\% & 10.16 \% & 3.71\% \\
ggF-MVA-low & 1.04\% & 3.26 \% & 3.80\% \\
VBF-MVA-enriched & 2.91\% & 27.90 \% & 23.82\% \\
\bottomrule
\end{tabular}
\end{spacing}
\end{table}
%% =========================================================================================================================
\subsection{Experimental systematics}
The signal and background predictions used in this analysis are also affected by various sources of experimental systematic uncertainties.
As described in Section~\ref{sec:vbszz_exp_uncer}, the dominant experimental uncertainties in this analysis come from the energy and momentum scales
and the reconstruction and identification efficiencies of the leptons and jets, as well as from the luminosity uncertainty.
The systematic uncertainties are calculated following the recommendations of the Combined Performance (CP) groups of the ATLAS experiment.
In addition, as mentioned in the previous sections, the uncertainties from the irreducible background modelling, from the reducible background shape smoothing procedure and from the signal yield difference between simulation and parameterization are all taken into account.
%Table~\ref{tab:np_list} summarizes the experimental systematics considered in this analysis that affect either the normalization of total event yield or the shape of \mfl distribution.
The impact of the largest systematic uncertainties and their values obtained from the statistical fit are studied in Section~\ref{sec:hmhzz_result_4l}.
\iffalse
\begin{table}
\centering
\caption{
A list of the experimental systematics considered in this analysis. The NPs have been separated by whether they only
affect the normalisation (left column) or if they affect the shape (right column) of the \mfl distribution. They are
further subdivided into the primary objects that they affect.
}
\begin{spacing}{0.65}
\small
\begin{tabular}{l|l}
\toprule
\multicolumn{1}{c}{Normalisation NPs} & \multicolumn{1}{c}{Shape NPs} \\
\midrule
\multicolumn{2}{c}{\textbf{Electrons}} \\
\midrule
\texttt{EL\_EFF\_ID\_CorrUncertaintyNP[0-15]} & \texttt{EG\_RESOLUTION\_ALL} \\
\texttt{EL\_EFF\_ID\_SIMPLIFIED\_UncorrUncertaintyNP[0-17]} & \texttt{EG\_SCALE\_ALLCORR} \\
\texttt{EL\_EFF\_Iso\_TOTAL\_1NPCOR\_PLUS\_UNCOR} & \texttt{EG\_SCALE\_E4SCINTILLATOR} \\
\texttt{EL\_EFF\_Reco\_TOTAL\_1NPCOR\_PLUS\_UNCOR} & \texttt{EG\_SCALE\_LARCALIB\_EXTRA2015PRE} \\
~ & \texttt{EG\_SCALE\_LARTEMPERATURE\_EXTRA2015PRE} \\
~ & \texttt{EG\_SCALE\_LARTEMPERATURE\_EXTRA2016PRE} \\
\midrule
\multicolumn{2}{c}{\textbf{Muons}} \\
\midrule
\texttt{MUON\_EFF\_ISO\_STAT} & \texttt{MUON\_ID} \\
\texttt{MUON\_EFF\_ISO\_SYS} & \texttt{MUON\_MS} \\
\texttt{MUON\_EFF\_RECO\_STAT} & \texttt{MUON\_SAGITTA\_RESBIAS} \\
\texttt{MUON\_EFF\_RECO\_STAT\_LOWPT} & \texttt{MUON\_SAGITTA\_RHO} \\
\texttt{MUON\_EFF\_RECO\_SYS} & \texttt{MUON\_SCALE} \\
\texttt{MUON\_EFF\_RECO\_SYS\_LOWPT} & ~ \\
\texttt{MUON\_EFF\_TTVA\_STAT} & ~ \\
\texttt{MUON\_EFF\_TTVA\_SYS} & ~ \\
\midrule
\multicolumn{2}{c}{\textbf{Jets}} \\
\midrule
~ & \texttt{JET\_BJES\_Response} \\
~ & \texttt{JET\_EffectiveNP\_[1-7]} \\
~ & \texttt{JET\_EffectiveNP\_8restTerm} \\
~ & \texttt{JET\_EtaIntercalibration\_Modelling} \\
~ & \texttt{JET\_EtaIntercalibration\_NonClosure\_highE} \\
~ & \texttt{JET\_EtaIntercalibration\_NonClosure\_negEta} \\
~ & \texttt{JET\_EtaIntercalibration\_NonClosure\_posEta} \\
~ & \texttt{JET\_EtaIntercalibration\_TotalStat} \\
~ & \texttt{JET\_Flavor\_Composition} \\
~ & \texttt{JET\_Flavor\_Response} \\
~ & \texttt{JET\_JER\_DataVsMC} \\
~ & \texttt{JET\_JER\_EffectiveNP\_[1-6]} \\
~ & \texttt{JET\_JER\_EffectiveNP\_7restTerm} \\
~ & \texttt{JET\_Pileup\_OffsetMu} \\
~ & \texttt{JET\_Pileup\_OffsetNPV} \\
~ & \texttt{JET\_Pileup\_PtTerm} \\
~ & \texttt{JET\_Pileup\_RhoTopology} \\
~ & \texttt{JET\_PunchThrough\_MC16} \\
~ & \texttt{JET\_SingleParticle\_HighPt} \\
\midrule
\multicolumn{2}{c}{\textbf{Other}} \\
\midrule
\texttt{HOEW\_QCD\_syst} & ~ \\
\texttt{HOEW\_syst} & ~ \\
\texttt{HOQCD\_scale\_syst} & ~ \\
\texttt{PRW\_DATASF} & ~ \\
\bottomrule
\end{tabular}
\end{spacing}
\label{tab:np_list}
\end{table}
\fi
|
\section{SoC Control}
\pulpino features a small and simple APB peripheral which provides information about the platform and the means
for pad muxing on the ASIC.
The following registers can be accessed.
\regDesc{0x1A10\_7000}{0x0000\_0000}{PAD Mux}{
\begin{bytefield}[rightcurly=.,endianness=big]{32}
\bitheader{31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0} \\
\begin{rightwordgroup}{PAD\_MUX}
\bitbox{32}{PADMUX}
\end{rightwordgroup}\\
\end{bytefield}
}{
\regItem{Bit 31:0}{PADMUX}{
The content of this register can be used to multiplex pads when targeting an ASIC.
}
}
\regDesc{0x1A10\_7004}{0x0000\_0001}{CLK Gate}{
\begin{bytefield}[rightcurly=.,endianness=big]{32}
\bitheader{31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0} \\
\begin{rightwordgroup}{CLK\_GATE}
\bitbox{31}{Unused}
\bitbox{1}{E}
\end{rightwordgroup}\\
\end{bytefield}
}{
\regItem{Bit 31:0}{CLK GATE}{
This register contains the value of the clock gate enable signal (E) used to clock gate the core. It is active high.
}
}
\regDesc{0x1A10\_7008}{0x0000\_8000}{Boot Address}{
\begin{bytefield}[rightcurly=.,endianness=big]{32}
\bitheader{31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0} \\
\begin{rightwordgroup}{BOOT\_ADR}
\bitbox{32}{Boot Address}
\end{rightwordgroup}\\
\end{bytefield}
}{
\regItem{Bit 31:0}{Boot Address}{
This register holds the boot address. It is possible to boot from a ROM, or directly from the instruction memory.
}
}
\regDesc{0x1A10\_7010}{0x0000\_8082}{Info}{
\begin{bytefield}[rightcurly=.,endianness=big]{32}
\bitheader{31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0} \\
\begin{rightwordgroup}{INFO}
\bitbox{4}{Unused}
\bitbox{1}{D}
\bitbox{1}{I}
\bitbox{5}{Rom Size}
\bitbox{8}{Inst Ram Size}
\bitbox{8}{Data Ram Size}
\bitbox{5}{Version}
\end{rightwordgroup}\\
\end{bytefield}
}{
\regItem{Bit 31:0}{Info Register}{
This register holds information about the PULPino architecture. Version contains the PULPino version. The flags D and I report whether a data/instruction cache is present. Rom Size defines the size of the boot ROM. Finally, Inst Ram Size and Data Ram Size define the sizes of the RAMs in multiples of 8 kB.
}
}
\regDesc{0x1A10\_7014}{0x0000\_0001}{Status}{
\begin{bytefield}[rightcurly=.,endianness=big]{32}
\bitheader{31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0} \\
\begin{rightwordgroup}{STATUS}
\bitbox{31}{Unused}
\bitbox{1}{S}
\end{rightwordgroup}\\
\end{bytefield}
}{
\regItem{Bit 31:0}{Status Register}{
The status register bit S can be used to hold the final result of a test for verification purposes.
}
}
\regDesc{0x1A10\_7020 - 0x1A10\_703C}{0x0000\_0000}{PAD Configuration}{
\begin{bytefield}[rightcurly=.,endianness=big]{32}
\bitheader{31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0} \\
\begin{rightwordgroup}{PAD CFG0-7}
\bitbox{32}{PAD Configuration}
\end{rightwordgroup}\\
\end{bytefield}
}{
\regItem{Bit 31:0}{PAD CFG0-7}{
These 8 registers can be used on ASIC targets to configure pads, e.g. pull-up and pull-down values.
}
}
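As an illustration (a sketch, not part of the original specification), the registers above can be accessed from C via memory-mapped volatile pointers; the INFO bit offsets follow the layout described above:
\begin{verbatim}
#include <stdint.h>

/* Register addresses taken from the descriptions above. */
#define SOC_CTRL_BASE 0x1A107000u
#define PADMUX   (*(volatile uint32_t *)(SOC_CTRL_BASE + 0x00))
#define CLK_GATE (*(volatile uint32_t *)(SOC_CTRL_BASE + 0x04))
#define BOOT_ADR (*(volatile uint32_t *)(SOC_CTRL_BASE + 0x08))
#define INFO     (*(volatile uint32_t *)(SOC_CTRL_BASE + 0x10))

/* Decode the INFO register according to the bit layout above. */
void read_soc_info(void)
{
    uint32_t info = INFO;
    uint32_t version     = info & 0x1F;               /* bits  4:0  */
    uint32_t data_ram_kB = ((info >> 5) & 0xFF) * 8;  /* bits 12:5  */
    uint32_t inst_ram_kB = ((info >> 13) & 0xFF) * 8; /* bits 20:13 */
    uint32_t rom_size    = (info >> 21) & 0x1F;       /* bits 25:21 */
    uint32_t has_icache  = (info >> 26) & 0x1;        /* bit  26    */
    uint32_t has_dcache  = (info >> 27) & 0x1;        /* bit  27    */
    (void)version; (void)data_ram_kB; (void)inst_ram_kB;
    (void)rom_size; (void)has_icache; (void)has_dcache;
}
\end{verbatim}
For example, the reset value \texttt{0x0000\_8082} decodes to version 2 with 32~kB of instruction RAM and 32~kB of data RAM.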
|
Speculation ID: 1
Committed a discarded buffer
|
\documentclass[a4paper]{article}
% generated by Docutils <http://docutils.sourceforge.net/>
% rubber: set program xelatex
\usepackage{fontspec}
% \defaultfontfeatures{Scale=MatchLowercase}
% straight double quotes (defined T1 but missing in TU):
\ifdefined \UnicodeEncodingName
\DeclareTextCommand{\textquotedbl}{\UnicodeEncodingName}{%
{\addfontfeatures{RawFeature=-tlig,Mapping=}\char34}}%
\fi
\usepackage{ifthen}
\usepackage{alltt}
\usepackage{color}
\setcounter{secnumdepth}{0}
%%% Custom LaTeX preamble
\setmainfont{Linux Libertine O}
\setsansfont{Linux Biolinum O}
\setmonofont[Scale=MatchLowercase]{Fira Code}
\usepackage{xeCJK}
\setCJKmainfont{Noto Sans CJK JP}
\setCJKsansfont{Noto Sans CJK JP}
\setCJKmonofont[Scale=0.9777]{Noto Sans CJK JP}
%%% User specified packages and stylesheets
\usepackage{alectryon}
\usepackage{tango_subtle}
%%% Fallback definitions for Docutils-specific commands
% basic code highlight:
\providecommand*\DUrolecomment[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUroledeleted[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUrolekeyword[1]{\textbf{#1}}
\providecommand*\DUrolestring[1]{\textit{#1}}
% inline markup (custom roles)
% \DUrole{#1}{#2} tries \DUrole#1{#2}
\providecommand*{\DUrole}[2]{%
\ifcsname DUrole#1\endcsname%
\csname DUrole#1\endcsname{#2}%
\else
% backwards compatibility: try \docutilsrole#1{#2}
\ifcsname docutilsrole#1\endcsname%
\PackageWarningNoLine{docutils}{Command prefix "docutilsrole" is
deprecated, \MessageBreak use `\protect\DUrole #1`}
\csname docutilsrole#1\endcsname{#2}%
\else%
#2%
\fi%
\fi%
}
% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[colorlinks=true,linkcolor=blue,urlcolor=blue]{hyperref}
\usepackage{bookmark}
\urlstyle{same} % normal text font (alternatives: tt, rm, sf)
}{}
\hypersetup{
pdftitle={Using the marker-placement mini-language},
}
\title{Using the marker-placement mini-language%
\label{using-the-marker-placement-mini-language}}
\author{}
\date{}
%%% Body
\begin{document}
\maketitle
To compile:
\begin{quote}
\begin{alltt}
$ alectryon references.rst
# ReST → HTML; produces ‘references.html’
$ DOCUTILSCONFIG=references.docutils.conf alectryon \textbackslash{}
references.rst -o references.xe.tex --latex-dialect xelatex
# ReST → HTML; produces ‘references.xe.tex’
\end{alltt}
\end{quote}
\section{Referring to parts of a goal%
\label{referring-to-parts-of-a-goal}%
}
Alectryon supports references to individual sentences and hypotheses within a code fragment. The easiest way to reference a sentence is to use \texttt{:mref:`search-term`}. Alectryon will search for that text and automatically add a label to the first matching sentence of the proof. For example:
\begin{quote}
\begin{alectryon}
% Generator: Alectryon
\sep
\anchor{setup}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Fixpoint}~\PY{n+nf}{plus\PYZus{}comm}~\PY{o}{(}\PY{n+nv}{n}~\PY{n+nv}{m}\PY{o}{:}~\PY{n}{nat}\PY{o}{)}~\PY{o}{\PYZob{}}\PY{n+nv}{struct}~\PY{n+nv}{n}\PY{o}{\PYZcb{}}~\PY{o}{:}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}\PY{o}{.}\anchor{references-rst-fixpoint-plus-comm-0}\mrefmarker{1}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}
\hyp{plus\char`\_comm}{\PY{k+kr}{forall}~\PY{n+nv}{n}~\PY{n+nv}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{,}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}}
\sep
\hyp{n, m}{\PY{n}{nat}}
\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}
\end{conclusion}
\end{goal}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Proof}\PY{o}{.}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}
\hyp{plus\char`\_comm}{\PY{k+kr}{forall}~\PY{n+nv}{n}~\PY{n+nv}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{,}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}}
\sep
\hyp{n, m}{\PY{n}{nat}}
\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}
\end{conclusion}
\end{goal}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
~~\PY{n+nb}{destruct}~\PY{n}{n}~\PY{n+nb}{eqn}\PY{o}{:}\PY{n}{Heq}\PY{o}{.}\anchor{references-rst-destruct-n-0}\mrefmarker{◉}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\anchor{references-rst-s-destruct-n-g-1-0}
\sep
\begin{hyps}
\hyp{plus\char`\_comm}{\PY{k+kr}{forall}~\PY{n+nv}{n}~\PY{n+nv}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{,}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}\anchor{references-rst-s-destruct-n-g-1-h-plus-comm-0}\mrefmarker{8}}
\sep
\hyp{n, m}{\PY{n}{nat}\anchor{references-rst-s-destruct-n-g-1-h-n-0}\mrefmarker{3}}
\sep
\hyp{Heq}{\PY{n}{n}~\PY{o}{=}~\PY{l+m+mi}{0}\anchor{references-rst-s-destruct-n-g-1-h-n-0-0}\mrefmarker{4}}
\end{hyps}
\sep
\infrule{\mrefmarker{2}}
\sep
\begin{conclusion}
\PY{l+m+mi}{0}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{l+m+mi}{0}\anchor{references-rst-s-destruct-n-ccl-0}\mrefmarker{5}
\end{conclusion}
\end{goal}
\sep
\begin{extragoals}
\begin{goal}
\anchor{references-rst-s-destruct-n-g-s-n0-0}
\sep
\begin{hyps}
\hyp{plus\char`\_comm}{\PY{k+kr}{forall}~\PY{n+nv}{n}~\PY{n+nv}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{,}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}}
\sep
\hyp{n, m, n0}{\PY{n}{nat}}
\sep
\hyp{Heq}{\PY{n}{n}~\PY{o}{=}~\PY{n}{S}~\PY{n}{n0}}
\end{hyps}
\sep
\infrule{\mrefmarker{6}}
\sep
\begin{conclusion}
\PY{n}{S}~\PY{n}{n0}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{S}~\PY{n}{n0}\anchor{references-rst-s-destruct-n-g-s-n0-ccl-0}\mrefmarker{7}
\end{conclusion}
\end{goal}
\end{extragoals}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
~~\PY{o}{\PYZhy{}}~\PY{c}{(*~Base~case~*)}\anchor{references-rst-io-setup-s-base-case-0}\mrefmarker{9}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}
\hyp{plus\char`\_comm}{\PY{k+kr}{forall}~\PY{n+nv}{n}~\PY{n+nv}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{,}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}}
\sep
\hyp{n, m}{\PY{n}{nat}}
\sep
\hyp{Heq}{\PY{n}{n}~\PY{o}{=}~\PY{l+m+mi}{0}}
\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{l+m+mi}{0}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{l+m+mi}{0}
\end{conclusion}
\end{goal}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
~~~~\PY{n+nb}{rewrite}~\PY{o}{\PYZlt{}\PYZhy{}}~\PY{n}{plus\PYZus{}n\PYZus{}O}\PY{o}{;}~\PY{n+nb+bp}{reflexivity}\PY{o}{.}
\end{input}
\end{sentence}
\end{alectryon}
The \texttt{\DUrole{code}{\DUrole{highlight}{\DUrole{coq}{\DUrole{kn}{Fixpoint}}}}} command (\hyperref[references-rst-fixpoint-plus-comm-0]{1}) indicates that we are beginning an inductive proof.
\end{quote}
Optionally, the label can be picked manually, using \texttt{:mref:`label <target>`}:
\begin{quote}
The proof starts with a case analysis, indicated by “\hyperref[references-rst-destruct-n-0]{◉}”.
\end{quote}
Instead of whole sentences, it is possible to refer to individual goals and hypotheses:
\begin{quote}
In the first case (\hyperref[references-rst-s-destruct-n-g-1-0]{2}), we see the variable \texttt{\DUrole{code}{\DUrole{highlight}{\DUrole{coq}{\DUrole{n}{n}}}}} in the context (\hyperref[references-rst-s-destruct-n-g-1-h-n-0]{3}), and we see that it is \texttt{\DUrole{code}{\DUrole{highlight}{\DUrole{coq}{\DUrole{mi}{0}}}}} (\hyperref[references-rst-s-destruct-n-g-1-h-n-0-0]{4}); notice how the conclusion of the first goal \hyperref[references-rst-s-destruct-n-ccl-0]{5} does not mention \texttt{\DUrole{code}{\DUrole{highlight}{\DUrole{coq}{\DUrole{n}{n}}}}} (it says \texttt{\DUrole{code}{\DUrole{highlight}{\DUrole{coq}{\DUrole{mi}{0}}}}} instead). In the second case \hyperref[references-rst-s-destruct-n-g-s-n0-0]{6}, the conclusion (\hyperref[references-rst-s-destruct-n-g-s-n0-ccl-0]{7}) mentions \texttt{\DUrole{code}{\DUrole{highlight}{\DUrole{coq}{\DUrole{n}{S} \DUrole{n}{n0}}}}} instead.
\end{quote}
Note that we can safely refer multiple times to the same object, even using a different reference:
\begin{quote}
\begin{itemize}
\item \hyperref[references-rst-s-destruct-n-g-1-h-plus-comm-0]{8}
\item \hyperref[references-rst-s-destruct-n-g-1-h-plus-comm-0]{8}
\item \hyperref[references-rst-s-destruct-n-g-1-h-plus-comm-0]{8}
\item \hyperref[references-rst-s-destruct-n-g-1-h-plus-comm-0]{8}
\end{itemize}
\end{quote}
To allow forward- and back-references, counters are not reset from one block to the next:
\begin{quote}
\begin{alectryon}
% Generator: Alectryon
\sep
\begin{txt}
~~
\end{txt}
\sep
\begin{sentence}
\begin{input}
\PY{o}{\PYZhy{}}~\PY{c}{(*~Induction~*)}\anchor{references-rst-induction-0}\mrefmarker{10}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}
\hyp{plus\char`\_comm}{\PY{k+kr}{forall}~\PY{n+nv}{n}~\PY{n+nv}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{,}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}}
\sep
\hyp{n, m, n0}{\PY{n}{nat}}
\sep
\hyp{Heq}{\PY{n}{n}~\PY{o}{=}~\PY{n}{S}~\PY{n}{n0}}
\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{S}~\PY{n}{n0}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{S}~\PY{n}{n0}
\end{conclusion}
\end{goal}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
~~~~\PY{n+nb}{simpl}\PY{o}{.}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}
\hyp{plus\char`\_comm}{\PY{k+kr}{forall}~\PY{n+nv}{n}~\PY{n+nv}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{,}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}}
\sep
\hyp{n, m, n0}{\PY{n}{nat}}
\sep
\hyp{Heq}{\PY{n}{n}~\PY{o}{=}~\PY{n}{S}~\PY{n}{n0}}
\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{S}~\PY{o}{(}\PY{n}{n0}~\PY{o}{+}~\PY{n}{m}\PY{o}{)}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{S}~\PY{n}{n0}
\end{conclusion}
\end{goal}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
~~~~\PY{n+nb}{rewrite}~\PY{o}{(}\PY{n}{plus\PYZus{}comm}~\PY{n}{n0}\PY{o}{).}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}
\hyp{plus\char`\_comm}{\PY{k+kr}{forall}~\PY{n+nv}{n}~\PY{n+nv}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{,}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}}
\sep
\hyp{n, m, n0}{\PY{n}{nat}}
\sep
\hyp{Heq}{\PY{n}{n}~\PY{o}{=}~\PY{n}{S}~\PY{n}{n0}}
\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{S}~\PY{o}{(}\PY{n}{m}~\PY{o}{+}~\PY{n}{n0}\PY{o}{)}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{S}~\PY{n}{n0}
\end{conclusion}
\end{goal}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
~~~~\PY{n+nb}{rewrite}~\PY{n}{plus\PYZus{}n\PYZus{}Sm}\PY{o}{.}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}
\hyp{plus\char`\_comm}{\PY{k+kr}{forall}~\PY{n+nv}{n}~\PY{n+nv}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{,}~\PY{n}{n}~\PY{o}{+}~\PY{n}{m}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{n}}
\sep
\hyp{n, m, n0}{\PY{n}{nat}}
\sep
\hyp{Heq}{\PY{n}{n}~\PY{o}{=}~\PY{n}{S}~\PY{n}{n0}}
\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{m}~\PY{o}{+}~\PY{n}{S}~\PY{n}{n0}~\PY{o}{=}~\PY{n}{m}~\PY{o}{+}~\PY{n}{S}~\PY{n}{n0}
\end{conclusion}
\end{goal}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
~~~~\PY{n+nb+bp}{reflexivity}\PY{o}{.}\nl
\end{input}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Qed}\PY{o}{.}
\end{input}
\end{sentence}
\end{alectryon}
\begin{itemize}
\item Bullets (\texttt{-}, \texttt{+}, \texttt{*}) delimit subproofs (\hyperref[references-rst-io-setup-s-base-case-0]{9}, \hyperref[references-rst-induction-0]{10})
\item It all started at \hyperref[references-rst-fixpoint-plus-comm-0]{1}
\end{itemize}
\end{quote}
Custom counter styles can be defined using the \texttt{.. role::} directive and the \texttt{:counter-style:} option.
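For instance, a definition along these lines (the role name \texttt{gref} is hypothetical; the option name is the one given above):
\begin{quote}
\begin{alltt}
.. role:: gref(mref)
   :counter-style: α β γ δ
\end{alltt}
\end{quote}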
Here is how it looks:
\begin{quote}
The following commands print information about an identifier \hyperref[references-rst-io-cp-s-about-0]{α}, print its definition \hyperref[references-rst-io-cp-s-print-0]{β}, and compute the type of a term \hyperref[references-rst-io-cp-s-check-0]{γ} or its reduction \hyperref[references-rst-io-cp-s-compute-0]{δ}.
\begin{alectryon}
% Generator: Alectryon
\sep
\anchor{cp}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{About}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}\PY{o}{.}\anchor{references-rst-io-cp-s-about-0}\mrefmarker{α}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{o}{:}~\PY{n}{nat}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{nat}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{nat}\nl
\nl
\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{k+kr}{is}~\PY{n}{not}~\PY{n}{universe}~\PY{n}{polymorphic}\nl
\PY{k+kn}{Arguments}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{o}{(}\PY{n}{\PYZus{}}~\PY{n}{\PYZus{}}\PY{o}{)\PYZpc{}}\PY{n}{nat\PYZus{}scope}\nl
\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{k+kr}{is}~\PY{n}{transparent}\nl
\PY{n}{Expands}~\PY{n}{to}\PY{o}{:}~\PY{n}{Constant}~\PY{n}{Coq}\PY{o}{.}\PY{n}{Init}\PY{o}{.}\PY{n}{Nat}\PY{o}{.}\PY{n}{add}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Print}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}\PY{o}{.}\anchor{references-rst-io-cp-s-print-0}\mrefmarker{β}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{o}{=}~\nl
\PY{k+kr}{fix}~\PY{n}{add}~\PY{o}{(}\PY{n}{n}~\PY{n}{m}~\PY{o}{:}~\PY{n}{nat}\PY{o}{)}~\PY{o}{\PYZob{}}\PY{k+kr}{struct}~\PY{n}{n}\PY{o}{\PYZcb{}}~\PY{o}{:}~\PY{n}{nat}~\PY{o}{:=}\nl
~~\PY{k+kr}{match}~\PY{n}{n}~\PY{k+kr}{with}\nl
~~\PY{o}{|}~\PY{l+m+mi}{0}~\PY{o}{=\PYZgt{}}~\PY{n}{m}\nl
~~\PY{o}{|}~\PY{n}{S}~\PY{n}{p}~\PY{o}{=\PYZgt{}}~\PY{n}{S}~\PY{o}{(}\PY{n}{add}~\PY{n}{p}~\PY{n}{m}\PY{o}{)}\nl
~~\PY{k+kr}{end}\nl
~~~~~\PY{o}{:}~\PY{n}{nat}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{nat}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{nat}\nl
\nl
\PY{k+kn}{Arguments}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{o}{(}\PY{n}{\PYZus{}}~\PY{n}{\PYZus{}}\PY{o}{)\PYZpc{}}\PY{n}{nat\PYZus{}scope}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Check}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{l+m+mi}{2}~\PY{l+m+mi}{3}\PY{o}{.}\anchor{references-rst-io-cp-s-check-0}\mrefmarker{γ}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{l+m+mi}{2}~\PY{o}{+}~\PY{l+m+mi}{3}\nl
~~~~~\PY{o}{:}~\PY{n}{nat}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Compute}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{l+m+mi}{2}~\PY{l+m+mi}{3}\PY{o}{.}\anchor{references-rst-io-cp-s-compute-0}\mrefmarker{δ}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{o}{=}~\PY{l+m+mi}{5}\nl
\PY{o}{:}~\PY{n}{nat}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\sep
\begin{txt}
\nl
\end{txt}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Eval}~\PY{n+nb}{simpl}~\PY{k+kr}{in}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{l+m+mi}{2}~\PY{l+m+mi}{3}\PY{o}{.}\anchor{references-rst-io-cp-s-simpl-0}\mrefmarker{い}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{o}{=}~\PY{l+m+mi}{5}\nl
\PY{o}{:}~\PY{n}{nat}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Eval}~\PY{n+nb}{cbn}~\PY{k+kr}{in}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{l+m+mi}{2}~\PY{l+m+mi}{3}\PY{o}{.}\anchor{references-rst-io-cp-s-cbn-0}\mrefmarker{ろ}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{o}{=}~\PY{l+m+mi}{5}\nl
\PY{o}{:}~\PY{n}{nat}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Eval}~\PY{n+nb}{cbv}~\PY{k+kr}{in}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{l+m+mi}{2}~\PY{l+m+mi}{3}\PY{o}{.}\anchor{references-rst-io-cp-s-cbv-0}\mrefmarker{は}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{o}{=}~\PY{l+m+mi}{5}\nl
\PY{o}{:}~\PY{n}{nat}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Eval}~\PY{n+nb}{lazy}~\PY{k+kr}{in}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{l+m+mi}{2}~\PY{l+m+mi}{3}\PY{o}{.}\anchor{references-rst-io-cp-s-lazy-0}\mrefmarker{に}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{o}{=}~\PY{l+m+mi}{5}\nl
\PY{o}{:}~\PY{n}{nat}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Eval}~\PY{n+nb}{vm\PYZus{}compute}~\PY{k+kr}{in}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{l+m+mi}{2}~\PY{l+m+mi}{3}\PY{o}{.}\anchor{references-rst-io-cp-s-vm-compute-0}\mrefmarker{ほ}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{o}{=}~\PY{l+m+mi}{5}\nl
\PY{o}{:}~\PY{n}{nat}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Eval}~\PY{n+nb}{pattern}~\PY{l+m+mi}{2}~\PY{k+kr}{in}~\PY{n}{Nat}\PY{o}{.}\PY{n}{add}~\PY{l+m+mi}{2}~\PY{l+m+mi}{3}\PY{o}{.}\anchor{references-rst-io-cp-s-pattern-0}\mrefmarker{へ}
\end{input}
\sep
\begin{output}
\begin{messages}
\begin{message}
\PY{o}{=}~\PY{o}{(}\PY{k+kr}{fun}~\PY{n+nv}{n}~\PY{o}{:}~\PY{n}{nat}~\PY{o}{=\PYZgt{}}~\PY{n}{n}~\PY{o}{+}~\PY{n}{S}~\PY{n}{n}\PY{o}{)}~\PY{l+m+mi}{2}\nl
\PY{o}{:}~\PY{n}{nat}
\end{message}
\end{messages}
\end{output}
\end{sentence}
\end{alectryon}
The second batch of commands perform reduction with a custom strategy: \hyperref[references-rst-io-cp-s-simpl-0]{い} \hyperref[references-rst-io-cp-s-cbn-0]{ろ} \hyperref[references-rst-io-cp-s-cbv-0]{は} \hyperref[references-rst-io-cp-s-lazy-0]{に} \hyperref[references-rst-io-cp-s-vm-compute-0]{ほ} \hyperref[references-rst-io-cp-s-pattern-0]{へ}.
\end{quote}
Each inline reference is a link to the corresponding code fragment.
\section{Using references to customize display (\textbf{experimental})%
\label{using-references-to-customize-display-experimental}%
}
References can also be used to customize the display of goals and hypotheses. In the following, hypotheses whose name start with \texttt{l} are omitted, and so are hypotheses named \texttt{a} and \texttt{A}. After the call to \texttt{induction} (\hyperref[references-rst-io-pr-s-induction-1-0]{11}) the output is further limited to just goals 2 and 4, by excluding all goals and re-including only 2 and 4. In goal 4, hypotheses whose type is exactly \texttt{list A} are shown, regardless of previous status, so \texttt{l}, \texttt{l'}, \texttt{l''} are visible (\hyperref[references-rst-io-pr-s-induction-1-g-4-h-l-0]{12}).
\begin{quote}
\begin{alectryon}
% Generator: Alectryon
\sep
\anchor{pr}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Theorem}~\PY{n+nf}{Permutation\PYZus{}In}~\PY{o}{\PYZob{}}\PY{n+nv}{A}\PY{o}{\PYZcb{}}~\PY{o}{(}\PY{n+nv}{l}~\PY{n+nv}{l\PYZsq{}}~\PY{o}{:}~\PY{n}{list}~\PY{n}{A}\PY{o}{)}~\PY{o}{(}\PY{n+nv}{a}\PY{o}{:}~\PY{n}{A}\PY{o}{)}~\PY{o}{:}\nl
~~\PY{n}{Permutation}~\PY{n}{l}~\PY{n}{l\PYZsq{}}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l\PYZsq{}}\PY{o}{.}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{Permutation}~\PY{n}{l}~\PY{n}{l\PYZsq{}}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l\PYZsq{}}
\end{conclusion}
\end{goal}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Proof}\PY{o}{.}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{Permutation}~\PY{n}{l}~\PY{n}{l\PYZsq{}}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l\PYZsq{}}
\end{conclusion}
\end{goal}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
~~\PY{n+nb}{induction}~\PY{l+m+mi}{1}\PY{o}{;}~\PY{n+nb}{intros}~\PY{o}{*}~\PY{n}{Hin}\PY{o}{.}\anchor{references-rst-io-pr-s-induction-1-0}\mrefmarker{11}
\end{input}
\sep
\begin{output}
\begin{goals}
\begin{goal}
\begin{hyps}
\hyp{H}{\PY{n}{Permutation}~\PY{n}{l}~\PY{n}{l\PYZsq{}}}
\sep
\hyp{IHPermutation}{\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l\PYZsq{}}}
\sep
\hyp{Hin}{\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{o}{(}\PY{n}{x}~\PY{o}{::}~\PY{n}{l}\PY{o}{)}}
\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{o}{(}\PY{n}{x}~\PY{o}{::}~\PY{n}{l\PYZsq{}}\PY{o}{)}
\end{conclusion}
\end{goal}
\sep
\begin{extragoals}
\begin{goal}
\begin{hyps}
\hyp{l, l\char`\', l\char`\'\char`\'}{\PY{n}{list}~\PY{n}{A}\anchor{references-rst-io-pr-s-induction-1-g-4-h-l-0}\mrefmarker{12}}
\sep
\hyp{H}{\PY{n}{Permutation}~\PY{n}{l}~\PY{n}{l\PYZsq{}}}
\sep
\hyp{H0}{\PY{n}{Permutation}~\PY{n}{l\PYZsq{}}~\PY{n}{l\PYZsq{}\PYZsq{}}}
\sep
\hyp{IHPermutation1}{\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l\PYZsq{}}}
\sep
\hyp{IHPermutation2}{\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l\PYZsq{}}~\PY{o}{\PYZhy{}\PYZgt{}}~\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l\PYZsq{}\PYZsq{}}}
\sep
\hyp{Hin}{\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l}}
\end{hyps}
\sep
\infrule{}
\sep
\begin{conclusion}
\PY{n}{List}\PY{o}{.}\PY{n}{In}~\PY{n}{a}~\PY{n}{l\PYZsq{}\PYZsq{}}
\end{conclusion}
\end{goal}
\end{extragoals}
\end{goals}
\end{output}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
~~\PY{k+kp}{all}\PY{o}{:}~\PY{n+nb}{simpl}~\PY{k+kr}{in}~\PY{o}{*;}~\PY{n+nb+bp}{tauto}\PY{o}{.}\nl
\end{input}
\end{sentence}
\sep
\begin{sentence}
\begin{input}
\PY{k+kn}{Qed}\PY{o}{.}
\end{input}
\end{sentence}
\end{alectryon}
\end{quote}
\end{document}
|
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <boost/format.hpp>
#include "perceptron.h"
#include "utils.h"
#include <omp.h>
#include <ctime>
using namespace std;
using namespace Eigen;
Perceptron::Perceptron(const int& n_iterations, const double& learning_rate, const double& tolerance, const int& seed, const int& early_stopping_round) : n_iterations(n_iterations), learning_rate(learning_rate), tolerance(tolerance), seed(seed), early_stopping_round(early_stopping_round) {}
Perceptron::~Perceptron(){}
void Perceptron::fit(const MatrixXd& X, const VectorXd& y, VectorXd (*activation)(Eigen::VectorXd x), double (*loss)(VectorXd y, VectorXd y_pred), double (*metric)(VectorXd y, VectorXd pred)){
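// NOTE: the function-pointer parameters `activation` and `loss` are currently
// unused below; the body hard-codes Utils::sigmoid and Utils::squareLoss.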
int n_features = X.cols();
srand(seed);
// initialize weights between [-1/sqrt(n_features+1), 1/sqrt(n_features+1)]
double limit = 1/sqrt(n_features+1);
W = limit * VectorXd::Random(X.cols()+1);
MatrixXd X_new(X.rows(), X.cols()+1);
X_new<<X, MatrixXd::Ones(X.rows(), 1);
double best_acc = 0.0;
int become_worse_round = 0;
for(int iter = 0; iter < n_iterations; iter++){
// calculate outputs
VectorXd outputs = X_new*W;
VectorXd y_pred = predict_prob(X);
VectorXd E = y - y_pred;
// calculate the loss gradient w.r.t. the input of the activation function
VectorXd loss_gradient = (-E.array() * Utils::sigmoid(outputs).array()*(VectorXd::Ones(X.rows()) - Utils::sigmoid(outputs)).array()).matrix();
W = W - learning_rate*X_new.transpose()*loss_gradient;
y_pred = predict_prob(X);
double loss = Utils::squareLoss(y, y_pred);
double acc = metric(y, y_pred);
cout << boost::format("Iteration: %d, squareloss:%.5f, accuracy:%.5f") %iter %loss %acc << endl;
if(loss <= tolerance) break;
if(acc < best_acc){
become_worse_round +=1;
}else{
become_worse_round = 0;
best_acc = acc;
}
if(become_worse_round >= early_stopping_round){
cout << "Early stopping. the best accuracy: " << best_acc << endl;
break;
}
}
}
VectorXd Perceptron::predict_prob(const MatrixXd& X){
MatrixXd X_new(X.rows(), X.cols()+1);
X_new << X, MatrixXd::Ones(X.rows(), 1);
VectorXd y_pred_prob = Utils::sigmoid(X_new*W);
return y_pred_prob;
}
VectorXi Perceptron::predict(const MatrixXd& X){
VectorXd ret_ = predict_prob(X);
int n = ret_.size();
VectorXi ret(n);
#pragma omp parallel for
for(int i = 0; i < n; i++){
ret(i) = ret_(i)>0.5?1:0;
}
return ret;
}
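// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file). It assumes utils.h
// provides Utils::sigmoid and Utils::squareLoss (used above) plus an
// accuracy-style metric with signature double(VectorXd, VectorXd); the name
// Utils::accuracy below is an assumption.
#ifdef PERCEPTRON_DEMO
int main(){
    // 100 samples, 5 features; label is the sign of the first feature.
    MatrixXd X = MatrixXd::Random(100, 5);
    VectorXd y = (X.col(0).array() > 0.0).cast<double>().matrix();
    Perceptron clf(200, 0.01, 1e-4, 42, 10);
    clf.fit(X, y, Utils::sigmoid, Utils::squareLoss, Utils::accuracy);
    VectorXi pred = clf.predict(X);
    return 0;
}
#endif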
|
Ch = randSeisChannel()
clear_notes!(Ch)
@test length(Ch.notes) == 1
printstyled(" annotation and logging\n", color=:light_green)
S = randSeisData(2)
id_str = "XX.STA.00.EHZ"
S.id[1] = id_str
printstyled(" note!\n", color=:light_green)
note!(S, 1, "hi")
@test occursin("hi", S.notes[1][end])
note!(S, "poor SNR")
@test occursin("poor SNR", S.notes[2][end])
note!(S, string(id_str, " SNR OK"))
@test occursin(" SNR OK", S.notes[1][end])
note!(S, id_str, "why is it clipping again")
@test occursin("clipping", S.notes[1][end])
printstyled(" clear_notes!\n", color=:light_green)
clear_notes!(S, 1)
@test length(S.notes[1]) == 1
@test occursin("notes cleared.", S.notes[1][1])
clear_notes!(S)
for i = 1:2
@test length(S.notes[i]) == 1
@test occursin("notes cleared.", S.notes[i][1])
end
note!(S, 2, "whee")
clear_notes!(S, id_str)
@test S.notes[1] != S.notes[2]
@test_throws ErrorException clear_notes!(S, "YY.STA.11.BHE")
clear_notes!(S)
Ev = randSeisEvent()
clear_notes!(Ev)
for i = 1:Ev.data.n
@test length(Ev.data.notes[i]) == 1
@test occursin("notes cleared.", Ev.data.notes[i][1])
end
@test length(Ev.hdr.notes) == 1
@test occursin("notes cleared.", Ev.hdr.notes[1])
Ngaps = [size(S.t[i],1)-2 for i =1:2]
ungap!(S)
for i = 1:2
@test ==(size(S.t[i],1), 2)
end
S.gain = rand(Float64,2)
unscale!(S)
for i = 1:2
@test ==(S.gain[i], 1.0)
end
demean!(S)
printstyled(" accuracy of automatic logging\n", color=:light_green)
for i = 1:2
c = (Ngaps[i]>0) ? 1 : 0
@test length(S.notes[i]) == (3+c)
if c > 0
@test occursin("ungap!", S.notes[i][2])
end
@test occursin("unscale!", S.notes[i][2+c])
@test occursin("demean!", S.notes[i][3+c])
end
|
||| `Flexidisc` is an implementation of extensible records in Idris.
|||
||| Its goal is to provide easy-to-write extensible record functions that
||| ensure the following:
|||
||| - **Type safety:** operation on row and access to row data is type safe.
||| - **Compile-time validation:** row existence and uniqueness of row
|||   declarations are checked at compile time.
||| - **Lean syntax:** Take advantage of the list syntax as much as possible,
||| minimize the type declarations as well.
||| - **Custom keys:** Row labels can be any type that implements the `DecEq`
||| and `Ord` interface.
module Flexidisc
import public Flexidisc.Record
import public Flexidisc.Record.Connection
import public Flexidisc.Record.Transformation
import public Flexidisc.RecordList
|
\chapter{Poisson denoising}
\label{ch_denoising}
\markright{Poisson denoising}
\section{MS-VST + IUWT}
Under the hypothesis of homogeneous Poisson intensity, the stabilized wavelet coefficients $d_j$ behave like centered Gaussian variables of standard deviation $\sigma_j$. We can detect significant coefficients with binary hypothesis testing as in Gaussian denoising.
Under the null hypothesis $\mathcal{H}_0$ of homogeneous Poisson intensity, the distribution of the stabilized wavelet coefficient $d_j[k]$ at scale $j$ and location index $k$ can be written as:
\begin{equation}
p(d_j[k]) = \frac{1}{\sqrt{2\pi}\sigma_j}\exp(-d_j[k]^2 / 2 \sigma_j^2) .
\end{equation}
The rejection of the hypothesis $\mathcal{H}_0$ depends on the double-sided p-value:
\begin{equation}
p_j[k] = 2 \frac{1}{\sqrt{2\pi}\sigma_j}\int_{|d_j[k]|}^{+\infty} \exp(-x^2 / 2 \sigma_j^2) dx .
\end{equation}
Consequently, to accept or reject $\mathcal{H}_0$, we compare each $|d_j[k]|$ with a critical threshold $\kappa \sigma_j$, with $\kappa = 3$, $4$ or $5$ corresponding respectively to significance levels of about $2.7\times10^{-3}$, $6.3\times10^{-5}$ and $5.7\times10^{-7}$. This amounts to deciding that:
\begin{itemize}
\item if $|d_j[k]| \geqslant \kappa \sigma_j$, $d_j[k]$ is significant.
\item if $|d_j[k]| < \kappa \sigma_j$, $d_j[k]$ is not significant.
\end{itemize}
Then we have to invert the MS-VSTS scheme to reconstruct the estimate. However, although the direct inversion is possible (Eq. (\ref{eq30})), it cannot guarantee a positive intensity estimate, while the Poisson intensity is always nonnegative. A positivity projection could be applied, but important structures might be lost in the estimate. To tackle this problem, we reformulate the reconstruction as a convex optimisation problem and solve it iteratively with an algorithm based on the Hybrid Steepest Descent (HSD)~\citep{wave:yamada01}.
We define the multiresolution support $\mathcal{M}$, which is determined by the set of detected significant coefficients after hypothesis testing:
\begin{equation}
\label{eq33}
\mathcal{M} := \{ (j,k) | \text{if } d_j[k] \text{ is declared significant} \} .
\end{equation}
We formulate the reconstruction problem as a convex constrained minimization problem:
\begin{equation}
\label{eq34}
\begin{split}
\text{Arg} \min_{\mathbf{X}} \| \mathbf{ \Phi}^{T}\mathbf{X}\|_1,
\text{s.t.} \\ \: \left\{\begin{array}{c}\mathbf{X} \geqslant 0 , \\\forall (j,k)\in \mathcal{M}, (\mathbf{ \Phi}^{T}\mathbf{X})_j[k]=(\mathbf{ \Phi}^{T}\mathbf{Y})_j[k] , \end{array}\right.
\end{split}
\end{equation}
where $\mathbf{\Phi}$ denotes the IUWT synthesis operator.
This problem is solved with the following iterative scheme: the image is initialised by $\mathbf{X}^{(0)} = 0$, and the iteration scheme is, for $n=0$ to $N_{\max}-1$:
\begin{eqnarray}
\tilde{\mathbf{X}} &=& P_{+}[\mathbf{ X}^{(n)} + \mathbf{ \Phi} P_{\mathcal{M}} \mathbf{ \Phi}^{T} (\mathbf{ Y} - \mathbf{ X}^{(n)})] \\
\mathbf{X}^{(n+1)} &=& \mathbf{ \Phi}\text{ST}_{\lambda_n}[\mathbf{ \Phi}^{T}\tilde{\mathbf{X}}]
\end{eqnarray}
where $P_{+}$ denotes the projection on the positive orthant, $P_{\mathcal{M}}$ denotes the projection on the multiresolution support $\mathcal{M}$:
\begin{equation}
P_{\mathcal{M}}d_j[k] = \left\{\begin{array}{cc} d_j[k] & \text{if} \ (j,k) \in \mathcal{M} , \\0 & \text{otherwise} \end{array} . \right.
\end{equation}
and $\text{ST}_{\lambda_n}$ the soft-thresholding with threshold $\lambda_n$:
\begin{equation}
\text{ST}_{\lambda_n} [d] = \left\{\begin{array}{cc} \mathrm{sign}(d)(|d| - \lambda_n) & \text{if} \ |d| \geqslant \lambda_n , \\0 & \text{otherwise} \end{array} . \right.
\end{equation}
We chose a decreasing threshold $\lambda_n = \frac{N_{\max} - n}{N_{\max} - 1},n=1,2,\cdots,N_{\max}$.
The final estimate of the Poisson intensity is: $\hat{\mathbf{\Lambda}} = \mathbf{X}^{(N_{\max})}$. Algorithm~\ref{alg1} summarizes the main steps of the MS-VSTS + IUWT denoising algorithm.
\begin{algorithm}[!h]
\caption{MS-VSTS + IUWT Denoising}
\label{alg1}
\begin{algorithmic}[1]
\REQUIRE $\quad$ data $a_0:=\mathbf{Y}$, number of iterations $N_{\max}$, threshold $\kappa$ \\
\underline{\emph{\textbf{Detection}}} \\
\FOR{$j=1$ to $J$}
\STATE Compute $a_j$ and $d_j$ using (\ref{eq27}).
\STATE Hard threshold $|d_j[k]|$ with threshold $\kappa \sigma_j$ and update $\mathcal{M}$.
\ENDFOR \\
\underline{\emph{\textbf{Estimation}}} \\
\STATE Initialize $\mathbf{X}^{(0)}=0$, $\lambda_0 = 1$.
\FOR{$n=0$ to $N_{\max}-1$}
\STATE $\tilde{\mathbf{X}}= P_{+}[\mathbf{ X}^{(n)} + \mathbf{ \Phi} P_{\mathcal{M}} \mathbf{ \Phi}^{T} (\mathbf{ Y} - \mathbf{ X}^{(n)})]$.
\STATE $\mathbf{X}^{(n+1)} = \mathbf{ \Phi}\text{ST}_{\lambda_n}[\mathbf{ \Phi}^{T}\tilde{\mathbf{X}}]$.
\STATE $\lambda_{n+1} = \frac{N_{\max} - (n+1)}{N_{\max} - 1}$.
\ENDFOR
\STATE Get the estimate $\hat{\mathbf{\Lambda}} = \mathbf{X}^{(N_{\max})}$.
\end{algorithmic}
\end{algorithm}
\section{Multi-resolution support adaptation}
When two sources are too close, the less intense source may not be detected because of the negative wavelet coefficients of the brightest source. To avoid such a drawback, we may update the multi-resolution support at each iteration. The idea is to withdraw the detected sources and to make a detection on the remaining residual, so as to detect the sources which may have been missed at the first detection.
At each iteration $n$, we compute the MS-VSTS of $\mathbf{X}^{(n)}$. We denote $d^{(n)}_j[k]$ the stabilised coefficients of $\mathbf{X}^{(n)}$. We make a hard thresholding on $(d_j[k]-d^{(n)}_j[k])$ with the same thresholds as in the detection step. Significant coefficients are added to the multiresolution support $\mathcal{M}$.
\begin{algorithm}
\caption{MS-VSTS + IUWT Denoising + Multiresolution Support Adaptation}
\label{alg4}
\begin{algorithmic}[1]
\REQUIRE $\quad$ data $a_0:=\mathbf{Y}$, number of iterations $N_{\max}$, threshold $\kappa$ \\
\underline{\emph{\textbf{Detection}}} \\
\FOR{$j=1$ to $J$}
\STATE Compute $a_j$ and $d_j$ using (\ref{eq27}).
\STATE Hard threshold $|d_j[k]|$ with threshold $\kappa \sigma_j$ and update $\mathcal{M}$.
\ENDFOR \\
\underline{\emph{\textbf{Estimation}}} \\
\STATE Initialize $\mathbf{X}^{(0)}=0$, $\lambda_0 = 1$.
\FOR{$n=0$ to $N_{\max}-1$}
\STATE $\tilde{\mathbf{X}}= P_{+}[\mathbf{ X}^{(n)} + \mathbf{ \Phi} P_{\mathcal{M}} \mathbf{ \Phi}^{T} (\mathbf{ Y} - \mathbf{ X}^{(n)})]$.
\STATE $\mathbf{X}^{(n+1)} = \mathbf{ \Phi}\text{ST}_{\lambda_n}[\mathbf{ \Phi}^{T}\tilde{\mathbf{X}}]$.
\STATE Compute the MS-VSTS on $\mathbf{X}^{(n)}$ to get the stabilised coefficients $d^{(n)}_j$.
\STATE Hard threshold $|d_j[k]-d^{(n)}_j[k]|$ and update $\mathcal{M}$.
\STATE $\lambda_{n+1} = \frac{N_{\max} - (n+1)}{N_{\max} - 1}$.
\ENDFOR
\STATE Get the estimate $\hat{\mathbf{\Lambda}} = \mathbf{X}^{(N_{\max})}$.
\end{algorithmic}
\end{algorithm}
The main steps of the algorithm are summarized in Algorithm~\ref{alg4}. In practice, we use Algorithm~\ref{alg4} instead of Algorithm~\ref{alg1} in our experiments.
\section{MS-VST + Curvelets}
Insignificant coefficients are zeroed using the same hypothesis testing framework as at the wavelet scales. At each wavelet scale $j$ and ridgelet band $k$, we hard threshold the curvelet coefficients with threshold $\kappa \sigma_{j,k}$, $\kappa= 3, 4 \text{ or } 5$. Finally, a direct reconstruction can be performed by first inverting the local ridgelet transforms and then inverting the MS-VST + IUWT~(Equation~(\ref{eq30})). An iterative reconstruction may also be performed.
Algorithm~\ref{algcurv} summarizes the main steps of the MS-VSTS + Curvelets denoising algorithm.
\begin{algorithm}
\caption{MS-VSTS + Curvelets Denoising}
\label{algcurv}
\begin{algorithmic}[1]
\STATE Apply the MS-VST + IUWT with $J$ scales to get the stabilized wavelet subbands $d_j$.
\STATE Set $B_1 = B_{\min}$.
\FOR{$j=1$ to $J$}
\STATE Partition the subband $d_j$ with blocks of side-length $B_j$ and apply the digital ridgelet transform to each block to obtain the stabilized curvelets coefficients.
\IF {$j$ modulo $2=1$}
\STATE $B_{j+1} = 2 B_j$
\ELSE
\STATE $B_{j+1} = B_j$
\ENDIF \\
\STATE Hard threshold the stabilized curvelet coefficients.
\ENDFOR \\
\STATE Invert the ridgelet transform in each block before inverting the MS-VST + IUWT.
\end{algorithmic}
\end{algorithm}
\section{Experiments}
The method was tested on simulated Fermi data. The simulated data are the sum of a Milky Way diffuse background model and 1000 gamma-ray point sources. We based our Galactic diffuse emission model intensity on the model $gll\_iem\_v02$ obtained at the Fermi Science Support Center~\citep{Models}. This model results from a fit of the LAT photons with various gas templates as well as inverse Compton emission in several energy bands. We used a realistic point-spread function for the sources, based on Monte Carlo simulations of the LAT and accelerator tests, that scales approximately as $0.8(E/1\,\mathrm{GeV})^{-0.8}$ degrees. The positions of the 205 brightest sources were taken from the Fermi 3-month source list~\citep{Abdo}. The positions of the 795 remaining sources follow the LAT 1-year Point Source Catalog~\citep{Catalog} source distribution: each simulated source was randomly placed in a box of $\Delta l = 5^{\circ}$ and $\Delta b = 1^{\circ}$ around a LAT 1-year catalog source. We simulated each source assuming a power-law dependence with its spectral index given by the 3-month source list and the first-year catalog. We used an exposure of $3\times10^{10}\,\mathrm{s\,cm^{2}}$, corresponding approximately to one year of the Fermi all-sky survey around 1 GeV. The simulated counts map shown here corresponds to photon energies from 150 MeV to 20 GeV.
Fig.~\ref{rechsd} compares the result of denoising with MS-VST + IUWT (Algorithm~\ref{alg1}), MS-VST + curvelets (Algorithm~\ref{algcurv}) and Anscombe VST + wavelet shrinkage on a simulated Fermi map. Fig.~\ref{recface} shows one HEALPix face of the results.
As expected from theory, the Anscombe method produces poor results when denoising Fermi data, because the underlying intensity is too weak.
Both wavelet and curvelet denoising on the sphere perform much better.
For this application, wavelets are slightly better than curvelets ($SNR_{\text{wavelets}} = 65.8$ dB, $SNR_{\text{curvelets}} = 37.3$ dB, where $SNR~(\mathrm{dB}) = 20 \log_{10} (\sigma_{\text{signal}} / \sigma_{\text{noise}})$). As this image contains many point sources, this result is expected: wavelets are better suited than curvelets to represent isotropic objects.
\begin{figure}[htb]
\centering{
\hbox{
\includegraphics[width=3in,height=2.4in]{13822fg10.pdf}
\includegraphics[width=3in,height=2.4in]{13822fg11.pdf}}
\hbox{
\includegraphics[width=3in,height=2.4in]{13822fg12.pdf}
\includegraphics[width=3in,height=2.4in]{13822fg13.pdf}}
\hbox{
\includegraphics[width=3in,height=2.4in]{13822fg14.pdf}
\includegraphics[width=3in,height=2.4in]{13822fg15.pdf}}
\caption{\emph{Top Left}: Fermi simulated map without noise.
\emph{Top Right}: Fermi simulated map with Poisson noise.
\emph{Middle Left}: Fermi simulated map denoised with Anscombe VST + wavelet shrinkage.
\emph{Middle Right}: Fermi simulated map denoised with MS-VSTS + curvelets (Algorithm~\ref{algcurv}).
\emph{Bottom Left}: Fermi simulated map denoised with MS-VSTS + IUWT (Algorithm~\ref{alg1}) with threshold $5\sigma_j$.
\emph{Bottom Right}: Fermi simulated map denoised with MS-VSTS + IUWT (Algorithm~\ref{alg1}) with threshold $3\sigma_j$.
Pictures are in logarithmic scale.}
\label{rechsd}
}
\end{figure}
\begin{figure*}
\begin{center}
\includegraphics[width=2.5in]{13822fg16.pdf} \hfill
\includegraphics[width=2.5in]{13822fg17.pdf} \hfill
\includegraphics[width=2.5in]{13822fg18.pdf} \hfill
\includegraphics[width=2.5in]{13822fg19.pdf} \hfill
\includegraphics[width=2.5in]{13822fg20.pdf} \hfill
\includegraphics[width=2.5in]{13822fg21.pdf} \hfill
\caption{View of a single HEALPix face from the results of Figure~\ref{rechsd}.
\emph{Top Left}: Fermi simulated map without noise.
\emph{Top Right}: Fermi simulated map with Poisson noise.
\emph{Middle Left}: Fermi simulated map denoised with Anscombe VST + wavelet shrinkage.
\emph{Middle Right}: Fermi simulated map denoised with MS-VSTS + curvelets (Algorithm~\ref{algcurv}).
\emph{Bottom Left}: Fermi simulated map denoised with MS-VSTS + IUWT (Algorithm~\ref{alg1}) with threshold $5\sigma_j$.
\emph{Bottom Right}: Fermi simulated map denoised with MS-VSTS + IUWT (Algorithm~\ref{alg1}) with threshold $3\sigma_j$.
Pictures are in logarithmic scale.
}
\label{recface}
\end{center}
\end{figure*}
|
##### Copyright 2020 The TensorFlow Authors.
```python
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# MNIST classification
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/mnist">View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/mnist.ipynb">Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/mnist.ipynb">View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/mnist.ipynb">Download notebook</a>
</td>
</table>
This tutorial builds a quantum neural network (QNN) to classify a simplified version of MNIST, similar to the approach used in <a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi et al</a>. The performance of the quantum neural network on this classical data problem is compared with a classical neural network.
## Setup
```python
!pip install -q tensorflow==2.1.0
```
Install TensorFlow Quantum:
```python
!pip install -q tensorflow-quantum
```
Now import TensorFlow and the module dependencies:
```python
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
import seaborn as sns
import collections
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
```
## 1. Load the data
In this tutorial you will build a binary classifier to distinguish between the digits 3 and 6, following <a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi et al.</a> This section covers the data handling that:
- Loads the raw data from Keras.
- Filters the dataset to only 3s and 6s.
- Downscales the images so they can fit in a quantum computer.
- Removes any contradictory examples.
- Converts the binary images to Cirq circuits.
- Converts the Cirq circuits to TensorFlow Quantum circuits.
### 1.1 Load the raw data
Load the MNIST dataset distributed with Keras.
```python
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Rescale the images from [0,255] to the [0.0,1.0] range.
x_train, x_test = x_train[..., np.newaxis]/255.0, x_test[..., np.newaxis]/255.0
print("Number of original training examples:", len(x_train))
print("Number of original test examples:", len(x_train))
```
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
Number of original training examples: 60000
Number of original test examples: 10000
Filter the dataset to keep just the 3s and 6s, and remove the other classes. At the same time, convert the label, `y`, to boolean: `True` for `3` and `False` for `6`.
```python
def filter_36(x, y):
    keep = (y == 3) | (y == 6)
    x, y = x[keep], y[keep]
    y = y == 3
    return x, y
```
```python
x_train, y_train = filter_36(x_train, y_train)
x_test, y_test = filter_36(x_test, y_test)
print("Number of filtered training examples:", len(x_train))
print("Number of filtered test examples:", len(x_test))
```
Number of filtered training examples: 12049
Number of filtered test examples: 1968
Show the first example:
```python
print(y_train[0])
plt.imshow(x_train[0, :, :, 0])
plt.colorbar()
```
### 1.2 Downscale the images
An image size of 28x28 is much too large for current quantum computers. Resize the image down to 4x4:
```python
x_train_small = tf.image.resize(x_train, (4,4)).numpy()
x_test_small = tf.image.resize(x_test, (4,4)).numpy()
```
Again, display the first training example, now after resizing:
```python
print(y_train[0])
plt.imshow(x_train_small[0,:,:,0], vmin=0, vmax=1)
plt.colorbar()
```
### 1.3 Remove contradictory examples
From section *3.3 Learning to Distinguish Digits* of <a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi et al.</a>, filter the dataset to remove images that are labeled as belonging to both classes.
This is not a standard machine-learning procedure, but is included in the interest of following the paper.
```python
def remove_contradicting(xs, ys):
    mapping = collections.defaultdict(set)
    # Determine the set of labels for each unique image:
    for x, y in zip(xs, ys):
        mapping[tuple(x.flatten())].add(y)

    new_x = []
    new_y = []
    for x, y in zip(xs, ys):
        labels = mapping[tuple(x.flatten())]
        if len(labels) == 1:
            new_x.append(x)
            new_y.append(list(labels)[0])
        else:
            # Throw out images that match more than one label.
            pass

    num_3 = sum(1 for value in mapping.values() if True in value)
    num_6 = sum(1 for value in mapping.values() if False in value)
    num_both = sum(1 for value in mapping.values() if len(value) == 2)

    print("Number of unique images:", len(mapping.values()))
    print("Number of 3s: ", num_3)
    print("Number of 6s: ", num_6)
    print("Number of contradictory images: ", num_both)
    print()
    print("Initial number of examples: ", len(xs))
    print("Remaining non-contradictory examples: ", len(new_x))
    return np.array(new_x), np.array(new_y)
```
The resulting counts do not closely match the reported values, but the exact procedure is not specified.
It is also worth noting that filtering out contradictory examples at this point does not totally prevent the model from receiving contradictory training examples: the next step binarizes the data, which will cause more collisions.
```python
x_train_nocon, y_train_nocon = remove_contradicting(x_train_small, y_train)
```
Number of unique images: 10387
Number of 3s: 4961
Number of 6s: 5475
Number of contradictory images: 49
Initial number of examples: 12049
Remaining non-contradictory examples: 11520
### 1.4 Encode the data as quantum circuits
To process images using a quantum computer, <a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi et al.</a> proposed representing each pixel with a qubit, with the state depending on the value of the pixel. The first step is to convert to a binary encoding.
```python
THRESHOLD = 0.5
x_train_bin = np.array(x_train_nocon > THRESHOLD, dtype=np.float32)
x_test_bin = np.array(x_test_small > THRESHOLD, dtype=np.float32)
```
The qubits at pixel indices with values that exceed the threshold are rotated through an $X$ gate.
```python
def convert_to_circuit(image):
    """Encode truncated classical image into quantum datapoint."""
    values = np.ndarray.flatten(image)
    qubits = cirq.GridQubit.rect(4, 4)
    circuit = cirq.Circuit()
    for i, value in enumerate(values):
        if value:
            circuit.append(cirq.X(qubits[i]))
    return circuit


x_train_circ = [convert_to_circuit(x) for x in x_train_bin]
x_test_circ = [convert_to_circuit(x) for x in x_test_bin]
```
Here is the circuit created for the first example (circuit diagrams do not show qubits with zero gates):
```python
SVGCircuit(x_train_circ[0])
```
Compare this circuit to the indices where the image value exceeds the threshold:
```python
bin_img = x_train_bin[0,:,:,0]
indices = np.array(np.where(bin_img)).T
indices
```
array([[2, 2],
[3, 1]])
Convert these `Cirq` circuits to tensors for `tfq`:
```python
x_train_tfcirc = tfq.convert_to_tensor(x_train_circ)
x_test_tfcirc = tfq.convert_to_tensor(x_test_circ)
```
## 2. Quantum neural network
There is little guidance for a quantum circuit structure that classifies images. Since the classification is based on the expectation of the readout qubit, <a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi et al.</a> propose using two-qubit gates, with the readout qubit always acted upon. This is similar in some ways to running a small <a href="https://arxiv.org/abs/1511.06464" class="external">Unitary RNN</a> across the pixels.
### 2.1 Build the model circuit
The following example shows this layered approach. Each layer uses *n* instances of the same gate, with each of the data qubits acting on the readout qubit.
Start with a simple class that will add a layer of these gates to a circuit:
```python
class CircuitLayerBuilder():
    def __init__(self, data_qubits, readout):
        self.data_qubits = data_qubits
        self.readout = readout

    def add_layer(self, circuit, gate, prefix):
        for i, qubit in enumerate(self.data_qubits):
            symbol = sympy.Symbol(prefix + '-' + str(i))
            circuit.append(gate(qubit, self.readout)**symbol)
```
Build an example circuit layer to see how it looks:
```python
demo_builder = CircuitLayerBuilder(data_qubits=cirq.GridQubit.rect(4, 1),
                                   readout=cirq.GridQubit(-1, -1))
circuit = cirq.Circuit()
demo_builder.add_layer(circuit, gate=cirq.XX, prefix='xx')
SVGCircuit(circuit)
```
Now build a two-layered model, matching the data-circuit size, and include the preparation and readout operations.
```python
def create_quantum_model():
    """Create a QNN model circuit and readout operation to go along with it."""
    data_qubits = cirq.GridQubit.rect(4, 4)  # a 4x4 grid.
    readout = cirq.GridQubit(-1, -1)         # a single qubit at [-1,-1]
    circuit = cirq.Circuit()

    # Prepare the readout qubit.
    circuit.append(cirq.X(readout))
    circuit.append(cirq.H(readout))

    builder = CircuitLayerBuilder(
        data_qubits=data_qubits,
        readout=readout)

    # Then add layers (experiment by adding more).
    builder.add_layer(circuit, cirq.XX, "xx1")
    builder.add_layer(circuit, cirq.ZZ, "zz1")

    # Finally, prepare the readout qubit.
    circuit.append(cirq.H(readout))
    return circuit, cirq.Z(readout)
```
```python
model_circuit, model_readout = create_quantum_model()
```
### 2.2 Wrap the model-circuit in a tfq-keras model
Build the Keras model with the quantum components. This model is fed the "quantum data", from `x_train_circ`, that encodes the classical data. It uses a *Parametrized Quantum Circuit* layer, `tfq.layers.PQC`, to train the model circuit on the quantum data.
To classify these images, <a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi et al.</a> proposed taking the expectation of a readout qubit in a parameterized circuit. The expectation returns a value between -1 and 1.
```python
# Build the Keras model.
model = tf.keras.Sequential([
    # The input is the data-circuit, encoded as a tf.string
    tf.keras.layers.Input(shape=(), dtype=tf.string),
    # The PQC layer returns the expected value of the readout gate, range [-1,1].
    tfq.layers.PQC(model_circuit, model_readout),
])
```
Next, describe the training procedure to the model, using the `compile` method.
Since the expected readout is in the range `[-1,1]`, optimizing the hinge loss is a somewhat natural fit.
Note: Another valid approach would be to shift the output range to `[0,1]`, and treat it as the probability the model assigns to class `3`. This could be used with a standard `tf.losses.BinaryCrossentropy` loss, as sketched below.
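A minimal sketch of that alternative, assuming the same `model_circuit` and `model_readout` built above (the `Lambda` rescaling layer is illustrative and not part of this tutorial's pipeline):
```python
# Hypothetical variant: rescale the PQC output from [-1,1] to [0,1] and
# train against the (boolean) labels with a standard cross-entropy loss.
alt_model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(), dtype=tf.string),
    tfq.layers.PQC(model_circuit, model_readout),       # output in [-1, 1]
    tf.keras.layers.Lambda(lambda x: (x + 1.0) / 2.0),  # shift to [0, 1]
])
alt_model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])
```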
To use the hinge loss here you need to make two small adjustments. First convert the labels, `y_train`, from boolean to `[-1,1]`, as expected by the hinge loss.
```python
y_train_hinge = 2.0*y_train-1.0
y_test_hinge = 2.0*y_test-1.0
```
Second, use a custom `hinge_accuracy` metric that correctly handles `[-1, 1]` as the `y_true` labels argument (`tf.losses.BinaryAccuracy(threshold=0.0)` expects `y_true` to be a boolean, so it can't be used with the hinge loss).
```python
def hinge_accuracy(y_true, y_pred):
    y_true = tf.squeeze(y_true) > 0.0
    y_pred = tf.squeeze(y_pred) > 0.0
    result = tf.cast(y_true == y_pred, tf.float32)
    return tf.reduce_mean(result)
```
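As a quick sanity check with made-up tensors (illustrative values only): the signs of labels and predictions agree on three of the four examples, so the metric should return 0.75.
```python
y_true = tf.constant([-1.0, 1.0, 1.0, -1.0])
y_pred = tf.constant([-0.3, 0.7, -0.1, -0.9])
print(hinge_accuracy(y_true, y_pred).numpy())  # 0.75
```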
```python
model.compile(
    loss=tf.keras.losses.Hinge(),
    optimizer=tf.keras.optimizers.Adam(),
    metrics=[hinge_accuracy])
```
```python
print(model.summary())
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
pqc (PQC) (None, 1) 32
=================================================================
Total params: 32
Trainable params: 32
Non-trainable params: 0
_________________________________________________________________
None
### 2.3 Train the quantum model
Now train the model; this takes about 45 minutes. If you don't want to wait that long, use a small subset of the data (set `NUM_EXAMPLES=500`, below). This doesn't really affect the model's progress during training (it only has 32 parameters, and doesn't need much data to constrain them). Using fewer examples just ends training earlier (about 5 minutes), but runs long enough to show that it is making progress in the validation logs.
```python
EPOCHS = 3
BATCH_SIZE = 32
NUM_EXAMPLES = len(x_train_tfcirc)
```
```python
x_train_tfcirc_sub = x_train_tfcirc[:NUM_EXAMPLES]
y_train_hinge_sub = y_train_hinge[:NUM_EXAMPLES]
```
Training this model to convergence should achieve >85% accuracy on the test set.
```python
qnn_history = model.fit(
    x_train_tfcirc_sub, y_train_hinge_sub,
    batch_size=BATCH_SIZE,
    epochs=EPOCHS,
    verbose=1,
    validation_data=(x_test_tfcirc, y_test_hinge))

qnn_results = model.evaluate(x_test_tfcirc, y_test)
```
Train on 11520 samples, validate on 1968 samples
Epoch 1/3
11520/11520 [==============================] - 377s 33ms/sample - loss: 1.0000 - hinge_accuracy: 0.5016 - val_loss: 1.0005 - val_hinge_accuracy: 0.4798
Epoch 2/3
11520/11520 [==============================] - 369s 32ms/sample - loss: 1.0000 - hinge_accuracy: 0.4995 - val_loss: 1.0011 - val_hinge_accuracy: 0.4098
Epoch 3/3
11520/11520 [==============================] - 369s 32ms/sample - loss: 0.9997 - hinge_accuracy: 0.5090 - val_loss: 1.0054 - val_hinge_accuracy: 0.4844
1968/1968 [==============================] - 3s 1ms/sample - loss: 1.0054 - hinge_accuracy: 0.4844
Note: The training accuracy reports the average over the epoch. The validation accuracy is evaluated at the end of each epoch.
## 3. Classical neural network
While the quantum neural network works for this simplified MNIST problem, a basic classical neural network can easily outperform a QNN on this task. After a single epoch, a classical neural network can achieve >98% accuracy on the holdout set.
In the following example, a classical neural network is used for the 3-6 classification problem using the entire 28x28 image instead of subsampling the image. This easily converges to nearly 100% accuracy on the test set.
```python
def create_classical_model():
    # A simple model based on LeNet from https://keras.io/examples/mnist_cnn/
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Conv2D(32, [3, 3], activation='relu', input_shape=(28, 28, 1)))
    model.add(tf.keras.layers.Conv2D(64, [3, 3], activation='relu'))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Dropout(0.25))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(128, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.Dense(1))
    return model


model = create_classical_model()
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
model.summary()
```
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 9216) 0
_________________________________________________________________
dense (Dense) (None, 128) 1179776
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 1) 129
=================================================================
Total params: 1,198,721
Trainable params: 1,198,721
Non-trainable params: 0
_________________________________________________________________
```python
model.fit(x_train,
          y_train,
          batch_size=128,
          epochs=1,
          verbose=1,
          validation_data=(x_test, y_test))

cnn_results = model.evaluate(x_test, y_test)
```
Train on 12049 samples, validate on 1968 samples
12049/12049 [==============================] - 3s 277us/sample - loss: 0.0409 - accuracy: 0.9849 - val_loss: 0.0056 - val_accuracy: 0.9990
1968/1968 [==============================] - 0s 105us/sample - loss: 0.0056 - accuracy: 0.9990
The above model has nearly 1.2M parameters. For a fairer comparison, try a 37-parameter model on the subsampled images:
```python
def create_fair_classical_model():
    # A simple model based on LeNet from https://keras.io/examples/mnist_cnn/
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=(4, 4, 1)))
    model.add(tf.keras.layers.Dense(2, activation='relu'))
    model.add(tf.keras.layers.Dense(1))
    return model


model = create_fair_classical_model()
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
model.summary()
```
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_1 (Flatten) (None, 16) 0
_________________________________________________________________
dense_2 (Dense) (None, 2) 34
_________________________________________________________________
dense_3 (Dense) (None, 1) 3
=================================================================
Total params: 37
Trainable params: 37
Non-trainable params: 0
_________________________________________________________________
```python
model.fit(x_train_bin,
          y_train_nocon,
          batch_size=128,
          epochs=20,
          verbose=2,
          validation_data=(x_test_bin, y_test))

fair_nn_results = model.evaluate(x_test_bin, y_test)
```
Train on 11520 samples, validate on 1968 samples
Epoch 1/20
11520/11520 - 0s - loss: 0.6929 - accuracy: 0.5027 - val_loss: 0.6903 - val_accuracy: 0.4868
Epoch 2/20
11520/11520 - 0s - loss: 0.6848 - accuracy: 0.5027 - val_loss: 0.6751 - val_accuracy: 0.4868
Epoch 3/20
11520/11520 - 0s - loss: 0.6530 - accuracy: 0.5240 - val_loss: 0.6218 - val_accuracy: 0.6448
Epoch 4/20
11520/11520 - 0s - loss: 0.5901 - accuracy: 0.7362 - val_loss: 0.5542 - val_accuracy: 0.8349
Epoch 5/20
11520/11520 - 0s - loss: 0.5267 - accuracy: 0.8625 - val_loss: 0.4975 - val_accuracy: 0.8694
Epoch 6/20
11520/11520 - 0s - loss: 0.4745 - accuracy: 0.8777 - val_loss: 0.4527 - val_accuracy: 0.8709
Epoch 7/20
11520/11520 - 0s - loss: 0.4325 - accuracy: 0.8826 - val_loss: 0.4171 - val_accuracy: 0.8725
Epoch 8/20
11520/11520 - 0s - loss: 0.3987 - accuracy: 0.8842 - val_loss: 0.3891 - val_accuracy: 0.8714
Epoch 9/20
11520/11520 - 0s - loss: 0.3718 - accuracy: 0.8843 - val_loss: 0.3668 - val_accuracy: 0.8704
Epoch 10/20
11520/11520 - 0s - loss: 0.3504 - accuracy: 0.8852 - val_loss: 0.3486 - val_accuracy: 0.8709
Epoch 11/20
11520/11520 - 0s - loss: 0.3331 - accuracy: 0.8854 - val_loss: 0.3339 - val_accuracy: 0.8709
Epoch 12/20
11520/11520 - 0s - loss: 0.3190 - accuracy: 0.8854 - val_loss: 0.3217 - val_accuracy: 0.8709
Epoch 13/20
11520/11520 - 0s - loss: 0.3073 - accuracy: 0.8868 - val_loss: 0.3114 - val_accuracy: 0.8720
Epoch 14/20
11520/11520 - 0s - loss: 0.2973 - accuracy: 0.8917 - val_loss: 0.3024 - val_accuracy: 0.9187
Epoch 15/20
11520/11520 - 0s - loss: 0.2886 - accuracy: 0.9155 - val_loss: 0.2947 - val_accuracy: 0.9187
Epoch 16/20
11520/11520 - 0s - loss: 0.2810 - accuracy: 0.9155 - val_loss: 0.2878 - val_accuracy: 0.9187
Epoch 17/20
11520/11520 - 0s - loss: 0.2743 - accuracy: 0.9155 - val_loss: 0.2817 - val_accuracy: 0.9187
Epoch 18/20
11520/11520 - 0s - loss: 0.2684 - accuracy: 0.9155 - val_loss: 0.2767 - val_accuracy: 0.9187
Epoch 19/20
11520/11520 - 0s - loss: 0.2631 - accuracy: 0.9155 - val_loss: 0.2718 - val_accuracy: 0.9187
Epoch 20/20
11520/11520 - 0s - loss: 0.2584 - accuracy: 0.9155 - val_loss: 0.2676 - val_accuracy: 0.9197
1968/1968 [==============================] - 0s 23us/sample - loss: 0.2676 - accuracy: 0.9197
## 4. Comparison
Higher-resolution input and a more powerful model make this problem easy for the CNN, while a classical model of similar power (~37 parameters) trains to a similar accuracy in a fraction of the time. Either way, the classical neural network easily outperforms the quantum neural network. For classical data, it is difficult to beat a classical neural network.
```python
qnn_accuracy = qnn_results[1]
cnn_accuracy = cnn_results[1]
fair_nn_accuracy = fair_nn_results[1]

sns.barplot(["Quantum", "Classical, full", "Classical, fair"],
            [qnn_accuracy, cnn_accuracy, fair_nn_accuracy])
```
|
lemma closed_limpts: "closed {x::'a::metric_space. x islimpt S}" |
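# Construct a triangulated Möbius band: a table holding vertices, edges, faces
# and a 3D wireframe plot (assumes mobius_embedding and mobius_arc are defined
# elsewhere in this worksheet).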
mobius_complex := proc()
local T,V,E,F,R,r,v,a,mob,mob_arc,i,e;
V := [seq(seq([i,a],a=0..2),i=0..1)];
F := [
seq([[0,a],[0,modp(a+1,3)],[1,a]],a=0..2),
seq([[0,a],[0,modp(a+1,3)],[1,modp(a+1,3)]],a=0..2),
seq([[0,a],[1,modp(a+1,3)],[1,modp(a+2,3)]],a=0..2)
];
E := map(f -> ([f[1],f[2]],[f[1],f[3]],[f[2],f[3]]),F);
T := table([]);
T["vertices"] := V;
T["edges"] := E;
T["faces"] := F;
T["max_simplices"] := T["faces"];
T["all_simplices"] := [map(v -> [v],V),op(E),op(F)];
v := table():
for a from 0 to 2 do
v[0,a] := [a/6,0];
v[1,a] := [a/3,1];
od:
R := 1;
r := 1/3:
mob := mobius_embedding(R,r);
mob_arc := mobius_arc(R,r);
T["plot"] :=
display(
spacecurve(mob(t,1),t=0..1,colour=blue),
plot3d(mob(t,u),t=0..1,u=0..1,style=wireframe,colour="LightGray"),
seq(mob_arc(op(v[op(e[1])]),op(v[op(e[2])]),colour=red),e in E),
axes=none,scaling=constrained
);
return eval(T);
end:
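# Combinatorial Möbius band on 4*n+2 vertices: the index maps f and g generate
# the edges and faces (assumes mobius_embedding and `plot/raw_simplicial_complex`
# are defined elsewhere).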
flat_mobius_complex := proc(n::posint)
local f,g,T,V,E,F,r,v,a,i;
f := (i) -> modp(i+1,4*n+2);
g := (i) -> modp(i+2*n+3/2+(-1)^i/2,4*n+2);
V := [seq(i,i=0..4*n+1)];
E := [seq([i,f(i)],i=0..4*n+1),seq([i,g(i)],i=0..4*n+1)];
F := [seq([i,f(i),g(i)],i=0..4*n+1)];
T := table([]);
T["vertices"] := V;
T["edges"] := E;
T["faces"] := F;
T["max_simplices"] := T["faces"];
T["all_simplices"] := [map(v -> [v],V),op(E),op(F)];
T["embedding_dim"] := 3;
T["embedding"] := table([seq(i = evalf(mobius_embedding(0.2)(i/(4*n+2),1)),i=0..4*n+1)]);
T["plot"] := `plot/raw_simplicial_complex`(T["vertices"])(T["faces"],3,T["embedding"]);
return eval(T);
end: |
//#define CGAL_PMP_REMESHING_VERBOSE
//#define CGAL_PMP_REMESHING_DEBUG
//#define CGAL_PMP_REMESHING_VERY_VERBOSE
//#define CGAL_PMP_REMESHING_VERBOSE_PROGRESS
#include <QtCore/qglobal.h>
#include <CGAL/Three/Polyhedron_demo_plugin_interface.h>
#include "Scene_surface_mesh_item.h"
#include "Scene_polyhedron_selection_item.h"
#include <CGAL/iterator.h>
#include <CGAL/Polygon_mesh_processing/remesh.h>
#include <CGAL/boost/graph/graph_traits_Polyhedron_3.h>
#include <CGAL/boost/graph/properties_Polyhedron_3.h>
#include <CGAL/Polygon_mesh_processing/triangulate_faces.h>
#include <CGAL/utility.h>
#include <boost/graph/graph_traits.hpp>
#include <boost/unordered_set.hpp>
#include <CGAL/property_map.h>
#include <QElapsedTimer>
#include <QAction>
#include <QMainWindow>
#include <QApplication>
#include <QString>
#include <QDialog>
#include <QtPlugin>
#include <QMessageBox>
#include <vector>
#include <algorithm>
#include <queue>
#include <sstream>
#include <cmath>
#ifdef CGAL_LINKED_WITH_TBB
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"
#include "tbb/partitioner.h"
#endif
#include "ui_Isotropic_remeshing_dialog.h"
typedef Scene_surface_mesh_item Scene_facegraph_item;
typedef Scene_facegraph_item::Face_graph FaceGraph;
typedef boost::graph_traits<FaceGraph>::face_descriptor face_descriptor;
// Given a halfedge and a target edge length, put in `out` the points,
// equally spaced along the edge, such that splitting the edge at this
// sequence of points makes every sub-edge shorter than `target_length`.
template <class TriangleMesh, class PointPMap, class PointOutputIterator>
PointOutputIterator
sample_edge(
typename boost::graph_traits<TriangleMesh>::halfedge_descriptor hd,
TriangleMesh& triangle_mesh,
double target_length,
const PointPMap& pmap,
PointOutputIterator out)
{
typedef typename boost::property_traits<PointPMap>::value_type Point_3;
typedef typename CGAL::Kernel_traits<Point_3>::Kernel::Vector_3 Vector_3;
typename boost::property_traits<PointPMap>::reference src=get(pmap, source(hd,triangle_mesh) );
typename boost::property_traits<PointPMap>::reference tgt=get(pmap, target(hd,triangle_mesh) );
double length = std::sqrt( CGAL::squared_distance(src, tgt) );
if ( length <= target_length ) return out;
double nb_points = std::floor( length / target_length );
Vector_3 unit = (tgt-src) / (nb_points+1);
for(double i=0; i<nb_points; ++i)
*out++=src+unit*(i+1);
return out;
}
// Given a set of points that are expected to lie on an edge, split
// that edge and retriangulate the faces incident to the edge.
// Points are sorted from the source to the target of the edge
// (the sequence does not contain the edge endpoints).
template <class TriangleMesh, class PointPMap, class PointRange, class EdgeOutputIterator>
EdgeOutputIterator
split_identical_edges(
typename boost::graph_traits<TriangleMesh>::halfedge_descriptor hd,
TriangleMesh& tm,
const PointPMap& pmap,
const PointRange& points,
EdgeOutputIterator out)
{
typedef typename PointRange::value_type Point_3;
typedef boost::graph_traits<TriangleMesh> GT;
typedef typename GT::halfedge_descriptor halfedge_descriptor;
for(const Point_3& p : points)
{
// split the edge
halfedge_descriptor new_hd=CGAL::Euler::split_edge(hd,tm);
// set the vertex point
put(pmap, target(new_hd, tm), p);
*out++=edge(new_hd, tm);
}
*out++=edge(hd, tm);
return out;
}
// HedgeRange is expected to be a range with value type
// std::pair<halfedge_descriptor, TriangleMesh*>.
// Given a set of halfedges representing different edges
// but with identical endpoints, and a target edge length,
// we split all edges identically so that every sub-edge
// has length <= target_length.
template <class HedgeRange, class Edges_to_protect>
void split_long_duplicated_edge(const HedgeRange& hedge_range,
double target_length,
Edges_to_protect& edges_to_protect)
{
typedef typename HedgeRange::value_type Pair;
typedef typename Pair::first_type halfedge_descriptor;
typedef typename boost::remove_pointer<
typename Pair::second_type>::type TriangleMesh;
typedef typename boost::property_map<TriangleMesh,
CGAL::vertex_point_t>::type PointPMap;
typedef typename boost::property_traits<PointPMap>::value_type Point_3;
if (hedge_range.empty()) return;
const Pair& p = *hedge_range.begin();
PointPMap pmap = get(boost::vertex_point, *p.second);
std::vector<Point_3> points;
halfedge_descriptor hd = p.first;
// collect the points to be added inside the edges
sample_edge(hd, *p.second, target_length, pmap, std::back_inserter(points) );
CGAL_assertion_code(Point_3 src = get(pmap, source(hd, *p.second));)
CGAL_assertion_code(Point_3 tgt = get(pmap, target(hd, *p.second));)
// split the edges and collect faces to triangulate
for(const Pair& h_and_p : hedge_range)
{
halfedge_descriptor hc=h_and_p.first;
TriangleMesh* polyc = h_and_p.second;
PointPMap pmap_2 = get(boost::vertex_point, *polyc);
// make sure halfedges are consistently oriented
CGAL_assertion( get(pmap_2, source(hc, *polyc)) == src );
CGAL_assertion( get(pmap_2, target(hc, *polyc)) == tgt );
typedef typename Edges_to_protect::value_type::second_type Edge_set;
Edge_set& edge_set = edges_to_protect[polyc];
// now split the halfedge and incident faces
split_identical_edges(hc,*polyc,pmap_2, points,
std::inserter( edge_set, edge_set.begin()));
}
}
using namespace CGAL::Three;
class Polyhedron_demo_isotropic_remeshing_plugin :
public QObject,
public Polyhedron_demo_plugin_interface
{
Q_OBJECT
Q_INTERFACES(CGAL::Three::Polyhedron_demo_plugin_interface)
Q_PLUGIN_METADATA(IID "com.geometryfactory.PolyhedronDemo.PluginInterface/1.0" FILE "isotropic_remeshing_plugin.json")
typedef boost::graph_traits<FaceGraph>::edge_descriptor edge_descriptor;
typedef boost::graph_traits<FaceGraph>::halfedge_descriptor halfedge_descriptor;
typedef boost::graph_traits<FaceGraph>::face_descriptor face_descriptor;
typedef boost::unordered_set<edge_descriptor> Edge_set;
typedef Scene_polyhedron_selection_item::Is_constrained_map<Edge_set> Edge_constrained_pmap;
public:
void init(QMainWindow* mainWindow, Scene_interface* scene_interface, Messages_interface*)
{
this->scene = scene_interface;
this->mw = mainWindow;
actionIsotropicRemeshing_ = new QAction("Isotropic Remeshing", mw);
actionIsotropicRemeshing_->setProperty("subMenuName", "Polygon Mesh Processing");
if (actionIsotropicRemeshing_) {
connect(actionIsotropicRemeshing_, SIGNAL(triggered()),
this, SLOT(isotropic_remeshing()));
}
}
QList<QAction*> actions() const {
return QList<QAction*>() << actionIsotropicRemeshing_;
}
bool applicable(QAction*) const
{
if (scene->selectionIndices().size() == 1)
{
return qobject_cast<Scene_facegraph_item*>(scene->item(scene->mainSelectionIndex()))
|| qobject_cast<Scene_polyhedron_selection_item*>(scene->item(scene->mainSelectionIndex()));
}
Q_FOREACH(int index, scene->selectionIndices())
{
//if one polyhedron is found in the selection, it's fine
if (qobject_cast<Scene_facegraph_item*>(scene->item(index)))
return true;
}
return false;
}
typedef boost::property_map<FaceGraph, CGAL::face_patch_id_t<int> >::type Patch_id_pmap;
void detect_and_split_duplicates(std::vector<Scene_facegraph_item*>& selection,
std::map<FaceGraph*,Edge_set>& edges_to_protect,
double target_length)
{
typedef EPICK::Point_3 Point_3;
typedef std::pair<Point_3,Point_3> Segment_3;
typedef std::map< Segment_3,
std::vector< std::pair<halfedge_descriptor, FaceGraph*> > > MapType;
typedef boost::property_map<FaceGraph,
CGAL::vertex_point_t>::type PointPMap;
MapType duplicated_edges;
for(Scene_facegraph_item* poly_item : selection){
FaceGraph& pmesh = *poly_item->polyhedron();
PointPMap pmap = get(boost::vertex_point, pmesh);
for(edge_descriptor ed : edges(pmesh)){
halfedge_descriptor hd = halfedge(ed,pmesh);
Point_3 p = get(pmap, source(hd,pmesh)), q = get(pmap, target(hd,pmesh));
Segment_3 s = CGAL::make_sorted_pair(p,q);
if (s.first==q) hd=opposite(hd,pmesh); // make sure the halfedges are consistently oriented
duplicated_edges[s].push_back( std::make_pair(hd,&pmesh) );
}
}
// consistently split duplicate edges and triangulate incident faces
typedef std::pair<face_descriptor, FaceGraph*> Face_and_poly;
std::set< Face_and_poly > faces_to_triangulate;
for(const MapType::value_type& p : duplicated_edges)
if (p.second.size()>1){
//collect faces to retriangulate
typedef std::pair<halfedge_descriptor, FaceGraph*> Pair_type;
for(const Pair_type& h_and_p : p.second)
{
halfedge_descriptor hc=h_and_p.first;
FaceGraph* polyc = h_and_p.second;
if ( !is_border(hc, *polyc) )
faces_to_triangulate.insert( Face_and_poly(face(hc,*polyc), polyc) );
if ( !is_border(opposite(hc, *polyc), *polyc) )
faces_to_triangulate.insert(
Face_and_poly(face(opposite(hc, *polyc),*polyc), polyc) );
}
// split the edges
split_long_duplicated_edge(p.second, target_length, edges_to_protect);
}
// now retriangulate
namespace PMP=CGAL::Polygon_mesh_processing;
for(Face_and_poly f_and_p : faces_to_triangulate)
PMP::triangulate_face(f_and_p.first, *f_and_p.second);
}
void do_split_edges(Scene_polyhedron_selection_item* selection_item,
SMesh& pmesh,
double target_length)
{
std::vector<edge_descriptor> p_edges;
for(edge_descriptor e : edges(pmesh))
{
if(get(selection_item->constrained_edges_pmap(), e))
p_edges.push_back(e);
}
for(face_descriptor f : selection_item->selected_facets)
{
for(halfedge_descriptor he : halfedges_around_face(halfedge(f, pmesh), pmesh))
{
if (selection_item->selected_facets.find(face(opposite(he, pmesh), pmesh))
== selection_item->selected_facets.end())
p_edges.push_back(edge(he, pmesh));
}
}
if (!p_edges.empty())
CGAL::Polygon_mesh_processing::split_long_edges(
p_edges
, target_length
, *selection_item->polyhedron()
, PMP::parameters::geom_traits(EPICK())
.edge_is_constrained_map(selection_item->constrained_edges_pmap()));
else
std::cout << "No selected or boundary edges to be split" << std::endl;
}
public Q_SLOTS:
void isotropic_remeshing()
{
if (scene->selectionIndices().size() > 1)
{
isotropic_remeshing_of_several_polyhedra();
return;
}
const Scene_interface::Item_id index = scene->mainSelectionIndex();
Scene_facegraph_item* poly_item =
qobject_cast<Scene_facegraph_item*>(scene->item(index));
Scene_polyhedron_selection_item* selection_item =
qobject_cast<Scene_polyhedron_selection_item*>(scene->item(index));
if (poly_item || selection_item)
{
// Create dialog box
QDialog dialog(mw);
Ui::Isotropic_remeshing_dialog ui
= remeshing_dialog(&dialog, poly_item, selection_item);
// Get values
int i = dialog.exec();
if (i == QDialog::Rejected)
{
std::cout << "Remeshing aborted" << std::endl;
return;
}
bool edges_only = ui.splitEdgesOnly_checkbox->isChecked();
bool preserve_duplicates = ui.preserveDuplicates_checkbox->isChecked();
double target_length = ui.edgeLength_dspinbox->value();
unsigned int nb_iter = ui.nbIterations_spinbox->value();
unsigned int nb_smooth = ui.nbSmoothing_spinbox->value();
bool protect = ui.protect_checkbox->isChecked();
bool smooth_features = ui.smooth1D_checkbox->isChecked();
// wait cursor
QApplication::setOverrideCursor(Qt::WaitCursor);
QElapsedTimer time;
time.start();
typedef boost::graph_traits<FaceGraph>::edge_descriptor edge_descriptor;
typedef boost::graph_traits<FaceGraph>::face_descriptor face_descriptor;
FaceGraph& pmesh = (poly_item != NULL)
? *poly_item->polyhedron()
: *selection_item->polyhedron();
Patch_id_pmap fpmap = get(CGAL::face_patch_id_t<int>(), pmesh);
bool fpmap_valid = false;
{
for(face_descriptor f : faces(pmesh))
{
if (get(fpmap, f) != 1)
{
fpmap_valid = true;
break;/*1 is the default value for both Surface_mesh and Polyhedron*/
}
}
}
if (selection_item)
{
if (edges_only)
{
do_split_edges(selection_item, pmesh, target_length);
}
else //not edges_only
{
if(protect &&
!CGAL::Polygon_mesh_processing::internal::constraints_are_short_enough(
*selection_item->polyhedron(),
selection_item->constrained_edges_pmap(),
get(CGAL::vertex_point, *selection_item->polyhedron()),
CGAL::Static_property_map<face_descriptor, std::size_t>(1),
4. / 3. * target_length))
{
QApplication::restoreOverrideCursor();
//If facets are selected, splitting edges will add facets that won't be selected, and it will mess up the rest.
//If there is only edges, it will work fine because new edges are dealt with in the code, so we can directly
//split and continue.
// Possibility todo: check if the barycenter of a new face is inside an old selected face to
//select it again.
if(!selection_item->selected_facets.empty())
{
QMessageBox::warning(mw, tr("Error"),
tr("Isotropic remeshing : protect_constraints cannot be set to"
" true with constraints larger than 4/3 * target_edge_length."
" Aborting."));
return;
}
else if(QMessageBox::question(mw, tr("Error"),
tr("Isotropic remeshing : protect_constraints cannot be set to"
" true with constraints larger than 4/3 * target_edge_length."
" Do you wish to split the constrained edges ?")) !=
QMessageBox::Yes)
{
return;
}
else
{
QApplication::setOverrideCursor(Qt::WaitCursor);
do_split_edges(selection_item, pmesh, target_length);
}
}
if (selection_item->selected_facets.empty() && !selection_item->isEmpty())
{
if (fpmap_valid)
CGAL::Polygon_mesh_processing::isotropic_remeshing(faces(*selection_item->polyhedron())
, target_length
, *selection_item->polyhedron()
, CGAL::Polygon_mesh_processing::parameters::number_of_iterations(nb_iter)
.protect_constraints(protect)
.edge_is_constrained_map(selection_item->constrained_edges_pmap())
.relax_constraints(smooth_features)
.number_of_relaxation_steps(nb_smooth)
.vertex_is_constrained_map(selection_item->constrained_vertices_pmap())
.face_patch_map(fpmap));
else
CGAL::Polygon_mesh_processing::isotropic_remeshing(faces(*selection_item->polyhedron())
, target_length
, *selection_item->polyhedron()
, CGAL::Polygon_mesh_processing::parameters::number_of_iterations(nb_iter)
.protect_constraints(protect)
.edge_is_constrained_map(selection_item->constrained_edges_pmap())
.relax_constraints(smooth_features)
.number_of_relaxation_steps(nb_smooth)
.vertex_is_constrained_map(selection_item->constrained_vertices_pmap())
);
}
else //selected_facets not empty
{
if (fpmap_valid)
CGAL::Polygon_mesh_processing::isotropic_remeshing(selection_item->selected_facets
, target_length
, *selection_item->polyhedron()
, CGAL::Polygon_mesh_processing::parameters::number_of_iterations(nb_iter)
.protect_constraints(protect)
.edge_is_constrained_map(selection_item->constrained_edges_pmap())
.relax_constraints(smooth_features)
.number_of_relaxation_steps(nb_smooth)
.vertex_is_constrained_map(selection_item->constrained_vertices_pmap())
.face_patch_map(fpmap));
else
CGAL::Polygon_mesh_processing::isotropic_remeshing(selection_item->selected_facets
, target_length
, *selection_item->polyhedron()
, CGAL::Polygon_mesh_processing::parameters::number_of_iterations(nb_iter)
.protect_constraints(protect)
.edge_is_constrained_map(selection_item->constrained_edges_pmap())
.relax_constraints(smooth_features)
.number_of_relaxation_steps(nb_smooth)
.vertex_is_constrained_map(selection_item->constrained_vertices_pmap()));
}
}
SMesh mesh_ = *selection_item->polyhedron();
std::vector<bool> are_edges_removed;
are_edges_removed.resize(mesh_.number_of_edges()+mesh_.number_of_removed_edges());
std::vector<bool> are_edges_constrained;
are_edges_constrained.resize(are_edges_removed.size());
for(std::size_t i=0; i< are_edges_removed.size(); ++i)
{
are_edges_removed[i] = mesh_.is_removed(SMesh::Edge_index(static_cast<int>(i)));
if(!are_edges_removed[i])
are_edges_constrained[i] = get(selection_item->constrained_edges_pmap(), SMesh::Edge_index(static_cast<int>(i)));
}
int i0, i1,
nE(mesh_.number_of_edges()+mesh_.number_of_removed_edges());
//get constrained values in order.
if (nE > 0)
{
i0=0; i1=nE-1;
while (1)
{
// find first removed and last un-removed
while (!are_edges_removed[i0] && i0 < i1) ++i0;
while ( are_edges_removed[i1] && i0 < i1) --i1;
if (i0 >= i1) break;
// swap
std::swap(are_edges_constrained[i0], are_edges_constrained[i1]);
std::swap(are_edges_removed[i0], are_edges_removed[i1]);
}
// remember new size
nE = are_edges_removed[i0] ? i0 : i0+1;
}
selection_item->polyhedron_item()->setColor(
selection_item->polyhedron_item()->color());
if(fpmap_valid)
{
selection_item->polyhedron_item()->setItemIsMulticolor(true);
selection_item->polyhedron_item()->computeItemColorVectorAutomatically(true);
}
else
{
selection_item->polyhedron_item()->setItemIsMulticolor(false);
}
selection_item->polyhedron_item()->polyhedron()->collect_garbage();
//fix constrained_edges_map
for(int i=0; i< nE; ++i)
{
Scene_polyhedron_selection_item::Is_constrained_map<Scene_polyhedron_selection_item::Selection_set_edge>
pmap = selection_item->constrained_edges_pmap();
put(pmap, SMesh::Edge_index(i), are_edges_constrained[i]);
}
selection_item->poly_item_changed();
selection_item->clear<face_descriptor>();
selection_item->changed_with_poly_item();
}
else if (poly_item)
{
boost::property_map<FaceGraph, CGAL::edge_is_feature_t>::type eif
= get(CGAL::edge_is_feature, pmesh);
if (edges_only)
{
std::vector<edge_descriptor> edges_to_split;
for(edge_descriptor e : edges(pmesh))
{
if( is_border(e, pmesh) || get(eif, e) )
edges_to_split.push_back(e);
}
if (!edges_to_split.empty())
{
if (fpmap_valid)
CGAL::Polygon_mesh_processing::split_long_edges(
edges_to_split
, target_length
, pmesh
, PMP::parameters::geom_traits(EPICK())
. edge_is_constrained_map(eif)
. face_patch_map(fpmap));
else
CGAL::Polygon_mesh_processing::split_long_edges(
edges_to_split
, target_length
, pmesh
, PMP::parameters::geom_traits(EPICK())
. edge_is_constrained_map(eif));
}
else
std::cout << "No border to be split" << std::endl;
}
else
{
// tricks to use the function detect_and_split_duplicates
// that uses several poly items
std::map<FaceGraph*, Edge_set > edges_to_protect_map;
std::vector<Scene_facegraph_item*> poly_items(1, poly_item);
Edge_set& edges_to_protect = edges_to_protect_map[poly_item->polyhedron()];
if (preserve_duplicates)
{
detect_and_split_duplicates(poly_items, edges_to_protect_map, target_length);
}
Scene_polyhedron_selection_item::Is_constrained_map<Edge_set> ecm(&edges_to_protect);
for(edge_descriptor e : edges(pmesh))
{
if (eif[e])
edges_to_protect.insert(e);
}
if(protect &&
!CGAL::Polygon_mesh_processing::internal::constraints_are_short_enough(
pmesh,
ecm,
get(CGAL::vertex_point, pmesh),
CGAL::Static_property_map<face_descriptor, std::size_t>(1),
4. / 3. * target_length))
{
QApplication::restoreOverrideCursor();
QMessageBox::warning(mw, tr("Error"),
tr("Isotropic remeshing : protect_constraints cannot be set to"
" true with constraints larger than 4/3 * target_edge_length."
" Aborting."));
return;
}
if (fpmap_valid)
CGAL::Polygon_mesh_processing::isotropic_remeshing(
faces(*poly_item->polyhedron())
, target_length
, *poly_item->polyhedron()
, CGAL::Polygon_mesh_processing::parameters::number_of_iterations(nb_iter)
.protect_constraints(protect)
.number_of_relaxation_steps(nb_smooth)
.edge_is_constrained_map(ecm)
.relax_constraints(smooth_features)
.face_patch_map(fpmap));
else
CGAL::Polygon_mesh_processing::isotropic_remeshing(
faces(*poly_item->polyhedron())
, target_length
, *poly_item->polyhedron()
, CGAL::Polygon_mesh_processing::parameters::number_of_iterations(nb_iter)
.protect_constraints(protect)
.number_of_relaxation_steps(nb_smooth)
.edge_is_constrained_map(ecm)
.relax_constraints(smooth_features));
}
if (fpmap_valid)
{
PMP::connected_components(pmesh, fpmap, PMP::parameters::edge_is_constrained_map(eif));
poly_item->setItemIsMulticolor(true);
poly_item->show_feature_edges(true);
}
else
poly_item->setItemIsMulticolor(false);
poly_item->invalidateOpenGLBuffers();
Q_EMIT poly_item->itemChanged();
}
else{
std::cout << "Can't remesh that type of thing" << std::endl;
}
std::cout << "ok (" << time.elapsed() << " ms)" << std::endl;
// default cursor
QApplication::restoreOverrideCursor();
}
}
void isotropic_remeshing_of_several_polyhedra()
{
// Remeshing parameters
bool edges_only = false, preserve_duplicates = false;
double target_length = 0.;
unsigned int nb_iter = 1;
bool protect = false;
bool smooth_features = true;
std::vector<Scene_facegraph_item*> selection;
for(int index : scene->selectionIndices())
{
Scene_facegraph_item* poly_item =
qobject_cast<Scene_facegraph_item*>(scene->item(index));
if (poly_item == NULL)
{
std::cout << scene->item(index)->name().data()
<< " is not a FaceGraph, remeshing skipped\n";
continue;
}
else
{
selection.push_back(poly_item);
if (target_length == 0.)//parameters have not been set yet
{
QDialog dialog(mw);
Ui::Isotropic_remeshing_dialog ui = remeshing_dialog(&dialog, poly_item);
ui.objectName->setText(QString::number(scene->selectionIndices().size())
.append(QString(" items to be remeshed")));
int i = dialog.exec();
if (i == QDialog::Rejected)
{
std::cout << "Remeshing aborted" << std::endl;
return;
}
edges_only = ui.splitEdgesOnly_checkbox->isChecked();
preserve_duplicates = ui.preserveDuplicates_checkbox->isChecked();
target_length = ui.edgeLength_dspinbox->value();
nb_iter = ui.nbIterations_spinbox->value();
protect = ui.protect_checkbox->isChecked();
smooth_features = ui.smooth1D_checkbox->isChecked();
}
}
}
if(target_length == 0.)//parameters have not been set
{ // i.e. no item is a polyhedron
std::cout << "Remeshing aborted" << std::endl;
return;
}
// wait cursor
QApplication::setOverrideCursor(Qt::WaitCursor);
int total_time = 0;
// typedef boost::graph_traits<FaceGraph>::edge_descriptor edge_descriptor;
std::map<FaceGraph*,Edge_set > edges_to_protect;
if(preserve_duplicates)
detect_and_split_duplicates(selection, edges_to_protect, target_length);
#ifdef CGAL_LINKED_WITH_TBB
QElapsedTimer time;
time.start();
tbb::parallel_for(
tbb::blocked_range<std::size_t>(0, selection.size()),
Remesh_polyhedron_item_for_parallel_for<Remesh_polyhedron_item>(
selection, edges_to_protect, edges_only, target_length, nb_iter, protect, smooth_features));
total_time = time.elapsed();
#else
Remesh_polyhedron_item remesher(edges_only,
target_length, nb_iter, protect, smooth_features);
for(Scene_facegraph_item* poly_item : selection)
{
QElapsedTimer time;
time.start();
remesher(poly_item, edges_to_protect[poly_item->polyhedron()]);
total_time += time.elapsed();
std::cout << "Remeshing of " << poly_item->name().data()
<< " done in " << time.elapsed() << " ms" << std::endl;
}
#endif
std::cout << "Remeshing of all selected items done in "
<< total_time << " ms" << std::endl;
for(Scene_facegraph_item* poly_item : selection)
{
//destroys the patch_id_map for the Surface_mesh_item to avoid assertions.
poly_item->resetColors();
poly_item->invalidateOpenGLBuffers();
Q_EMIT poly_item->itemChanged();
}
// default cursor
QApplication::restoreOverrideCursor();
}
private:
Scene_interface *scene;
QMainWindow* mw;
struct Remesh_polyhedron_item
{
typedef boost::graph_traits<FaceGraph>::edge_descriptor edge_descriptor;
typedef boost::graph_traits<FaceGraph>::halfedge_descriptor halfedge_descriptor;
typedef boost::graph_traits<FaceGraph>::face_descriptor face_descriptor;
bool edges_only_;
double target_length_;
unsigned int nb_iter_;
bool protect_;
bool smooth_features_;
protected:
void remesh(Scene_facegraph_item* poly_item,
Edge_set& edges_to_protect) const
{
//fill face_index property map
if (edges_only_)
{
std::vector<halfedge_descriptor> border;
CGAL::Polygon_mesh_processing::border_halfedges(
faces(*poly_item->polyhedron())
, *poly_item->polyhedron()
, std::back_inserter(border));
std::vector<edge_descriptor> border_edges;
for(halfedge_descriptor h : border)
border_edges.push_back(edge(h, *poly_item->polyhedron()));
CGAL::Polygon_mesh_processing::split_long_edges(
border_edges
, target_length_
, *poly_item->polyhedron());
}
else
{
std::cout << "Isotropic remeshing of "
<< poly_item->name().toStdString() << " started..." << std::endl;
Scene_polyhedron_selection_item::Is_constrained_map<Edge_set> ecm(&edges_to_protect);
CGAL::Polygon_mesh_processing::isotropic_remeshing(
faces(*poly_item->polyhedron())
, target_length_
, *poly_item->polyhedron()
, CGAL::Polygon_mesh_processing::parameters::number_of_iterations(nb_iter_)
.protect_constraints(protect_)
.edge_is_constrained_map(ecm)
.face_patch_map(get(CGAL::face_patch_id_t<int>(), *poly_item->polyhedron()))
.relax_constraints(smooth_features_));
std::cout << "Isotropic remeshing of "
<< poly_item->name().toStdString() << " done." << std::endl;
}
}
public:
Remesh_polyhedron_item(
const bool edges_only,
const double target_length,
const unsigned int nb_iter,
const bool protect,
const bool smooth_features)
: edges_only_(edges_only)
, target_length_(target_length)
, nb_iter_(nb_iter)
, protect_(protect)
, smooth_features_(smooth_features)
{}
Remesh_polyhedron_item(const Remesh_polyhedron_item& remesh)
: edges_only_(remesh.edges_only_)
, target_length_(remesh.target_length_)
, nb_iter_(remesh.nb_iter_)
, protect_(remesh.protect_)
, smooth_features_(remesh.smooth_features_)
{}
void operator()(Scene_facegraph_item* poly_item,
Edge_set& edges_to_protect) const
{
remesh(poly_item, edges_to_protect);
}
};
#ifdef CGAL_LINKED_WITH_TBB
template<typename RemeshFunctor>
struct Remesh_polyhedron_item_for_parallel_for
: RemeshFunctor
{
const std::vector<Scene_facegraph_item*>& selection_;
std::map<FaceGraph*,Edge_set >& edges_to_protect_;
public:
// Constructor
Remesh_polyhedron_item_for_parallel_for(
const std::vector<Scene_facegraph_item*>& selection,
std::map<FaceGraph*,Edge_set >& edges_to_protect,
const bool edges_only,
const double target_length,
const unsigned int nb_iter,
const bool protect,
const bool smooth_features)
: RemeshFunctor(edges_only, target_length, nb_iter, protect, smooth_features)
, selection_(selection), edges_to_protect_(edges_to_protect)
{}
// Constructor
Remesh_polyhedron_item_for_parallel_for(
const Remesh_polyhedron_item_for_parallel_for &remesh)
: RemeshFunctor(remesh)
, selection_(remesh.selection_)
, edges_to_protect_(remesh.edges_to_protect_)
{}
// operator()
void operator()(const tbb::blocked_range<size_t>& r) const
{
for (size_t i = r.begin(); i != r.end(); ++i)
RemeshFunctor::remesh(selection_[i], edges_to_protect_[selection_[i]->polyhedron()]);
}
};
#endif
Ui::Isotropic_remeshing_dialog
remeshing_dialog(QDialog* dialog,
Scene_facegraph_item* poly_item,
Scene_polyhedron_selection_item* selection_item = NULL)
{
Ui::Isotropic_remeshing_dialog ui;
ui.setupUi(dialog);
connect(ui.buttonBox, SIGNAL(accepted()), dialog, SLOT(accept()));
connect(ui.buttonBox, SIGNAL(rejected()), dialog, SLOT(reject()));
//connect checkbox to spinbox
connect(ui.splitEdgesOnly_checkbox, SIGNAL(toggled(bool)),
ui.nbIterations_spinbox, SLOT(setDisabled(bool)));
connect(ui.splitEdgesOnly_checkbox, SIGNAL(toggled(bool)),
ui.protect_checkbox, SLOT(setDisabled(bool)));
connect(ui.protect_checkbox, SIGNAL(toggled(bool)),
ui.smooth1D_checkbox, SLOT(setDisabled(bool)));
connect(ui.splitEdgesOnly_checkbox, SIGNAL(toggled(bool)),
ui.smooth1D_checkbox, SLOT(setDisabled(bool)));
connect(ui.preserveDuplicates_checkbox, SIGNAL(toggled(bool)),
ui.protect_checkbox, SLOT(setChecked(bool)));
connect(ui.preserveDuplicates_checkbox, SIGNAL(toggled(bool)),
ui.protect_checkbox, SLOT(setDisabled(bool)));
//Set default parameters
Scene_interface::Bbox bbox = poly_item != NULL ? poly_item->bbox()
: (selection_item != NULL ? selection_item->bbox()
: scene->bbox());
ui.objectName->setText(poly_item != NULL ? poly_item->name()
: (selection_item != NULL ? selection_item->name()
: QString("Remeshing parameters")));
ui.objectNameSize->setText(
tr("Object bbox size (w,h,d): <b>%1</b>, <b>%2</b>, <b>%3</b>")
.arg(bbox.xmax()-bbox.xmin(), 0, 'g', 3)
.arg(bbox.ymax()-bbox.ymin(), 0, 'g', 3)
.arg(bbox.zmax()-bbox.zmin(), 0, 'g', 3));
double diago_length = CGAL::sqrt((bbox.xmax()-bbox.xmin())*(bbox.xmax()-bbox.xmin())
+ (bbox.ymax()-bbox.ymin())*(bbox.ymax()-bbox.ymin())
+ (bbox.zmax()-bbox.zmin())*(bbox.zmax()-bbox.zmin()));
double log = std::log10(diago_length);
unsigned int nb_decimals = (log > 0) ? 5 : (std::ceil(-log)+3);
ui.edgeLength_dspinbox->setDecimals(nb_decimals);
ui.edgeLength_dspinbox->setSingleStep(1e-3);
ui.edgeLength_dspinbox->setRange(1e-6 * diago_length, //min
2. * diago_length);//max
ui.edgeLength_dspinbox->setValue(0.05 * diago_length);
std::ostringstream oss;
oss << "Diagonal length of the Bbox of the selection to remesh is ";
oss << diago_length << "." << std::endl;
oss << "Default is 5% of it" << std::endl;
ui.edgeLength_dspinbox->setToolTip(QString::fromStdString(oss.str()));
ui.nbIterations_spinbox->setSingleStep(1);
ui.nbIterations_spinbox->setRange(1/*min*/, 1000/*max*/);
ui.nbIterations_spinbox->setValue(1);
ui.protect_checkbox->setChecked(false);
ui.smooth1D_checkbox->setChecked(true);
if (NULL != selection_item)
{
//do not preserve duplicates in selection mode
ui.preserveDuplicates_checkbox->setDisabled(true);
ui.preserveDuplicates_checkbox->setChecked(false);
}
return ui;
}
private:
QAction* actionIsotropicRemeshing_;
}; // end Polyhedron_demo_isotropic_remeshing_plugin
#include "Isotropic_remeshing_plugin.moc"
|
If $f$ is holomorphic on every ball centered at $c$, then $f$ is holomorphic on the whole complex plane. |
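One line of justification: every point $z$ of the plane lies in the ball $B(c, |z - c| + 1)$ centered at $c$, so holomorphy on every such ball gives holomorphy at each point of $\mathbb{C}$.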
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* */
/* This file is part of the library KASKADE 7 */
/* see http://www.zib.de/projects/kaskade7-finite-element-toolbox */
/* */
/* Copyright (C) 2002-2009 Zuse Institute Berlin */
/* */
/* KASKADE 7 is distributed under the terms of the ZIB Academic License. */
/* see $KASKADE/academic.txt */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef SCHURBLOCKLU_SOLVER_HH
#define SCHURBLOCKLU_SOLVER_HH
/**
* @file
* @brief Routines for the solution of (sparse) linear systems
* @author Anton Schiela
*/
#include <iostream>
#include <memory> // std::unique_ptr
#include "dune/common/fmatrix.hh"
#include "dune/istl/matrix.hh"
#include "linalg/umfpack_solve.hh"
#include "linalg/linearsystem.hh"
#include "linalg/simpleLAPmatrix.hh"
#include <boost/timer/timer.hpp>
namespace Kaskade
{
/** \ingroup linalg
 * \brief Adapter class for DUNE::IterativeSolver
 */
void printVec(std::vector<double> const&v, int vend=1000000);
template<class Factorization=UMFFactorization<double> >
class BlockLUFactorization
{
public:
/// needs a matrix
static const bool needMatrix = true;
template<class Sys>
BlockLUFactorization(Sys const& lin, int start2, int end2, int start3, int end3)
{
MatrixAsTriplet<double> matL;
lin.getMatrixBlocks(matL,start3,end3,start2,end2);
matA.resize(0);
lin.getMatrixBlocks(matA,start2,end2,start2,end2);
std::cout << "Inner, " << std::flush;
boost::timer::cpu_timer timer;
factoredL.reset(new Factorization(matL.nrows(),
2,
matL.ridx,
matL.cidx,
matL.data,
MatrixProperties::GENERAL));
std::cout << "Finished: " << (double)(timer.elapsed().user)/1e9 << " sec." << std::endl;
}
void resetBlock22(MatrixAsTriplet<double>const & matA_) { matA = matA_; };
void solve(std::vector<double>const& rhs, std::vector<double>& sol, int nr=1);
MatrixAsTriplet<double> matA;
std::unique_ptr<Factorization> factoredL;
};
struct BlockSchurParameters
{
BlockSchurParameters(bool reg_)
: refactorizeInner(true), refactorizeOuter(true)
{
if(reg_) regularizationMethod=AddId; else regularizationMethod=None;
}
BlockSchurParameters(bool reg_, bool innerf_, bool outerf_)
: refactorizeInner(innerf_), refactorizeOuter(outerf_)
{
if(reg_) regularizationMethod=AddId; else regularizationMethod=None;
}
bool refactorizeInner, refactorizeOuter;
typedef enum { None=0, AddId=1, CG = 2} RegularizationMethod;
RegularizationMethod regularizationMethod;
};
/// Solver, which is especially designed for the hyperthermia planning problem.
template<class Factorization>
class DirectBlockSchurSolver
{
public:
int start1, end1, start2, end2, start3, end3;
/// needs a matrix
static const bool needMatrix = true;
DirectBlockSchurSolver(bool doregularize = false) :
start1(0), end1(1),
start2(1), end2(2),
start3(2), end3(3),
report(false), paras(doregularize)
{}
void ax(std::vector<double>& sol, std::vector<double>const &r) const;
void resolve(std::vector<double>& sol, SparseLinearSystem const& lin) const;
void resolveAdjAndNormal(std::vector<double>& sol, SparseLinearSystem const& lin) const;
void resolveNormal(std::vector<double>& sol, SparseLinearSystem const& lin,std::vector<double>const *addrhs=0);
void solve(std::vector<double>& sol,
SparseLinearSystem const& lin);
void solveAdjAndNormal(std::vector<double>& sol,
SparseLinearSystem const& lin);
void solveTCG(std::vector<double>& sol1, std::vector<double>& sol2,
SparseLinearSystem const& linT, SparseLinearSystem const& linN, std::vector<double>const & normalStep, double nu0);
/// Solves always exactly
void setRelativeAccuracy(double) {}
/// Always exact solution
double getRelativeAccuracy() {return 0.0;}
void onChangedLinearization() {flushFactorization(); }
void flushFactorization()
{
if(factorization.get() && paras.refactorizeInner) factorization.reset();
mC.setSize(0,0);
B.resize(0);
AinvB.resize(0);
matANormal.flush();
}
bool report;
/// Always exact solution
double getAbsoluteAccuracy() {return 0.0;}
bool improvementPossible() { return false; }
void resetParameters(BlockSchurParameters const& p_) { paras=p_; }
private:
void resolveN(std::vector<double>& sol, std::vector<double>const &r,std::vector<double>const &s,std::vector<double>const &t) const;
void tsolve(std::vector<double>& sol1,std::vector<double>& sol2,
std::vector<double>const &r,std::vector<double>const &s,std::vector<double>const &t) const;
void fwd(std::vector<double>& sol, std::vector<double>const &r,std::vector<double>const &s,std::vector<double>const &t) const;
void bwd(std::vector<double>& sol, std::vector<double>const &x2,std::vector<double>const &s,std::vector<double>const &t) const;
void buildNewSchurComplement(SparseLinearSystem const& lin,int task);
std::unique_ptr<BlockLUFactorization<Factorization> > factorization;
MatrixAsTriplet<double> matANormal;
Dune::Matrix<Dune::FieldMatrix<double,1,1> > mC,mCNormal;
std::vector<double> B,AinvB;
int rowsB, colsBC, rowsC;
int rows1, rows2, rows3;
BlockSchurParameters paras;
};
class ModifiedSparseSystem : public SparseLinearSystem
{
public:
ModifiedSparseSystem(SparseLinearSystem const &lin_, MatrixAsTriplet<double> const& mat_, MatrixAsTriplet<double> const& mat2_
, std::vector<double> const& scaling_) :
scaling(scaling_), lin(&lin_), mat(mat_), mat2(mat2_)
{
}
virtual int rows(int rbegin, int rend) const { return lin->rows(rbegin,rend);}
virtual int cols(int colbegin, int colend) const { return lin->cols(colbegin,colend);}
/// Return matrix blocks of the linear system in triplet format
virtual void getMatrixBlocks(MatrixAsTriplet<double>& m, int rbegin, int rend, int colbegin, int colend) const
{
lin->getMatrixBlocks(m,rbegin,rend,colbegin,colend);
MatrixAsTriplet<double> mm(mat);
mm *= -1.0;
if(rbegin==0 && colbegin == 0) m+=mm;
}
void resetLin(SparseLinearSystem const& lin_)
{
lin=&lin_;
}
/// value of function
virtual double getValue() const { return lin->getValue();}
/// Return components of the right hand side of the linear system
virtual void getRHSBlocks(std::vector<double>& rhs, int rbegin, int rend) const
{
rhs.resize(0);
lin->getRHSBlocks(rhs,rbegin,rend);
if(rbegin==0)
{
std::vector<double> t(lin->rows(3,4),0.0);
std::vector<double> Mt(lin->rows(0,1),0.0);
lin->getRHSBlocks(t,3,4);
for(int i=0; i<t.size();++i)
{
t[i] *=scaling[i];
}
mat2.ax(Mt,t);
for(int i=0; i<Mt.size();++i)
{
// std::cout << Mt[i] << std::endl;
}
for(int i=0; i<lin->rows(0,1); ++i)
rhs[i] -= Mt[i];
}
}
/// number of column blocks
virtual int nColBlocks() const { return lin->nColBlocks();};
/// number of row blocks
virtual int nRowBlocks() const { return lin->nRowBlocks();};
private:
std::vector<double> scaling;
SparseLinearSystem const* lin;
MatrixAsTriplet<double> mat;
MatrixAsTriplet<double> mat2;
};
/// Solver, which is especially designed for the hyperthermia planning problem with amplitude ratio
template<class Factorization>
class ARDirectBlockSchurSolver
{
public:
/// needs a matrix
static const bool needMatrix = true;
ARDirectBlockSchurSolver(bool doregularize = false) : report(false), DBSSolver(doregularize), justsolved(false)
{
}
void resolve(std::vector<double>& sol, SparseLinearSystem const& lin) const;
void solve(std::vector<double>& sol,
SparseLinearSystem const& lin);
/// Solves always exactly
void setRelativeAccuracy(double) {}
/// Always exact solution
double getRelativeAccuracy() {return 0.0;}
void onChangedLinearization() {flushFactorization(); }
void flushFactorization()
{
DBSSolver.flushFactorization();
F.setSize(0,0);
FT.setSize(0,0);
FTVinv.setSize(0,0);
FTVinvF.setSize(0,0);
Vinv.setSize(0,0);
}
bool report;
/// Always exact solution
double getAbsoluteAccuracy() {return 0.0;}
bool improvementPossible() { return false; }
private:
std::vector<double> scaling;
DirectBlockSchurSolver<Factorization> DBSSolver;
Dune::Matrix<Dune::FieldMatrix<double,1,1> > Vinv,F,FT, FTVinv, FTVinvF;
std::unique_ptr<MatrixAsTriplet<double> > L,FTVi;
std::unique_ptr<ModifiedSparseSystem> linMod;
bool justsolved;
};
} // namespace Kaskade
#endif
|
In the 2000 film Hey Ram, the lead character, Saket Ram (played by Kamal Haasan), and his friend, Amjad Khan (played by Shah Rukh Khan), are shown as employees of Wheeler, who was portrayed by Lewis K. <unk>, before the 1947 Hindu-Muslim riots. In a 2003 volume of the South Asian Studies journal, Sudeshna Gusha published a research article examining Wheeler's use of photography in his excavations and publications in the Indian subcontinent. In 2011, the academic journal Public Archaeology published a research paper by Moshenska and Schadla-Hall that analysed Wheeler's role in presenting archaeology to the British public. Two years later, the Papers from the Institute of Archaeology issued a short comic strip by Moshenska and Alex <unk> depicting Wheeler's activities in studying the archaeology of Libya during World War II.
|
Require Import hdl.
Record semantics (G C M : Type) : Type :=
mkSemantics {
at_external : C -> ee -> Prop;
after_external : C -> C;
step : G -> C*M -> C*M (*bigstep*)
}.
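(* hdl_process instantiates [semantics] for hdl processes: [guarded] and
   [unguard] (presumably defined in [hdl]) handle external events, and the
   big-step [step] runs [interp] on the current state, restarting the
   process when the command reduces to [stskip]. *)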
Definition hdl_process : semantics st st env :=
mkSemantics
_ _ _
guarded
unguard
(fun restart p =>
let (c,m) := p in
match interp m c with
| (m', stskip) => (restart, m')
| (m', c') => (c', m')
end).
|