lemma sigma_sets_image: assumes S: "S \<in> sigma_sets \<Omega> M" and "M \<subseteq> Pow \<Omega>" "f ` \<Omega> = \<Omega>" "inj_on f \<Omega>" and M: "\<And>y. y \<in> M \<Longrightarrow> f ` y \<in> M" shows "(f ` S) \<in> sigma_sets \<Omega> M"
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Johannes Hölzl, Mario Carneiro
-/
import data.int.basic
/-!
# Square root of natural numbers
This file defines an efficient binary implementation of the square root function that returns the
unique `r` such that `r * r ≤ n < (r + 1) * (r + 1)`. It takes advantage of the binary
representation by replacing the multiplication by 2 appearing in
`(a + b)^2 = a^2 + 2 * a * b + b^2` by a bitmask manipulation.
## Reference
See [Wikipedia, *Methods of computing square roots*]
[https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Binary_numeral_system_(base_2)].
-/
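/- Editorial illustration (not part of the original file): `sqrt_aux b r n` keeps
`b` as a single bit at an even position (a power of `4`) and `r` as the partial
root aligned to that bit. Each round asks whether the remainder `n` still covers
`r + b`: if so it subtracts and updates `r := div2 r + b`, otherwise it just
halves `r`; either way `b` is shifted down two bits. This is the
`(a + b)^2 = a^2 + 2*a*b + b^2` expansion carried out with shifts only. -/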
namespace nat
theorem sqrt_aux_dec {b} (h : b ≠ 0) : shiftr b 2 < b :=
begin
simp only [shiftr_eq_div_pow],
apply (nat.div_lt_iff_lt_mul' (dec_trivial : 0 < 4)).2,
have := nat.mul_lt_mul_of_pos_left
(dec_trivial : 1 < 4) (nat.pos_of_ne_zero h),
rwa mul_one at this
end
/-- Auxiliary function for `nat.sqrt`. See e.g.
<https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Binary_numeral_system_(base_2)> -/
def sqrt_aux : ℕ → ℕ → ℕ → ℕ
| b r n := if b0 : b = 0 then r else
let b' := shiftr b 2 in
have b' < b, from sqrt_aux_dec b0,
match (n - (r + b : ℕ) : ℤ) with
| (n' : ℕ) := sqrt_aux b' (div2 r + b) n'
| _ := sqrt_aux b' (div2 r) n
end
/-- `sqrt n` is the square root of a natural number `n`. If `n` is not a
perfect square, it returns the largest `k : ℕ` such that `k*k ≤ n`. -/
@[pp_nodot] def sqrt (n : ℕ) : ℕ :=
match size n with
| 0 := 0
| succ s := sqrt_aux (shiftl 1 (bit0 (div2 s))) 0 n
end
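/- Quick sanity checks (editorial, illustrative only): -/
#eval sqrt 99   -- 9
#eval sqrt 100  -- 10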
theorem sqrt_aux_0 (r n) : sqrt_aux 0 r n = r :=
by rw sqrt_aux; simp
local attribute [simp] sqrt_aux_0
theorem sqrt_aux_1 {r n b} (h : b ≠ 0) {n'} (h₁ : r + b + n' = n) :
  sqrt_aux b r n = sqrt_aux (shiftr b 2) (div2 r + b) n' :=
by rw sqrt_aux; simp only [h, h₁.symm, int.coe_nat_add, if_false];
  rw [add_comm _ (n' : ℤ), add_sub_cancel, sqrt_aux._match_1]
theorem sqrt_aux_2 {r n b} (h : b ≠ 0) (h₁ : n < r + b) :
  sqrt_aux b r n = sqrt_aux (shiftr b 2) (div2 r) n :=
begin
  rw sqrt_aux; simp only [h, h₁, if_false],
  cases int.eq_neg_succ_of_lt_zero
    (sub_lt_zero.2 (int.coe_nat_lt_coe_nat_of_lt h₁)) with k e,
rw [e, sqrt_aux._match_1]
end
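-- `is_sqrt n q` (below) packages the defining property of the square root:
-- `q*q ≤ n < (q+1)*(q+1)`, i.e. `q` is the integer square root of `n`.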
private def is_sqrt (n q : ℕ) : Prop := q*q ≤ n ∧ n < (q+1)*(q+1)
local attribute [-simp] mul_eq_mul_left_iff mul_eq_mul_right_iff
private lemma sqrt_aux_is_sqrt_lemma (m r n : ℕ)
  (h₁ : r*r ≤ n)
  (m') (hm : shiftr (2^m * 2^m) 2 = m')
  (H1 : n < (r + 2^m) * (r + 2^m) →
    is_sqrt n (sqrt_aux m' (r * 2^m) (n - r * r)))
  (H2 : (r + 2^m) * (r + 2^m) ≤ n →
    is_sqrt n (sqrt_aux m' ((r + 2^m) * 2^m) (n - (r + 2^m) * (r + 2^m)))) :
  is_sqrt n (sqrt_aux (2^m * 2^m) ((2*r)*2^m) (n - r*r)) :=
begin
have b0 :=
have b0:_, from ne_of_gt (pow_pos (show 0 < 2, from dec_trivial) m),
nat.mul_ne_zero b0 b0,
  have lb : n - r * r < 2 * r * 2^m + 2^m * 2^m ↔
    n < (r+2^m)*(r+2^m),
  { rw [tsub_lt_iff_right h₁],
simp [left_distrib, right_distrib, two_mul, mul_comm, mul_assoc,
add_comm, add_assoc, add_left_comm] },
have re : div2 (2 * r * 2^m) = r * 2^m,
{ rw [div2_val, mul_assoc,
nat.mul_div_cancel_left _ (dec_trivial:2>0)] },
cases lt_or_ge n ((r+2^m)*(r+2^m)) with hl hl,
{ rw [sqrt_aux_2 b0 (lb.2 hl), hm, re], apply H1 hl },
{ cases le.dest hl with n' e,
    rw [@sqrt_aux_1 (2 * r * 2^m) (n-r*r) (2^m * 2^m) b0 (n - (r + 2^m) * (r + 2^m)),
      hm, re, ← right_distrib],
{ apply H2 hl },
apply eq.symm, apply tsub_eq_of_eq_add_rev,
    rw [← add_assoc, (_ : r*r + _ = _)],
exact (add_tsub_cancel_of_le hl).symm,
simp [left_distrib, right_distrib, two_mul, mul_comm, mul_assoc, add_assoc] },
end
private lemma sqrt_aux_is_sqrt (n) : ∀ m r,
  r*r ≤ n → n < (r + 2^(m+1)) * (r + 2^(m+1)) →
  is_sqrt n (sqrt_aux (2^m * 2^m) (2*r*2^m) (n - r*r))
| 0 r h₁ h₂ := by apply sqrt_aux_is_sqrt_lemma 0 r n h₁ 0 rfl;
    intro h; simp; [exact ⟨h₁, h⟩, exact ⟨h, h₂⟩]
| (m+1) r h₁ h₂ := begin
    apply sqrt_aux_is_sqrt_lemma
      (m+1) r n h₁ (2^m * 2^m)
      (by simp [shiftr, pow_succ, div2_val, mul_comm, mul_left_comm];
        repeat {rw @nat.mul_div_cancel_left _ 2 dec_trivial});
    intro h,
    { have := sqrt_aux_is_sqrt m r h₁ h,
      simpa [pow_succ, mul_comm, mul_assoc] },
    { rw [pow_succ', mul_two, ← add_assoc] at h₂,
      have := sqrt_aux_is_sqrt m (r + 2^(m+1)) h h₂,
rwa show (r + 2^(m + 1)) * 2^(m+1) = 2 * (r + 2^(m + 1)) * 2^m,
by simp [pow_succ, mul_comm, mul_left_comm] }
end
private lemma sqrt_is_sqrt (n : ℕ) : is_sqrt n (sqrt n) :=
begin
generalize e : size n = s, cases s with s; simp [e, sqrt],
{ rw [size_eq_zero.1 e, is_sqrt], exact dec_trivial },
{ have := sqrt_aux_is_sqrt n (div2 s) 0 (zero_le _),
simp [show 2^div2 s * 2^div2 s = shiftl 1 (bit0 (div2 s)), by
{ generalize: div2 s = x,
change bit0 x with x+x,
rw [one_shiftl, pow_add] }] at this,
apply this,
  rw [← pow_add, ← mul_two], apply size_le.1,
rw e, apply (@div_lt_iff_lt_mul _ _ 2 dec_trivial).1,
rw [div2_val], apply lt_succ_self }
end
theorem sqrt_le (n : ℕ) : sqrt n * sqrt n ≤ n :=
(sqrt_is_sqrt n).left
theorem sqrt_le' (n : ℕ) : (sqrt n) ^ 2 ≤ n :=
eq.trans_le (sq (sqrt n)) (sqrt_le n)
theorem lt_succ_sqrt (n : ℕ) : n < succ (sqrt n) * succ (sqrt n) :=
(sqrt_is_sqrt n).right
theorem lt_succ_sqrt' (n : ℕ) : n < (succ (sqrt n)) ^ 2 :=
trans_rel_left (λ i j, i < j) (lt_succ_sqrt n) (sq (succ (sqrt n))).symm
theorem sqrt_le_add (n : ℕ) : n ≤ sqrt n * sqrt n + sqrt n + sqrt n :=
by rw ← succ_mul; exact le_of_lt_succ (lt_succ_sqrt n)
theorem le_sqrt {m n : ℕ} : m ≤ sqrt n ↔ m*m ≤ n :=
⟨λ h, le_trans (mul_self_le_mul_self h) (sqrt_le n),
 λ h, le_of_lt_succ $ mul_self_lt_mul_self_iff.2 $
   lt_of_le_of_lt h (lt_succ_sqrt n)⟩
theorem le_sqrt' {m n : ℕ} : m ≤ sqrt n ↔ m ^ 2 ≤ n :=
by simpa only [pow_two] using le_sqrt
theorem sqrt_lt {m n : ℕ} : sqrt m < n ↔ m < n*n :=
lt_iff_lt_of_le_iff_le le_sqrt
theorem sqrt_lt' {m n : ℕ} : sqrt m < n ↔ m < n ^ 2 :=
lt_iff_lt_of_le_iff_le le_sqrt'
theorem sqrt_le_self (n : ℕ) : sqrt n ≤ n :=
le_trans (le_mul_self _) (sqrt_le n)
theorem sqrt_le_sqrt {m n : ℕ} (h : m ≤ n) : sqrt m ≤ sqrt n :=
le_sqrt.2 (le_trans (sqrt_le _) h)
@[simp] lemma sqrt_zero : sqrt 0 = 0 :=
by rw [sqrt, size_zero, sqrt._match_1]
theorem sqrt_eq_zero {n : ℕ} : sqrt n = 0 ↔ n = 0 :=
⟨λ h, nat.eq_zero_of_le_zero $ le_of_lt_succ $ (@sqrt_lt n 1).1 $
  by rw [h]; exact dec_trivial,
 by { rintro rfl, simp }⟩
theorem eq_sqrt {n q} : q = sqrt n ↔ q*q ≤ n ∧ n < (q+1)*(q+1) :=
⟨λ e, e.symm ▸ sqrt_is_sqrt n,
 λ ⟨h₁, h₂⟩, le_antisymm (le_sqrt.2 h₁) (le_of_lt_succ $ sqrt_lt.2 h₂)⟩
theorem eq_sqrt' {n q} : q = sqrt n ↔ q ^ 2 ≤ n ∧ n < (q+1) ^ 2 :=
by simpa only [pow_two] using eq_sqrt
theorem le_three_of_sqrt_eq_one {n : ℕ} (h : sqrt n = 1) : n ≤ 3 :=
le_of_lt_succ $ (@sqrt_lt n 2).1 $
by rw [h]; exact dec_trivial
theorem sqrt_lt_self {n : ℕ} (h : 1 < n) : sqrt n < n :=
sqrt_lt.2 $ by
  have := nat.mul_lt_mul_of_pos_left h (lt_of_succ_lt h);
  rwa [mul_one] at this
theorem sqrt_pos {n : ℕ} : 0 < sqrt n ↔ 0 < n := le_sqrt
theorem sqrt_add_eq (n : ℕ) {a : ℕ} (h : a ≤ n + n) : sqrt (n*n + a) = n :=
le_antisymm
  (le_of_lt_succ $ sqrt_lt.2 $ by rw [succ_mul, mul_succ, add_succ, add_assoc];
    exact lt_succ_of_le (nat.add_le_add_left h _))
  (le_sqrt.2 $ nat.le_add_right _ _)
theorem sqrt_add_eq' (n : ℕ) {a : ℕ} (h : a ≤ n + n) : sqrt (n ^ 2 + a) = n :=
(congr_arg (λ i, sqrt (i + a)) (sq n)).trans (sqrt_add_eq n h)
theorem sqrt_eq (n : ℕ) : sqrt (n*n) = n :=
sqrt_add_eq n (zero_le _)
theorem sqrt_eq' (n : ℕ) : sqrt (n ^ 2) = n :=
sqrt_add_eq' n (zero_le _)
theorem exists_mul_self (x : ℕ) :
  (∃ n, n * n = x) ↔ sqrt x * sqrt x = x :=
⟨λ ⟨n, hn⟩, by rw [← hn, sqrt_eq], λ h, ⟨sqrt x, h⟩⟩
theorem exists_mul_self' (x : ℕ) :
  (∃ n, n ^ 2 = x) ↔ (sqrt x) ^ 2 = x :=
by simpa only [pow_two] using exists_mul_self x
theorem sqrt_mul_sqrt_lt_succ (n : ℕ) : sqrt n * sqrt n < n + 1 :=
lt_succ_iff.mpr (sqrt_le _)
theorem sqrt_mul_sqrt_lt_succ' (n : ℕ) : (sqrt n) ^ 2 < n + 1 :=
lt_succ_iff.mpr (sqrt_le' _)
theorem succ_le_succ_sqrt (n : ℕ) : n + 1 ≤ (sqrt n + 1) * (sqrt n + 1) :=
le_of_pred_lt (lt_succ_sqrt _)
theorem succ_le_succ_sqrt' (n : ℕ) : n + 1 ≤ (sqrt n + 1) ^ 2 :=
le_of_pred_lt (lt_succ_sqrt' _)
/-- There are no perfect squares strictly between m² and (m+1)² -/
theorem not_exists_sq {n m : ℕ} (hl : m * m < n) (hr : n < (m + 1) * (m + 1)) :
  ¬ ∃ t, t * t = n :=
begin
  rintro ⟨t, rfl⟩,
  have h1 : m < t, from nat.mul_self_lt_mul_self_iff.mpr hl,
  have h2 : t < m + 1, from nat.mul_self_lt_mul_self_iff.mpr hr,
  exact (not_lt_of_ge $ le_of_lt_succ h2) h1
end
theorem not_exists_sq' {n m : ℕ} (hl : m ^ 2 < n) (hr : n < (m + 1) ^ 2) :
  ¬ ∃ t, t ^ 2 = n :=
by simpa only [pow_two]
  using not_exists_sq (by simpa only [pow_two] using hl) (by simpa only [pow_two] using hr)
end nat
/-
Copyright (c) 2015 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Robert Y. Lewis
-/
import algebra.order.ring
import algebra.group_power.basic
/-!
# Lemmas about the interaction of power operations with order
Note that some lemmas are in `algebra/group_power/lemmas.lean` as they import files which
depend on this file.
-/
variables {A G M R : Type*}
section preorder
variables [monoid M] [preorder M] [covariant_class M M (*) (≤)]
@[to_additive nsmul_le_nsmul_of_le_right, mono]
lemma pow_le_pow_of_le_left' [covariant_class M M (function.swap (*)) (≤)]
  {a b : M} (hab : a ≤ b) : ∀ i : ℕ, a ^ i ≤ b ^ i
| 0 := by simp
| (k+1) := by { rw [pow_succ, pow_succ],
exact mul_le_mul' hab (pow_le_pow_of_le_left' k) }
attribute [mono] nsmul_le_nsmul_of_le_right
@[to_additive nsmul_nonneg]
theorem one_le_pow_of_one_le' {a : M} (H : 1 ≤ a) : ∀ n : ℕ, 1 ≤ a ^ n
| 0 := by simp
| (k + 1) := by { rw pow_succ, exact one_le_mul H (one_le_pow_of_one_le' k) }
@[to_additive nsmul_nonpos]
theorem pow_le_one' {a : M} (H : a ≤ 1) (n : ℕ) : a ^ n ≤ 1 :=
@one_le_pow_of_one_le' (order_dual M) _ _ _ _ H n
@[to_additive nsmul_le_nsmul]
theorem pow_le_pow' {a : M} {n m : ℕ} (ha : 1 ≤ a) (h : n ≤ m) : a ^ n ≤ a ^ m :=
let ⟨k, hk⟩ := nat.le.dest h in
calc a ^ n ≤ a ^ n * a ^ k : le_mul_of_one_le_right' (one_le_pow_of_one_le' ha _)
  ... = a ^ m : by rw [← hk, pow_add]
@[to_additive nsmul_le_nsmul_of_nonpos]
theorem pow_le_pow_of_le_one' {a : M} {n m : ℕ} (ha : a ≤ 1) (h : n ≤ m) : a ^ m ≤ a ^ n :=
@pow_le_pow' (order_dual M) _ _ _ _ _ _ ha h
@[to_additive nsmul_pos]
theorem one_lt_pow' {a : M} (ha : 1 < a) {k : ℕ} (hk : k ≠ 0) : 1 < a ^ k :=
begin
  rcases nat.exists_eq_succ_of_ne_zero hk with ⟨l, rfl⟩,
clear hk,
induction l with l IH,
{ simpa using ha },
{ rw pow_succ,
exact one_lt_mul' ha IH }
end
@[to_additive nsmul_neg]
theorem pow_lt_one' {a : M} (ha : a < 1) {k : ℕ} (hk : k ≠ 0) : a ^ k < 1 :=
@one_lt_pow' (order_dual M) _ _ _ _ ha k hk
@[to_additive nsmul_lt_nsmul]
theorem pow_lt_pow' [covariant_class M M (*) (<)] {a : M} {n m : ℕ} (ha : 1 < a) (h : n < m) :
  a ^ n < a ^ m :=
begin
  rcases nat.le.dest h with ⟨k, rfl⟩, clear h,
  rw [pow_add, pow_succ', mul_assoc, ← pow_succ],
exact lt_mul_of_one_lt_right' _ (one_lt_pow' ha k.succ_ne_zero)
end
end preorder
section linear_order
variables [monoid M] [linear_order M] [covariant_class M M (*) (≤)]
@[to_additive nsmul_nonneg_iff]
lemma one_le_pow_iff {x : M} {n : ℕ} (hn : n ≠ 0) : 1 ≤ x ^ n ↔ 1 ≤ x :=
⟨le_imp_le_of_lt_imp_lt $ λ h, pow_lt_one' h hn, λ h, one_le_pow_of_one_le' h n⟩
@[to_additive nsmul_nonpos_iff]
lemma pow_le_one_iff {x : M} {n : ℕ} (hn : n ≠ 0) : x ^ n ≤ 1 ↔ x ≤ 1 :=
@one_le_pow_iff (order_dual M) _ _ _ _ _ hn
@[to_additive nsmul_pos_iff]
lemma one_lt_pow_iff {x : M} {n : ℕ} (hn : n ≠ 0) : 1 < x ^ n ↔ 1 < x :=
lt_iff_lt_of_le_iff_le (pow_le_one_iff hn)
@[to_additive nsmul_neg_iff]
lemma pow_lt_one_iff {x : M} {n : ℕ} (hn : n ≠ 0) : x ^ n < 1 ↔ x < 1 :=
lt_iff_lt_of_le_iff_le (one_le_pow_iff hn)
@[to_additive nsmul_eq_zero_iff]
lemma pow_eq_one_iff {x : M} {n : ℕ} (hn : n ≠ 0) : x ^ n = 1 ↔ x = 1 :=
by simp only [le_antisymm_iff, pow_le_one_iff hn, one_le_pow_iff hn]
end linear_order
section group
variables [group G] [preorder G] [covariant_class G G (*) (≤)]
@[to_additive zsmul_nonneg]
theorem one_le_zpow {x : G} (H : 1 ≤ x) {n : ℤ} (hn : 0 ≤ n) :
  1 ≤ x ^ n :=
begin
  lift n to ℕ using hn,
rw zpow_coe_nat,
apply one_le_pow_of_one_le' H,
end
end group
namespace canonically_ordered_comm_semiring
variables [canonically_ordered_comm_semiring R]
theorem pow_pos {a : R} (H : 0 < a) (n : ℕ) : 0 < a ^ n :=
pos_iff_ne_zero.2 $ pow_ne_zero _ H.ne'
end canonically_ordered_comm_semiring
section ordered_semiring
variables [ordered_semiring R] {a x y : R} {n m : ℕ}
@[simp] theorem pow_pos (H : 0 < a) : ∀ (n : ℕ), 0 < a ^ n
| 0 := by { nontriviality, rw pow_zero, exact zero_lt_one }
| (n+1) := by { rw pow_succ, exact mul_pos H (pow_pos _) }
@[simp] theorem pow_nonneg (H : 0 ≤ a) : ∀ (n : ℕ), 0 ≤ a ^ n
| 0 := by { rw pow_zero, exact zero_le_one }
| (n+1) := by { rw pow_succ, exact mul_nonneg H (pow_nonneg _) }
theorem pow_add_pow_le (hx : 0 ≤ x) (hy : 0 ≤ y) (hn : n ≠ 0) : x ^ n + y ^ n ≤ (x + y) ^ n :=
begin
  rcases nat.exists_eq_succ_of_ne_zero hn with ⟨k, rfl⟩,
induction k with k ih, { simp only [pow_one] },
let n := k.succ,
have h1 := add_nonneg (mul_nonneg hx (pow_nonneg hy n)) (mul_nonneg hy (pow_nonneg hx n)),
have h2 := add_nonneg hx hy,
  calc x^n.succ + y^n.succ
      ≤ x*x^n + y*y^n + (x*y^n + y*x^n) :
    by { rw [pow_succ _ n, pow_succ _ n], exact le_add_of_nonneg_right h1 }
  ... = (x+y) * (x^n + y^n) :
    by rw [add_mul, mul_add, mul_add, add_comm (y*x^n), ← add_assoc,
      ← add_assoc, add_assoc (x*x^n) (x*y^n), add_comm (x*y^n) (y*y^n), ← add_assoc]
  ... ≤ (x+y)^n.succ :
    by { rw [pow_succ _ n], exact mul_le_mul_of_nonneg_left (ih (nat.succ_ne_zero k)) h2 }
end
theorem pow_lt_pow_of_lt_left (Hxy : x < y) (Hxpos : 0 ≤ x) (Hnpos : 0 < n) :
x ^ n < y ^ n :=
begin
cases lt_or_eq_of_le Hxpos,
  { rw ← tsub_add_cancel_of_le (nat.succ_le_of_lt Hnpos),
induction (n - 1), { simpa only [pow_one] },
rw [pow_add, pow_add, nat.succ_eq_add_one, pow_one, pow_one],
apply mul_lt_mul ih (le_of_lt Hxy) h (le_of_lt (pow_pos (lt_trans h Hxy) _)) },
  { rw [← h, zero_pow Hnpos], apply pow_pos (by rwa ← h at Hxy : 0 < y) }
end
lemma pow_lt_one (h₀ : 0 ≤ a) (h₁ : a < 1) {n : ℕ} (hn : n ≠ 0) : a ^ n < 1 :=
(one_pow n).subst (pow_lt_pow_of_lt_left h₁ h₀ (nat.pos_of_ne_zero hn))
theorem strict_mono_on_pow (hn : 0 < n) : strict_mono_on (λ x : R, x ^ n) (set.Ici 0) :=
λ x hx y hy h, pow_lt_pow_of_lt_left h hx hn
theorem one_le_pow_of_one_le (H : 1 ≤ a) : ∀ (n : ℕ), 1 ≤ a ^ n
| 0 := by rw [pow_zero]
| (n+1) := by { rw pow_succ, simpa only [mul_one] using mul_le_mul H (one_le_pow_of_one_le n)
    zero_le_one (le_trans zero_le_one H) }
lemma pow_mono (h : 1 ≤ a) : monotone (λ n : ℕ, a ^ n) :=
monotone_nat_of_le_succ $ λ n,
by { rw pow_succ, exact le_mul_of_one_le_left (pow_nonneg (zero_le_one.trans h) _) h }
theorem pow_le_pow (ha : 1 ≤ a) (h : n ≤ m) : a ^ n ≤ a ^ m :=
pow_mono ha h
lemma strict_mono_pow (h : 1 < a) : strict_mono (λ n : ℕ, a ^ n) :=
have 0 < a := zero_le_one.trans_lt h,
strict_mono_nat_of_lt_succ $ λ n, by simpa only [one_mul, pow_succ]
  using mul_lt_mul h (le_refl (a ^ n)) (pow_pos this _) this.le
lemma pow_lt_pow (h : 1 < a) (h2 : n < m) : a ^ n < a ^ m :=
strict_mono_pow h h2
lemma pow_lt_pow_iff (h : 1 < a) : a ^ n < a ^ m ↔ n < m :=
(strict_mono_pow h).lt_iff_lt
lemma strict_anti_pow (h₀ : 0 < a) (h₁ : a < 1) : strict_anti (λ n : ℕ, a ^ n) :=
strict_anti_nat_of_succ_lt $ λ n,
by simpa only [pow_succ, one_mul] using mul_lt_mul h₁ le_rfl (pow_pos h₀ n) zero_le_one
lemma pow_lt_pow_iff_of_lt_one (h₀ : 0 < a) (h₁ : a < 1) : a ^ m < a ^ n ↔ n < m :=
(strict_anti_pow h₀ h₁).lt_iff_lt
lemma pow_lt_pow_of_lt_one (h : 0 < a) (ha : a < 1) {i j : ℕ} (hij : i < j) : a ^ j < a ^ i :=
(pow_lt_pow_iff_of_lt_one h ha).2 hij
@[mono] lemma pow_le_pow_of_le_left {a b : R} (ha : 0 ≤ a) (hab : a ≤ b) : ∀ i : ℕ, a^i ≤ b^i
| 0 := by simp
| (k+1) := by { rw [pow_succ, pow_succ],
    exact mul_le_mul hab (pow_le_pow_of_le_left _) (pow_nonneg ha _) (le_trans ha hab) }
lemma one_lt_pow (ha : 1 < a) {n : ℕ} (hn : n ≠ 0) : 1 < a ^ n :=
pow_zero a ▸ pow_lt_pow ha (pos_iff_ne_zero.2 hn)
lemma pow_le_one : ∀ (n : ℕ) (h₀ : 0 ≤ a) (h₁ : a ≤ 1), a ^ n ≤ 1
| 0 h₀ h₁ := (pow_zero a).le
| (n + 1) h₀ h₁ := (pow_succ' a n).le.trans (mul_le_one (pow_le_one n h₀ h₁) h₀ h₁)
lemma sq_pos_of_pos (ha : 0 < a) : 0 < a ^ 2 := by { rw sq, exact mul_pos ha ha }
end ordered_semiring
section ordered_ring
variables [ordered_ring R] {a : R}
lemma sq_pos_of_neg (ha : a < 0) : 0 < a ^ 2 := by { rw sq, exact mul_pos_of_neg_of_neg ha ha }
lemma pow_bit0_pos_of_neg (ha : a < 0) (n : ℕ) : 0 < a ^ bit0 n :=
begin
rw pow_bit0',
exact pow_pos (mul_pos_of_neg_of_neg ha ha) _,
end
lemma pow_bit1_neg (ha : a < 0) (n : ℕ) : a ^ bit1 n < 0 :=
begin
rw [bit1, pow_succ],
exact mul_neg_of_neg_of_pos ha (pow_bit0_pos_of_neg ha n),
end
end ordered_ring
section linear_ordered_semiring
variable [linear_ordered_semiring R]
lemma pow_le_one_iff_of_nonneg {a : R} (ha : 0 ≤ a) {n : ℕ} (hn : n ≠ 0) : a ^ n ≤ 1 ↔ a ≤ 1 :=
begin
  refine ⟨_, pow_le_one n ha⟩,
  rw [← not_lt, ← not_lt],
  exact mt (λ h, one_lt_pow h hn),
end
lemma one_le_pow_iff_of_nonneg {a : R} (ha : 0 ≤ a) {n : ℕ} (hn : n ≠ 0) : 1 ≤ a ^ n ↔ 1 ≤ a :=
begin
  refine ⟨_, λ h, one_le_pow_of_one_le h n⟩,
  rw [← not_lt, ← not_lt],
  exact mt (λ h, pow_lt_one ha h hn),
end
lemma one_lt_pow_iff_of_nonneg {a : R} (ha : 0 ≤ a) {n : ℕ} (hn : n ≠ 0) : 1 < a ^ n ↔ 1 < a :=
lt_iff_lt_of_le_iff_le (pow_le_one_iff_of_nonneg ha hn)
lemma pow_lt_one_iff_of_nonneg {a : R} (ha : 0 ≤ a) {n : ℕ} (hn : n ≠ 0) : a ^ n < 1 ↔ a < 1 :=
lt_iff_lt_of_le_iff_le (one_le_pow_iff_of_nonneg ha hn)
lemma sq_le_one_iff {a : R} (ha : 0 ≤ a) : a^2 ≤ 1 ↔ a ≤ 1 :=
pow_le_one_iff_of_nonneg ha (nat.succ_ne_zero _)
lemma sq_lt_one_iff {a : R} (ha : 0 ≤ a) : a^2 < 1 ↔ a < 1 :=
pow_lt_one_iff_of_nonneg ha (nat.succ_ne_zero _)
lemma one_le_sq_iff {a : R} (ha : 0 ≤ a) : 1 ≤ a^2 ↔ 1 ≤ a :=
one_le_pow_iff_of_nonneg ha (nat.succ_ne_zero _)
lemma one_lt_sq_iff {a : R} (ha : 0 ≤ a) : 1 < a^2 ↔ 1 < a :=
one_lt_pow_iff_of_nonneg ha (nat.succ_ne_zero _)
@[simp] theorem pow_left_inj {x y : R} {n : ℕ} (Hxpos : 0 ≤ x) (Hypos : 0 ≤ y) (Hnpos : 0 < n) :
  x ^ n = y ^ n ↔ x = y :=
(@strict_mono_on_pow R _ _ Hnpos).inj_on.eq_iff Hxpos Hypos
lemma lt_of_pow_lt_pow {a b : R} (n : ℕ) (hb : 0 ≤ b) (h : a ^ n < b ^ n) : a < b :=
lt_of_not_ge $ λ hn, not_lt_of_ge (pow_le_pow_of_le_left hb hn _) h
lemma le_of_pow_le_pow {a b : R} (n : ℕ) (hb : 0 ≤ b) (hn : 0 < n) (h : a ^ n ≤ b ^ n) : a ≤ b :=
le_of_not_lt $ λ h1, not_le_of_lt (pow_lt_pow_of_lt_left h1 hb hn) h
@[simp] lemma sq_eq_sq {a b : R} (ha : 0 ≤ a) (hb : 0 ≤ b) : a ^ 2 = b ^ 2 ↔ a = b :=
pow_left_inj ha hb dec_trivial
end linear_ordered_semiring
section linear_ordered_ring
variable [linear_ordered_ring R]
lemma pow_abs (a : R) (n : ℕ) : |a| ^ n = |a ^ n| :=
((abs_hom.to_monoid_hom : R →* R).map_pow a n).symm
lemma abs_neg_one_pow (n : ℕ) : |(-1 : R) ^ n| = 1 :=
by rw [← pow_abs, abs_neg, abs_one, one_pow]
theorem pow_bit0_nonneg (a : R) (n : ℕ) : 0 ≤ a ^ bit0 n :=
by { rw pow_bit0, exact mul_self_nonneg _ }
theorem sq_nonneg (a : R) : 0 ≤ a ^ 2 :=
pow_bit0_nonneg a 1
alias sq_nonneg ← pow_two_nonneg
theorem pow_bit0_pos {a : R} (h : a ≠ 0) (n : ℕ) : 0 < a ^ bit0 n :=
(pow_bit0_nonneg a n).lt_of_ne (pow_ne_zero _ h).symm
theorem sq_pos_of_ne_zero (a : R) (h : a ≠ 0) : 0 < a ^ 2 :=
pow_bit0_pos h 1
alias sq_pos_of_ne_zero ← pow_two_pos_of_ne_zero
variables {x y : R}
theorem sq_abs (x : R) : |x| ^ 2 = x ^ 2 :=
by simpa only [sq] using abs_mul_abs_self x
theorem abs_sq (x : R) : |x ^ 2| = x ^ 2 :=
by simpa only [sq] using abs_mul_self x
theorem sq_lt_sq (h : |x| < y) : x ^ 2 < y ^ 2 :=
by simpa only [sq_abs] using pow_lt_pow_of_lt_left h (abs_nonneg x) (1:ℕ).succ_pos
theorem sq_lt_sq' (h1 : -y < x) (h2 : x < y) : x ^ 2 < y ^ 2 :=
sq_lt_sq (abs_lt.mpr ⟨h1, h2⟩)
theorem sq_le_sq (h : |x| ≤ |y|) : x ^ 2 ≤ y ^ 2 :=
by simpa only [sq_abs] using pow_le_pow_of_le_left (abs_nonneg x) h 2
theorem sq_le_sq' (h1 : -y ≤ x) (h2 : x ≤ y) : x ^ 2 ≤ y ^ 2 :=
sq_le_sq (le_trans (abs_le.mpr ⟨h1, h2⟩) (le_abs_self _))
theorem abs_lt_abs_of_sq_lt_sq (h : x^2 < y^2) : |x| < |y| :=
lt_of_pow_lt_pow 2 (abs_nonneg y) $ by rwa [← sq_abs x, ← sq_abs y] at h
theorem abs_lt_of_sq_lt_sq (h : x^2 < y^2) (hy : 0 ≤ y) : |x| < y :=
begin
  rw [← abs_of_nonneg hy],
  exact abs_lt_abs_of_sq_lt_sq h,
end
theorem abs_lt_of_sq_lt_sq' (h : x^2 < y^2) (hy : 0 ≤ y) : -y < x ∧ x < y :=
abs_lt.mp $ abs_lt_of_sq_lt_sq h hy
theorem abs_le_abs_of_sq_le_sq (h : x^2 ≤ y^2) : |x| ≤ |y| :=
le_of_pow_le_pow 2 (abs_nonneg y) (1:ℕ).succ_pos $ by rwa [← sq_abs x, ← sq_abs y] at h
theorem abs_le_of_sq_le_sq (h : x^2 ≤ y^2) (hy : 0 ≤ y) : |x| ≤ y :=
begin
  rw [← abs_of_nonneg hy],
  exact abs_le_abs_of_sq_le_sq h,
end
theorem abs_le_of_sq_le_sq' (h : x^2 ≤ y^2) (hy : 0 ≤ y) : -y ≤ x ∧ x ≤ y :=
abs_le.mp $ abs_le_of_sq_le_sq h hy
end linear_ordered_ring
section linear_ordered_comm_ring
variables [linear_ordered_comm_ring R]
/-- Arithmetic mean-geometric mean (AM-GM) inequality for linearly ordered commutative rings. -/
lemma two_mul_le_add_sq (a b : R) : 2 * a * b ≤ a ^ 2 + b ^ 2 :=
sub_nonneg.mp ((sub_add_eq_add_sub _ _ _).subst ((sub_sq a b).subst (sq_nonneg _)))
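-- Indeed `0 ≤ (a - b)^2 = a^2 - 2*a*b + b^2`, which rearranges to the inequality above.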
alias two_mul_le_add_sq ← two_mul_le_add_pow_two
end linear_ordered_comm_ring
% Extract WINDOWSIZE consecutive frames at position SLIDINGWINDOWIDX from a
% given image sequence.
function slidingWindow = assembleSlidingWindows(I, startFrame, windowSize, slidingWindowIdx)
% Get the index of the reference frame for the current sliding window.
refFrameIdx = startFrame + slidingWindowIdx - 1;
% Extract sliding window from the given input sequence.
slidingWindow.frames = I(:,:,(refFrameIdx - floor(windowSize / 2)):(refFrameIdx + floor(windowSize / 2)));
% Capture reference frame for the current window.
slidingWindow.referenceFrame = I(:,:,refFrameIdx);
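% Example (illustrative; assumes an odd windowSize, since the range above spans
% 2*floor(windowSize/2) + 1 frames):
%   I = rand(64, 64, 100);                    % 100 frames of 64x64 pixels
%   sw = assembleSlidingWindows(I, 10, 5, 1); % refFrameIdx = 10
%   % sw.frames now holds frames 8:12 and sw.referenceFrame is frame 10.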
June 1, 1944 - Squadron party. Real whiskey, but didn't get too hi - just happy. Met a W.A.F. (Women Air Force) & we talked English history.
I keep writing about orgasms as crying & then I think about that.
Eva Hagberg Fisher's forthcoming book (out next week) How To Be Loved figuratively fell in my lap. I was at coffee with a friend, saying I needed a new book to read, but I needed that book to be about recovery because I just needed to be heard and understood, and lo and behold, my inbox pinged.
A Godly woman has to make a living, after all.
My friend told me, it's just about you needing control. I did not respond when she said this. Considering nothing is just about anything.
"We argue about whether we should be more frightened" and "I tried to lose myself once"
She had grown up and now lived in a cold climate that encouraged looking down.
Today on Dagobah, Ep. 1: "The Landing"
Please God forgive me. Please God forgive me.
The day my brother died, my mom ran naked in the street.
subroutine pair_distances
! Calculate pair_dist(i,j) : distances for all the pairs of atoms
! x_dist(i,j) is xyz_dist(1,i,j)
! y_dist(i,j) is xyz_dist(2,i,j)
! z_dist(i,j) is xyz_dist(3,i,j)
! cutoffmat(i,j) : the cutoff matrix (logical), if needed
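! Note: xyz_dist(:,i,j) = -xyz_dist(:,j,i) by construction, so pair_dist is
! symmetric; the cutoff loop below exploits this by filling both triangles.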
use cluster, only : natom
use enforce, only : x, y, z, u, v, w
use distance
!USE OMP_LIB !>>> CPU_TIME
implicit none
integer :: i, j
!REAL(8) :: t_start, t_end !>>> CPU_TIME
!t_start = OMP_GET_WTIME() !>>> CPU_TIME
!$OMP PARALLEL DEFAULT(SHARED)
!----------------------------------------------
! Creating x_dist, y_dist, z_dist and pair_dist
! This should replicate the results of
! MolDyn M2.3; I need a way of dealing with the
! problem of reproducibility
!$OMP SECTIONS PRIVATE(i, j)
!$OMP SECTION
do j=1, natom
do i=1, natom
xyz_dist(1,i,j) = x(j) +u(j) -x(i) -u(i)
end do
end do
!$OMP SECTION
do j=1, natom
do i=1, natom
xyz_dist(2,i,j) = y(j) +v(j) -y(i) -v(i)
end do
end do
!$OMP SECTION
do j=1, natom
do i=1, natom
xyz_dist(3,i,j) = z(j) +w(j) -z(i) -w(i)
end do
end do
!$OMP END SECTIONS
!$OMP DO PRIVATE(i, j), SCHEDULE(STATIC)
do i=1, natom
do j=1, natom
pair_dist(j,i) = DSQRT(SUM(xyz_dist(:,j,i)**2))
end do
end do
!$OMP END DO
!!write(*,*) "pair_distances:> pair(1,2) pair(1,natom)",pair_dist(1,2), pair_dist(1,natom)
!
!---------------------------------------------------------
! Creating the cutoff matrix (logical),
! considering different pair kinds: (1)A-A, (2)B-B, (3)A-B
!$OMP SINGLE PRIVATE(i,j)
IF (cutoffmat_req) THEN
DO i = 1, natom-1
DO j = i+1, natom
cutoffmat(j,i) = (pair_dist(j,i) .LE. dcutoff(pairkindmat(j,i) ) )
cutoffmat(i,j) = cutoffmat(j,i)
END DO
END DO
END IF
!$OMP END SINGLE
!$OMP END PARALLEL
!t_end = OMP_GET_WTIME() !>>> CPU_TIME
!! Writing the cpu time for the subroutine !>>> CPU_TIME
!WRITE(500,'(1I10, 4X, 1F10.8)') ipas, t_end -t_start !>>> CPU_TIME
end subroutine pair_distances
#ifndef SCOL_H
#define SCOL_H
#include "Common.h"
// SP_IVP.h
// Spherical Collapse IVP
//#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_deriv.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <nvector/nvector_serial.h> /* access to serial N_Vector */
#include <sunmatrix/sunmatrix_dense.h> /* access to dense SUNMatrix */
#include <sunlinsol/sunlinsol_dense.h> /* access to dense SUNLinearSolver */
#include <cvode/cvode_direct.h> /* access to CVDls interface */
#include <sundials/sundials_types.h> /* defs. of realtype, sunindextype */
#include <sundials/sundials_math.h> /* contains the macros ABS, SUNSQR, EXP */
#define Ith(v,i) NV_Ith_S(v,i-1) /* Ith numbers components 1..NEQ */
#define IJth(A,i,j) SM_ELEMENT_D(A,i-1,j-1) /* IJth numbers rows,cols 1..NEQ */
#define NEQ 2 /* number of equations */
#define Y1 RCONST(0.0) /* initial y components */
#define RTOL RCONST(1.0e-5) /* scalar relative tolerance */
#define ATOL1 RCONST(1.0e-11) /* vector absolute tolerance components */
#define ATOL2 RCONST(1.0e-11)
#define T00 RCONST(-10.41431317630211772495840705232694745063781738281250) /* initial time */
#define NOUT 1024 /* number of output times */
#define NOUT_DC 10 /* number of output times */
#define HALF RCONST(0.5) /* 0.5 */
#define ONE RCONST(1.0) /* 1.0 */
#define TWO RCONST(2.0)
#define THREE RCONST(3.0) /* 3.0 */
#define DELTA1 RCONST(3e-5) /* Delta_i1 */
#define DELTA2 RCONST(0.00012) /* Delta_i2 */
#define EPSILON RCONST(1.0e-9)
#define ZERO RCONST(0.0)
#define NINE RCONST(9.0)
#define TEN RCONST(10.0)
#define Gnewton RCONST(4.302e-09)
#define coef RCONST(8987404.41) // 1/H0^2
// structures to store spherical collapse calculations
typedef struct arrays{
int count;
double xx[1002];
double yy[1002]; } *arrays_T;
typedef struct arrays3D{
int count;
double xx[1002];
double yy[1002];
double zz[1002]; } *arrays_T3;
typedef struct usdat {
realtype IC;
realtype OM;
realtype Rth;
realtype T1;
double par1;
double par2;
double par3;
int mymg;
double maxt;
gsl_spline *spline;
gsl_interp_accel *acc;
} *UserData;
extern int check_flagscol(void *flagvalue, const char *funcname, int opt); //
class SCOL {
public:
// used functions in example file
double maxP_zeta(double sig2, double dsig2dR, double OM, double Z);
double Delta_Lambda(double OM, double Z); //
// solves for y_environment
int yenv(double OM_REAL, double XF, double delta_envi, arrays_T xxyy); // gives the environmental dependence of spherical collapse
// solves for y_halo
int SphericalCollapse(double *dC, arrays_T3 xxyyzz, UserData data_vec, double TMULT_REAL, double delta_g); // spherical collapse solver
// solves for a_virial
double myscol(double myscolparams[], double acol, double omega0, double Rthp, double sig1, double sig2, double pars[], int mymg, int yenvf); // solves for virial quantities and stores them in array myscolparams
void PrintOutput(realtype t, realtype y1, realtype y2);
void PrintRootInfo(int root_f1); //
double funcscol(double xi, void *user_data); //
};
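/* Illustrative usage sketch (hypothetical parameter values; names as declared
 * above):
 *   SCOL scol;
 *   struct arrays env;                            // environment trajectory
 *   env.count = 0;
 *   int flag = scol.yenv(0.3, 1.0, -0.4, &env);   // OM, XF, delta_envi
 *   check_flagscol(&flag, "yenv", 1);             // CVODE-style error check
 */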
#endif
lemma eq_vector_fraction_iff [vector_add_divide_simps]: fixes x :: "'a :: real_vector" shows "(x = (u / v) *\<^sub>R a) \<longleftrightarrow> (if v=0 then x = 0 else v *\<^sub>R x = u *\<^sub>R a)"
(*
Author: Akihisa Yamada (2018-2021)
License: LGPL (see file COPYING.LESSER)
*)
section \<open>Binary Relations\<close>
text \<open>We start with basic properties of binary relations.\<close>
theory Binary_Relations
imports
(* To verify that we don't use the axiom of choice, import
HOL.Complete_Partial_Order HOL.Wellfounded
instead of *) Main
begin
lemma conj_imp_eq_imp_imp: "(P \<and> Q \<Longrightarrow> PROP R) \<equiv> (P \<Longrightarrow> Q \<Longrightarrow> PROP R)"
by standard simp_all
lemma tranclp_trancl: "r\<^sup>+\<^sup>+ = (\<lambda>x y. (x,y) \<in> {(a,b). r a b}\<^sup>+)"
by (auto simp: tranclp_trancl_eq[symmetric])
lemma tranclp_id[simp]: "transp r \<Longrightarrow> tranclp r = r"
using trancl_id[of "{(x,y). r x y}", folded transp_trans] by (auto simp:tranclp_trancl)
lemma transp_tranclp[simp]: "transp (tranclp r)" by (auto simp: tranclp_trancl transp_trans)
lemma funpow_dom: "f ` A \<subseteq> A \<Longrightarrow> (f^^n) ` A \<subseteq> A" by (induct n, auto)
text \<open>Below we introduce an Isabelle-notation for $\{ \ldots x\ldots \mid x \in X \}$.\<close>
syntax
"_range" :: "'a \<Rightarrow> idts \<Rightarrow> 'a set" ("(1{_ /|./ _})")
"_image" :: "'a \<Rightarrow> pttrn \<Rightarrow> 'a set \<Rightarrow> 'a set" ("(1{_ /|./ (_/ \<in> _)})")
translations
"{e |. p}" \<rightleftharpoons> "{e | p. CONST True}"
"{e |. p \<in> A}" \<rightleftharpoons> "CONST image (\<lambda>p. e) A"
lemma image_constant:
assumes "\<And>i. i \<in> I \<Longrightarrow> f i = y"
shows "f ` I = (if I = {} then {} else {y})"
using assms by auto
subsection \<open>Various Definitions\<close>
text \<open>Here we introduce various definitions for binary relations.
The first one is our abbreviation for the dual of a relation.\<close>
abbreviation(input) dual ("(_\<^sup>-)" [1000] 1000) where "r\<^sup>- x y \<equiv> r y x"
lemma conversep_is_dual[simp]: "conversep = dual" by auto
text \<open>Monotonicity is already defined in the library, but we want one restricted to a domain.\<close>
definition monotone_on where
"monotone_on X r s f \<equiv> \<forall>x y. x \<in> X \<longrightarrow> y \<in> X \<longrightarrow> r x y \<longrightarrow> s (f x) (f y)"
lemmas monotone_onI = monotone_on_def[unfolded atomize_eq, THEN iffD2, rule_format]
lemma monotone_onD: "monotone_on X r s f \<Longrightarrow> r x y \<Longrightarrow> x \<in> X \<Longrightarrow> y \<in> X \<Longrightarrow> s (f x) (f y)"
by (auto simp: monotone_on_def)
lemmas monotone_onE = monotone_on_def[unfolded atomize_eq, THEN iffD1, elim_format, rule_format]
lemma monotone_on_UNIV[simp]: "monotone_on UNIV = monotone"
by (intro ext, auto simp: monotone_on_def monotone_def)
lemma monotone_on_dual: "monotone_on X r s f \<Longrightarrow> monotone_on X r\<^sup>- s\<^sup>- f"
by (auto simp: monotone_on_def)
lemma monotone_on_id: "monotone_on X r r id"
by (auto simp: monotone_on_def)
lemma monotone_on_cmono: "A \<subseteq> B \<Longrightarrow> monotone_on B \<le> monotone_on A"
by (intro le_funI, auto simp: monotone_on_def)
text \<open>Here we define the following notions in a standard manner\<close>
text \<open>The symmetric part of a relation:\<close>
definition sympartp where "sympartp r x y \<equiv> r x y \<and> r y x"
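text \<open>For instance, the symmetric part of a partial order is equality, and the
symmetric part of a quasi-order is its associated equivalence relation.\<close>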
lemma sympartpI[intro]:
fixes r (infix "\<sqsubseteq>" 50)
assumes "x \<sqsubseteq> y" and "y \<sqsubseteq> x" shows "sympartp (\<sqsubseteq>) x y"
using assms by (auto simp: sympartp_def)
lemma sympartpE[elim]:
fixes r (infix "\<sqsubseteq>" 50)
assumes "sympartp (\<sqsubseteq>) x y" and "x \<sqsubseteq> y \<Longrightarrow> y \<sqsubseteq> x \<Longrightarrow> thesis" shows thesis
using assms by (auto simp: sympartp_def)
lemma sympartp_dual: "sympartp r\<^sup>- = sympartp r"
by (auto intro!:ext simp: sympartp_def)
lemma sympartp_eq[simp]: "sympartp (=) = (=)" by auto
lemma reflclp_sympartp[simp]: "(sympartp r)\<^sup>=\<^sup>= = sympartp r\<^sup>=\<^sup>=" by auto
definition "equivpartp r x y \<equiv> x = y \<or> r x y \<and> r y x"
lemma sympartp_reflclp_equivp[simp]: "sympartp r\<^sup>=\<^sup>= = equivpartp r" by (auto intro!:ext simp: equivpartp_def)
lemma equivpartI[simp]: "equivpartp r x x"
and sympartp_equivpartpI: "sympartp r x y \<Longrightarrow> equivpartp r x y"
and equivpartpCI[intro]: "(x \<noteq> y \<Longrightarrow> sympartp r x y) \<Longrightarrow> equivpartp r x y"
by (auto simp:equivpartp_def)
lemma equivpartpE[elim]:
assumes "equivpartp r x y"
and "x = y \<Longrightarrow> thesis"
and "r x y \<Longrightarrow> r y x \<Longrightarrow> thesis"
shows "thesis"
using assms by (auto simp: equivpartp_def)
lemma equivpartp_eq[simp]: "equivpartp (=) = (=)" by auto
lemma sympartp_equivpartp[simp]: "sympartp (equivpartp r) = (equivpartp r)"
and equivpartp_equivpartp[simp]: "equivpartp (equivpartp r) = (equivpartp r)"
and equivpartp_sympartp[simp]: "equivpartp (sympartp r) = (equivpartp r)"
by (auto 0 5 intro!:ext)
lemma equivpartp_dual: "equivpartp r\<^sup>- = equivpartp r"
by (auto intro!:ext simp: equivpartp_def)
text \<open>The asymmetric part:\<close>
definition "asympartp r x y \<equiv> r x y \<and> \<not> r y x"
lemma asympartpE[elim]:
fixes r (infix "\<sqsubseteq>" 50)
shows "asympartp (\<sqsubseteq>) x y \<Longrightarrow> (x \<sqsubseteq> y \<Longrightarrow> \<not>y \<sqsubseteq> x \<Longrightarrow> thesis) \<Longrightarrow> thesis"
by (auto simp: asympartp_def)
lemmas asympartpI[intro] = asympartp_def[unfolded atomize_eq, THEN iffD2, unfolded conj_imp_eq_imp_imp, rule_format]
lemma asympartp_eq[simp]: "asympartp (=) = bot" by auto
lemma asympartp_sympartp [simp]: "asympartp (sympartp r) = bot"
and sympartp_asympartp [simp]: "sympartp (asympartp r) = bot"
by (auto intro!: ext)
text \<open>Restriction to a set:\<close>
definition Restrp (infixl "\<restriction>" 60) where "(r \<restriction> A) a b \<equiv> a \<in> A \<and> b \<in> A \<and> r a b"
lemmas RestrpI[intro!] = Restrp_def[unfolded atomize_eq, THEN iffD2, unfolded conj_imp_eq_imp_imp]
lemmas RestrpE[elim!] = Restrp_def[unfolded atomize_eq, THEN iffD1, elim_format, unfolded conj_imp_eq_imp_imp]
lemma Restrp_UNIV[simp]: "r \<restriction> UNIV \<equiv> r" by (auto simp: atomize_eq)
lemma Restrp_Restrp[simp]: "r \<restriction> A \<restriction> B \<equiv> r \<restriction> A \<inter> B" by (auto simp: atomize_eq Restrp_def)
lemma sympartp_Restrp[simp]: "sympartp (r \<restriction> A) \<equiv> sympartp r \<restriction> A"
by (auto simp: atomize_eq)
text \<open>Relational images:\<close>
definition Imagep (infixr "```" 59) where "r ``` A \<equiv> {b. \<exists>a \<in> A. r a b}"
lemma Imagep_Image: "r ``` A = {(a,b). r a b} `` A"
by (auto simp: Imagep_def)
lemma in_Imagep: "b \<in> r ``` A \<longleftrightarrow> (\<exists>a \<in> A. r a b)" by (auto simp: Imagep_def)
lemma ImagepI: "a \<in> A \<Longrightarrow> r a b \<Longrightarrow> b \<in> r ``` A" by (auto simp: in_Imagep)
lemma subset_Imagep: "B \<subseteq> r ``` A \<longleftrightarrow> (\<forall>b\<in>B. \<exists>a\<in>A. r a b)"
by (auto simp: Imagep_def)
text \<open>Bounds of a set:\<close>
definition "bound X r b \<equiv> \<forall>x \<in> X. r x b"
lemma
fixes r (infix "\<sqsubseteq>" 50)
shows boundI[intro!]: "(\<And>x. x \<in> X \<Longrightarrow> x \<sqsubseteq> b) \<Longrightarrow> bound X (\<sqsubseteq>) b"
and boundE[elim]: "bound X (\<sqsubseteq>) b \<Longrightarrow> ((\<And>x. x \<in> X \<Longrightarrow> x \<sqsubseteq> b) \<Longrightarrow> thesis) \<Longrightarrow> thesis"
by (auto simp: bound_def)
lemma bound_empty: "bound {} = (\<lambda>r x. True)" by auto
lemma bound_insert[simp]:
fixes r (infix "\<sqsubseteq>" 50)
shows "bound (insert x X) (\<sqsubseteq>) b \<longleftrightarrow> x \<sqsubseteq> b \<and> bound X (\<sqsubseteq>) b" by auto
text \<open>Extreme (greatest) elements in a set:\<close>
definition "extreme X r e \<equiv> e \<in> X \<and> (\<forall>x \<in> X. r x e)"
lemma
fixes r (infix "\<sqsubseteq>" 50)
shows extremeI[intro]: "e \<in> X \<Longrightarrow> (\<And>x. x \<in> X \<Longrightarrow> x \<sqsubseteq> e) \<Longrightarrow> extreme X (\<sqsubseteq>) e"
and extremeD: "extreme X (\<sqsubseteq>) e \<Longrightarrow> e \<in> X" "extreme X (\<sqsubseteq>) e \<Longrightarrow> (\<And>x. x \<in> X \<Longrightarrow> x \<sqsubseteq> e)"
and extremeE[elim]: "extreme X (\<sqsubseteq>) e \<Longrightarrow> (e \<in> X \<Longrightarrow> (\<And>x. x \<in> X \<Longrightarrow> x \<sqsubseteq> e) \<Longrightarrow> thesis) \<Longrightarrow> thesis"
by (auto simp: extreme_def)
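text \<open>For example, over the natural numbers \<open>extreme {0,1,2} (\<le>) 2\<close> holds, and
\<open>extreme X (\<ge>) e\<close> states that \<open>e\<close> is a least element of \<open>X\<close>.\<close>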
lemma
fixes r (infix "\<sqsubseteq>" 50)
shows extreme_UNIV[simp]: "extreme UNIV (\<sqsubseteq>) t \<longleftrightarrow> (\<forall>x. x \<sqsubseteq> t)" by auto
lemma extremes_equiv: "extreme X r b \<Longrightarrow> extreme X r c \<Longrightarrow> sympartp r b c" by blast
lemma bound_cmono: assumes "X \<subseteq> Y" shows "bound Y \<le> bound X"
using assms by auto
lemma sympartp_sympartp[simp]: "sympartp (sympartp r) = sympartp r" by (auto intro!:ext)
text \<open>Now suprema and infima are given uniformly as follows.
The definition is restricted to a given set.
\<close>
context
fixes A :: "'a set" and less_eq :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>" 50)
begin
abbreviation "extreme_bound X \<equiv> extreme {b \<in> A. bound X (\<sqsubseteq>) b} (\<lambda>x y. y \<sqsubseteq> x)"
lemma extreme_boundI[intro]:
assumes "\<And>b. bound X (\<sqsubseteq>) b \<Longrightarrow> b \<in> A \<Longrightarrow> s \<sqsubseteq> b" and "\<And>x. x \<in> X \<Longrightarrow> x \<sqsubseteq> s" and "s \<in> A"
shows "extreme_bound X s"
using assms by auto
lemma extreme_bound_bound: "extreme_bound X y \<Longrightarrow> x \<in> X \<Longrightarrow> x \<sqsubseteq> y" by auto
lemma extreme_bound_mono:
assumes XY: "X \<subseteq> Y"
and sX: "extreme_bound X sX"
and sY: "extreme_bound Y sY"
shows "sX \<sqsubseteq> sY"
proof-
have "bound X (\<sqsubseteq>) sY" using XY sY by force
with sX sY show ?thesis by (auto 0 4)
qed
lemma extreme_bound_iff:
shows "extreme_bound X s \<longleftrightarrow> s \<in> A \<and> (\<forall>c \<in> A. (\<forall>x \<in> X. x \<sqsubseteq> c) \<longrightarrow> s \<sqsubseteq> c) \<and> (\<forall>x \<in> X. x \<sqsubseteq> s)"
by (auto simp: extreme_def)
lemma extreme_bound_singleton_refl[simp]:
"extreme_bound {x} x \<longleftrightarrow> x \<in> A \<and> x \<sqsubseteq> x" by auto
lemma extreme_bound_image_const:
"x \<sqsubseteq> x \<Longrightarrow> I \<noteq> {} \<Longrightarrow> (\<And>i. i \<in> I \<Longrightarrow> f i = x) \<Longrightarrow> x \<in> A \<Longrightarrow> extreme_bound (f ` I) x"
by (auto simp: image_constant)
lemma extreme_bound_UN_const:
"x \<sqsubseteq> x \<Longrightarrow> I \<noteq> {} \<Longrightarrow> (\<And>i y. i \<in> I \<Longrightarrow> P i y \<longleftrightarrow> x = y) \<Longrightarrow> x \<in> A \<Longrightarrow>
extreme_bound (\<Union>i\<in>I. {y. P i y}) x"
by auto
end
context
fixes ir :: "'i \<Rightarrow> 'i \<Rightarrow> bool" (infix "\<preceq>" 50)
and r :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>" 50)
and f and A and e and I
assumes fIA: "f ` I \<subseteq> A"
and mono: "monotone_on I (\<preceq>) (\<sqsubseteq>) f"
and e: "extreme I (\<preceq>) e"
begin
lemma monotone_extreme_imp_extreme_bound:
"extreme_bound A (\<sqsubseteq>) (f ` I) (f e)"
using monotone_onD[OF mono] e fIA
by (intro extreme_boundI, auto simp: image_def elim!: extremeE)
lemma monotone_extreme_extreme_boundI:
"x = f e \<Longrightarrow> extreme_bound A (\<sqsubseteq>) (f ` I) x"
using monotone_extreme_imp_extreme_bound by auto
end
subsection \<open>Locales for Binary Relations\<close>
text \<open>We now define basic properties of binary relations,
in form of \emph{locales}~\<^cite>\<open>"Kammuller00" and "locale"\<close>.\<close>
subsubsection \<open>Syntactic Locales\<close>
text \<open>The following locales do not assume anything, but provide infix notations for
relations.\<close>
locale less_eq_syntax =
fixes less_eq :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>" 50)
locale less_syntax =
fixes less :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubset>" 50)
locale equivalence_syntax =
fixes equiv :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sim>" 50)
begin
abbreviation equiv_class ("[_]\<^sub>\<sim>") where "[x]\<^sub>\<sim> \<equiv> { y. x \<sim> y }"
end
text \<open>The next locales introduce abbreviations for the dual relation etc.
To avoid needless constants, one should be careful when declaring them as sublocales.\<close>
locale less_eq_notations = less_eq_syntax
begin
abbreviation (input) greater_eq (infix "\<sqsupseteq>" 50) where "x \<sqsupseteq> y \<equiv> y \<sqsubseteq> x"
abbreviation sym (infix "\<sim>" 50) where "(\<sim>) \<equiv> sympartp (\<sqsubseteq>)"
abbreviation less (infix "\<sqsubset>" 50) where "(\<sqsubset>) \<equiv> asympartp (\<sqsubseteq>)"
abbreviation greater (infix "\<sqsupset>" 50) where "(\<sqsupset>) \<equiv> (\<sqsubset>)\<^sup>-"
abbreviation equiv (infix "(\<simeq>)" 50) where "(\<simeq>) \<equiv> equivpartp (\<sqsubseteq>)"
lemma asym_cases[consumes 1, case_names asym sym]:
assumes "x \<sqsubseteq> y" and "x \<sqsubset> y \<Longrightarrow> thesis" and "x \<sim> y \<Longrightarrow> thesis"
shows thesis
using assms by auto
end
locale less_notations = less_syntax
begin
abbreviation (input) greater (infix "\<sqsupset>" 50) where "x \<sqsupset> y \<equiv> y \<sqsubset> x"
end
locale related_set =
fixes A :: "'a set" and less_eq :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>" 50)
subsubsection \<open>Basic Properties of Relations\<close>
text \<open>In the following we define basic properties in form of locales.\<close>
text \<open>Reflexivity restricted on a set:\<close>
locale reflexive = related_set +
assumes refl[intro]: "x \<in> A \<Longrightarrow> x \<sqsubseteq> x"
begin
lemma eq_implies: "x = y \<Longrightarrow> x \<in> A \<Longrightarrow> x \<sqsubseteq> y" by auto
lemma extreme_singleton[simp]: "x \<in> A \<Longrightarrow> extreme {x} (\<sqsubseteq>) y \<longleftrightarrow> x = y" by auto
lemma extreme_bound_singleton: "x \<in> A \<Longrightarrow> extreme_bound A (\<sqsubseteq>) {x} x" by auto
lemma reflexive_subset: "B \<subseteq> A \<Longrightarrow> reflexive B (\<sqsubseteq>)" apply unfold_locales by auto
end
declare reflexive.intro[intro!]
lemma reflexiveE[elim]:
assumes "reflexive A r" and "(\<And>x. x \<in> A \<Longrightarrow> r x x) \<Longrightarrow> thesis" shows thesis
using assms by (auto simp: reflexive.refl)
lemma reflexive_cong:
"(\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b) \<Longrightarrow> reflexive A r \<longleftrightarrow> reflexive A r'"
by (simp add: reflexive_def)
locale irreflexive = related_set A "(\<sqsubset>)" for A and less (infix "\<sqsubset>" 50) +
assumes irrefl: "x \<in> A \<Longrightarrow> \<not> x \<sqsubset> x"
begin
lemma irreflD[simp]: "x \<sqsubset> x \<Longrightarrow> \<not>x \<in> A" by (auto simp: irrefl)
lemma implies_not_eq: "x \<sqsubset> y \<Longrightarrow> x \<in> A \<Longrightarrow> x \<noteq> y" by auto
lemma Restrp_irreflexive: "irreflexive UNIV ((\<sqsubset>)\<restriction>A)"
apply unfold_locales by auto
lemma irreflexive_subset: "B \<subseteq> A \<Longrightarrow> irreflexive B (\<sqsubset>)" apply unfold_locales by auto
end
declare irreflexive.intro[intro!]
lemma irreflexive_cong:
"(\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b) \<Longrightarrow> irreflexive A r \<longleftrightarrow> irreflexive A r'"
by (simp add: irreflexive_def)
locale transitive = related_set +
assumes trans[trans]: "x \<sqsubseteq> y \<Longrightarrow> y \<sqsubseteq> z \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> z \<in> A \<Longrightarrow> x \<sqsubseteq> z"
begin
interpretation less_eq_notations.
lemma Restrp_transitive: "transitive UNIV ((\<sqsubseteq>)\<restriction>A)"
apply unfold_locales
by (auto intro: trans)
lemma bound_trans[trans]: "bound X (\<sqsubseteq>) b \<Longrightarrow> b \<sqsubseteq> c \<Longrightarrow> X \<subseteq> A \<Longrightarrow> b \<in> A \<Longrightarrow> c \<in> A \<Longrightarrow> bound X (\<sqsubseteq>) c"
by (auto 0 4 dest: trans)
lemma transitive_subset:
assumes BA: "B \<subseteq> A" shows "transitive B (\<sqsubseteq>)"
apply unfold_locales
using trans BA by blast
lemma asympartp_transitive: "transitive A (\<sqsubset>)"
apply unfold_locales by (auto dest:trans)
lemma reflclp_transitive: "transitive A (\<sqsubseteq>)\<^sup>=\<^sup>="
apply unfold_locales by (auto dest: trans)
text \<open>The symmetric part is also transitive, but this is done in the later semiattractive locale\<close>
end
declare transitive.intro[intro?]
lemma transitive_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b" shows "transitive A r \<longleftrightarrow> transitive A r'"
proof (intro iffI)
show "transitive A r \<Longrightarrow> transitive A r'"
apply (intro transitive.intro)
apply (unfold r[symmetric])
using transitive.trans.
show "transitive A r' \<Longrightarrow> transitive A r"
apply (intro transitive.intro)
apply (unfold r)
using transitive.trans.
qed
lemma tranclp_transitive: "transitive A (tranclp r)"
using tranclp_trans by unfold_locales
locale symmetric = related_set A "(\<sim>)" for A and equiv (infix "\<sim>" 50) +
assumes sym[sym]: "x \<sim> y \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> y \<sim> x"
begin
lemma sym_iff: "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<sim> y \<longleftrightarrow> y \<sim> x"
by (auto dest: sym)
lemma Restrp_symmetric: "symmetric UNIV ((\<sim>)\<restriction>A)"
apply unfold_locales by (auto simp: sym_iff)
lemma symmetric_subset: "B \<subseteq> A \<Longrightarrow> symmetric B (\<sim>)"
apply unfold_locales by (auto dest: sym)
end
declare symmetric.intro[intro]
lemma symmetric_cong:
"(\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b) \<Longrightarrow> symmetric A r \<longleftrightarrow> symmetric A r'"
by (auto simp: symmetric_def)
global_interpretation sympartp: symmetric UNIV "sympartp r"
rewrites "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
by auto
lemma sympartp_symmetric: "symmetric A (sympartp r)" by auto
locale antisymmetric = related_set +
assumes antisym: "x \<sqsubseteq> y \<Longrightarrow> y \<sqsubseteq> x \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x = y"
begin
interpretation less_eq_notations.
lemma sym_iff_eq_refl: "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<sim> y \<longleftrightarrow> x = y \<and> y \<sqsubseteq> y" by (auto dest: antisym)
lemma equiv_iff_eq[simp]: "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<simeq> y \<longleftrightarrow> x = y" by (auto dest: antisym elim: equivpartpE)
lemma extreme_unique: "X \<subseteq> A \<Longrightarrow> extreme X (\<sqsubseteq>) x \<Longrightarrow> extreme X (\<sqsubseteq>) y \<longleftrightarrow> x = y"
by (elim extremeE, auto dest!: antisym[OF _ _ subsetD])
lemma ex_extreme_iff_ex1:
"X \<subseteq> A \<Longrightarrow> Ex (extreme X (\<sqsubseteq>)) \<longleftrightarrow> Ex1 (extreme X (\<sqsubseteq>))" by (auto simp: extreme_unique)
lemma ex_extreme_iff_the:
"X \<subseteq> A \<Longrightarrow> Ex (extreme X (\<sqsubseteq>)) \<longleftrightarrow> extreme X (\<sqsubseteq>) (The (extreme X (\<sqsubseteq>)))"
apply (rule iffI)
apply (rule theI')
using extreme_unique by auto
lemma Restrp_antisymmetric: "antisymmetric UNIV ((\<sqsubseteq>)\<restriction>A)"
apply unfold_locales
by (auto dest: antisym)
lemma antisymmetric_subset: "B \<subseteq> A \<Longrightarrow> antisymmetric B (\<sqsubseteq>)"
apply unfold_locales using antisym by auto
end
declare antisymmetric.intro[intro]
lemma antisymmetric_cong:
"(\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b) \<Longrightarrow> antisymmetric A r \<longleftrightarrow> antisymmetric A r'"
by (auto simp: antisymmetric_def)
lemma antisymmetric_union:
fixes less_eq (infix "\<sqsubseteq>" 50)
assumes A: "antisymmetric A (\<sqsubseteq>)" and B: "antisymmetric B (\<sqsubseteq>)"
and AB: "\<forall>a \<in> A. \<forall>b \<in> B. a \<sqsubseteq> b \<longrightarrow> b \<sqsubseteq> a \<longrightarrow> a = b"
shows "antisymmetric (A \<union> B) (\<sqsubseteq>)"
proof-
interpret A: antisymmetric A "(\<sqsubseteq>)" using A.
interpret B: antisymmetric B "(\<sqsubseteq>)" using B.
show ?thesis by (auto dest: AB[rule_format] A.antisym B.antisym)
qed
text \<open>The following notion is new, generalizing antisymmetry and transitivity.\<close>
locale semiattractive = related_set +
assumes attract: "x \<sqsubseteq> y \<Longrightarrow> y \<sqsubseteq> x \<Longrightarrow> y \<sqsubseteq> z \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> z \<in> A \<Longrightarrow> x \<sqsubseteq> z"
begin
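text \<open>Both antisymmetry (where \<open>x \<sqsubseteq> y\<close> and \<open>y \<sqsubseteq> x\<close> force \<open>x = y\<close>) and
transitivity imply this property; the corresponding sublocale declarations
appear further below.\<close>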
interpretation less_eq_notations.
lemma equiv_order_trans[trans]:
assumes xy: "x \<simeq> y" and yz: "y \<sqsubseteq> z" and x: "x \<in> A" and y: "y \<in> A" and z: "z \<in> A"
shows "x \<sqsubseteq> z"
using attract[OF _ _ _ x y z] xy yz by (auto elim: equivpartpE)
lemma equiv_transitive: "transitive A (\<simeq>)"
proof unfold_locales
fix x y z
assume x: "x \<in> A" and y: "y \<in> A" and z: "z \<in> A" and xy: "x \<simeq> y" and yz: "y \<simeq> z"
show "x \<simeq> z"
using equiv_order_trans[OF xy _ x y z] attract[OF _ _ _ z y x] xy yz by (auto simp:equivpartp_def)
qed
lemma sym_order_trans[trans]:
assumes xy: "x \<sim> y" and yz: "y \<sqsubseteq> z" and x: "x \<in> A" and y: "y \<in> A" and z: "z \<in> A"
shows "x \<sqsubseteq> z"
using attract[OF _ _ _ x y z] xy yz by auto
interpretation sym: transitive A "(\<sim>)"
proof unfold_locales
fix x y z
assume x: "x \<in> A" and y: "y \<in> A" and z: "z \<in> A" and xy: "x \<sim> y" and yz: "y \<sim> z"
show "x \<sim> z"
using sym_order_trans[OF xy _ x y z] attract[OF _ _ _ z y x] xy yz by auto
qed
lemmas sym_transitive = sym.transitive_axioms
lemma extreme_bound_quasi_const:
assumes C: "C \<subseteq> A" and x: "x \<in> A" and C0: "C \<noteq> {}" and const: "\<forall>y \<in> C. y \<sim> x"
shows "extreme_bound A (\<sqsubseteq>) C x"
proof (intro extreme_boundI x)
from C0 obtain c where cC: "c \<in> C" by auto
with C have c: "c \<in> A" by auto
from cC const have cx: "c \<sim> x" by auto
fix b assume b: "b \<in> A" and "bound C (\<sqsubseteq>) b"
with cC have cb: "c \<sqsubseteq> b" by auto
from attract[OF _ _ cb x c b] cx show "x \<sqsubseteq> b" by auto
next
fix c assume "c \<in> C"
with const show "c \<sqsubseteq> x" by auto
qed
lemma extreme_bound_quasi_const_iff:
assumes C: "C \<subseteq> A" and x: "x \<in> A" and y: "y \<in> A" and C0: "C \<noteq> {}" and const: "\<forall>z \<in> C. z \<sim> x"
shows "extreme_bound A (\<sqsubseteq>) C y \<longleftrightarrow> x \<sim> y"
proof (intro iffI)
assume y: "extreme_bound A (\<sqsubseteq>) C y"
note x = extreme_bound_quasi_const[OF C x C0 const]
from extremes_equiv[OF y x]
show "x \<sim> y" by auto
next
assume xy: "x \<sim> y"
with const C sym.trans[OF _ xy _ x y] have Cy: "\<forall>z \<in> C. z \<sim> y" by auto
show "extreme_bound A (\<sqsubseteq>) C y"
using extreme_bound_quasi_const[OF C y C0 Cy].
qed
lemma Restrp_semiattractive: "semiattractive UNIV ((\<sqsubseteq>)\<restriction>A)"
apply unfold_locales
by (auto dest: attract)
lemma semiattractive_subset: "B \<subseteq> A \<Longrightarrow> semiattractive B (\<sqsubseteq>)"
apply unfold_locales using attract by blast
end
lemma semiattractive_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "semiattractive A r \<longleftrightarrow> semiattractive A r'" (is "?l \<longleftrightarrow> ?r")
proof
show "?l \<Longrightarrow> ?r"
apply (intro semiattractive.intro)
apply (unfold r[symmetric])
using semiattractive.attract.
show "?r \<Longrightarrow> ?l"
apply (intro semiattractive.intro)
apply (unfold r)
using semiattractive.attract.
qed
locale attractive = semiattractive +
assumes "semiattractive A (\<sqsubseteq>)\<^sup>-"
begin
interpretation less_eq_notations.
sublocale dual: semiattractive A "(\<sqsubseteq>)\<^sup>-"
rewrites "\<And>r. sympartp (r \<restriction> A) \<equiv> sympartp r \<restriction> A"
and "\<And>r. sympartp (sympartp r) \<equiv> sympartp r"
and "sympartp ((\<sqsubseteq>) \<restriction> A)\<^sup>- \<equiv> (\<sim>) \<restriction> A"
and "sympartp (\<sqsubseteq>)\<^sup>- \<equiv> (\<sim>)"
and "equivpartp (\<sqsubseteq>)\<^sup>- \<equiv> (\<simeq>)"
using attractive_axioms[unfolded attractive_def]
by (auto intro!: ext simp: attractive_axioms_def atomize_eq equivpartp_def)
lemma order_equiv_trans[trans]:
assumes xy: "x \<sqsubseteq> y" and yz: "y \<simeq> z" and x: "x \<in> A" and y: "y \<in> A" and z: "z \<in> A"
shows "x \<sqsubseteq> z"
using dual.attract[OF _ _ _ z y x] xy yz by auto
lemma order_sym_trans[trans]:
assumes xy: "x \<sqsubseteq> y" and yz: "y \<sim> z" and x: "x \<in> A" and y: "y \<in> A" and z: "z \<in> A"
shows "x \<sqsubseteq> z"
using dual.attract[OF _ _ _ z y x] xy yz by auto
interpretation Restrp: semiattractive UNIV "(\<sqsubseteq>)\<restriction>A" using Restrp_semiattractive.
interpretation dual.Restrp: semiattractive UNIV "(\<sqsubseteq>)\<^sup>-\<restriction>A" using dual.Restrp_semiattractive.
lemma Restrp_attractive: "attractive UNIV ((\<sqsubseteq>)\<restriction>A)"
apply unfold_locales
using dual.Restrp.attract by auto
lemma attractive_subset: "B \<subseteq> A \<Longrightarrow> attractive B (\<sqsubseteq>)"
apply (intro attractive.intro attractive_axioms.intro)
using semiattractive_subset dual.semiattractive_subset by auto
end
lemma attractive_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "attractive A r \<longleftrightarrow> attractive A r'"
by (simp add: attractive_def attractive_axioms_def r cong: semiattractive_cong)
context antisymmetric begin
sublocale attractive
apply unfold_locales by (auto dest: antisym)
end
context transitive begin
sublocale attractive
rewrites "\<And>r. sympartp (r \<restriction> A) \<equiv> sympartp r \<restriction> A"
and "\<And>r. sympartp (sympartp r) \<equiv> sympartp r"
and "sympartp (\<sqsubseteq>)\<^sup>- \<equiv> sympartp (\<sqsubseteq>)"
and "(sympartp (\<sqsubseteq>))\<^sup>- \<equiv> sympartp (\<sqsubseteq>)"
and "(sympartp (\<sqsubseteq>) \<restriction> A)\<^sup>- \<equiv> sympartp (\<sqsubseteq>) \<restriction> A"
and "asympartp (asympartp (\<sqsubseteq>)) = asympartp (\<sqsubseteq>)"
and "asympartp (sympartp (\<sqsubseteq>)) = bot"
and "asympartp (\<sqsubseteq>) \<restriction> A = asympartp ((\<sqsubseteq>) \<restriction> A)"
apply unfold_locales
by (auto intro!:ext dest: trans simp: atomize_eq)
end
subsection \<open>Combined Properties\<close>
text \<open>Some combinations of the above basic properties are given names.\<close>
locale asymmetric = related_set A "(\<sqsubset>)" for A and less (infix "\<sqsubset>" 50) +
assumes asym: "x \<sqsubset> y \<Longrightarrow> y \<sqsubset> x \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> False"
begin
sublocale irreflexive
apply unfold_locales by (auto dest: asym)
lemma antisymmetric_axioms: "antisymmetric A (\<sqsubset>)"
apply unfold_locales by (auto dest: asym)
lemma Restrp_asymmetric: "asymmetric UNIV ((\<sqsubset>)\<restriction>A)"
apply unfold_locales
by (auto dest:asym)
lemma asymmetric_subset: "B \<subseteq> A \<Longrightarrow> asymmetric B (\<sqsubset>)"
apply unfold_locales using asym by auto
end
lemma asymmetric_iff_irreflexive_antisymmetric:
fixes less (infix "\<sqsubset>" 50)
shows "asymmetric A (\<sqsubset>) \<longleftrightarrow> irreflexive A (\<sqsubset>) \<and> antisymmetric A (\<sqsubset>)" (is "?l \<longleftrightarrow> ?r")
proof
assume ?l
then interpret asymmetric.
show ?r by (auto dest: asym)
next
assume ?r
then interpret irreflexive + antisymmetric A "(\<sqsubset>)" by auto
show ?l by (auto intro!:asymmetric.intro dest: antisym irrefl)
qed
lemma asymmetric_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "asymmetric A r \<longleftrightarrow> asymmetric A r'"
by (simp add: asymmetric_iff_irreflexive_antisymmetric r cong: irreflexive_cong antisymmetric_cong)
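text \<open>For illustration, the strict order on the naturals is asymmetric, whereas the
weak order is not. A minimal sketch of such an instance, kept as a comment and not
checked as part of this theory:\<close>
(*
lemma "asymmetric (UNIV :: nat set) (<)"
  by (auto intro!: asymmetric.intro)
*)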
locale quasi_ordered_set = reflexive + transitive
begin
lemma quasi_ordered_subset: "B \<subseteq> A \<Longrightarrow> quasi_ordered_set B (\<sqsubseteq>)"
apply intro_locales
using reflexive_subset transitive_subset by auto
end
lemma quasi_ordered_set_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "quasi_ordered_set A r \<longleftrightarrow> quasi_ordered_set A r'"
by (simp add: quasi_ordered_set_def r cong: reflexive_cong transitive_cong)
locale near_ordered_set = antisymmetric + transitive
begin
interpretation Restrp: antisymmetric UNIV "(\<sqsubseteq>)\<restriction>A" using Restrp_antisymmetric.
interpretation Restrp: transitive UNIV "(\<sqsubseteq>)\<restriction>A" using Restrp_transitive.
lemma Restrp_near_order: "near_ordered_set UNIV ((\<sqsubseteq>)\<restriction>A)"..
lemma near_ordered_subset: "B \<subseteq> A \<Longrightarrow> near_ordered_set B (\<sqsubseteq>)"
apply intro_locales
using antisymmetric_subset transitive_subset by auto
end
lemma near_ordered_set_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "near_ordered_set A r \<longleftrightarrow> near_ordered_set A r'"
by (simp add: near_ordered_set_def r cong: antisymmetric_cong transitive_cong)
locale pseudo_ordered_set = reflexive + antisymmetric
begin
interpretation less_eq_notations.
lemma sym_eq[simp]: "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<sim> y \<longleftrightarrow> x = y"
by (auto simp: refl dest: antisym)
lemma extreme_bound_singleton_eq[simp]: "x \<in> A \<Longrightarrow> extreme_bound A (\<sqsubseteq>) {x} y \<longleftrightarrow> x = y"
by (auto intro!: antisym)
lemma eq_iff: "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x = y \<longleftrightarrow> x \<sqsubseteq> y \<and> y \<sqsubseteq> x" by (auto dest: antisym simp:refl)
lemma extreme_order_iff_eq: "e \<in> A \<Longrightarrow> extreme {x \<in> A. x \<sqsubseteq> e} (\<sqsubseteq>) s \<longleftrightarrow> e = s"
by (auto intro!: antisym)
lemma pseudo_ordered_subset: "B \<subseteq> A \<Longrightarrow> pseudo_ordered_set B (\<sqsubseteq>)"
apply intro_locales
using reflexive_subset antisymmetric_subset by auto
end
lemma pseudo_ordered_set_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "pseudo_ordered_set A r \<longleftrightarrow> pseudo_ordered_set A r'"
by (simp add: pseudo_ordered_set_def r cong: reflexive_cong antisymmetric_cong)
locale partially_ordered_set = reflexive + antisymmetric + transitive
begin
sublocale pseudo_ordered_set + quasi_ordered_set + near_ordered_set ..
lemma partially_ordered_subset: "B \<subseteq> A \<Longrightarrow> partially_ordered_set B (\<sqsubseteq>)"
apply intro_locales
using reflexive_subset transitive_subset antisymmetric_subset by auto
end
lemma partially_ordered_set_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "partially_ordered_set A r \<longleftrightarrow> partially_ordered_set A r'"
by (simp add: partially_ordered_set_def r cong: reflexive_cong antisymmetric_cong transitive_cong)
locale strict_ordered_set = irreflexive + transitive A "(\<sqsubset>)"
begin
sublocale asymmetric
proof
fix x y
assume x: "x \<in> A" and y: "y \<in> A"
assume xy: "x \<sqsubset> y"
also assume yx: "y \<sqsubset> x"
finally have "x \<sqsubset> x" using x y by auto
with x show False by auto
qed
lemma near_ordered_set_axioms: "near_ordered_set A (\<sqsubset>)"
using antisymmetric_axioms by intro_locales
interpretation Restrp: asymmetric UNIV "(\<sqsubset>)\<restriction>A" using Restrp_asymmetric.
interpretation Restrp: transitive UNIV "(\<sqsubset>)\<restriction>A" using Restrp_transitive.
lemma Restrp_strict_order: "strict_ordered_set UNIV ((\<sqsubset>)\<restriction>A)"..
lemma strict_ordered_subset: "B \<subseteq> A \<Longrightarrow> strict_ordered_set B (\<sqsubset>)"
apply intro_locales
using irreflexive_subset transitive_subset by auto
end
lemma strict_ordered_set_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "strict_ordered_set A r \<longleftrightarrow> strict_ordered_set A r'"
by (simp add: strict_ordered_set_def r cong: irreflexive_cong transitive_cong)
locale tolerance = symmetric + reflexive A "(\<sim>)"
begin
lemma tolerance_subset: "B \<subseteq> A \<Longrightarrow> tolerance B (\<sim>)"
apply intro_locales
using symmetric_subset reflexive_subset by auto
end
lemma tolerance_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "tolerance A r \<longleftrightarrow> tolerance A r'"
by (simp add: tolerance_def r cong: reflexive_cong symmetric_cong)
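text \<open>A typical tolerance that is not transitive relates two integers when they differ
by at most one. A sketch of the instance, kept as a comment and not checked here
(abs_minus_commute names the standard HOL fact):\<close>
(*
lemma "tolerance (UNIV :: int set) (\<lambda>x y. \<bar>x - y\<bar> \<le> 1)"
  by (intro tolerance.intro symmetric.intro reflexive.intro)
     (auto simp: abs_minus_commute)
*)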
global_interpretation equiv: tolerance UNIV "equivpartp r"
rewrites "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
by unfold_locales (auto simp:equivpartp_def)
locale partial_equivalence = symmetric +
assumes "transitive A (\<sim>)"
begin
sublocale transitive A "(\<sim>)"
rewrites "sympartp (\<sim>)\<restriction>A \<equiv> (\<sim>)\<restriction>A"
and "sympartp ((\<sim>)\<restriction>A) \<equiv> (\<sim>)\<restriction>A"
using partial_equivalence_axioms
unfolding partial_equivalence_axioms_def partial_equivalence_def
by (auto simp: atomize_eq sym intro!:ext)
lemma partial_equivalence_subset: "B \<subseteq> A \<Longrightarrow> partial_equivalence B (\<sim>)"
apply (intro partial_equivalence.intro partial_equivalence_axioms.intro)
using symmetric_subset transitive_subset by auto
end
lemma partial_equivalence_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "partial_equivalence A r \<longleftrightarrow> partial_equivalence A r'"
by (simp add: partial_equivalence_def partial_equivalence_axioms_def r
cong: transitive_cong symmetric_cong)
locale equivalence = symmetric + reflexive A "(\<sim>)" + transitive A "(\<sim>)"
begin
sublocale tolerance + partial_equivalence + quasi_ordered_set A "(\<sim>)"..
lemma equivalence_subset: "B \<subseteq> A \<Longrightarrow> equivalence B (\<sim>)"
apply (intro equivalence.intro)
using symmetric_subset transitive_subset by auto
end
lemma equivalence_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "equivalence A r \<longleftrightarrow> equivalence A r'"
by (simp add: equivalence_def r cong: reflexive_cong transitive_cong symmetric_cong)
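text \<open>Equality is the prototypical equivalence on any carrier set. A minimal sketch,
kept as a comment and not checked here:\<close>
(*
lemma "equivalence A (=)"
  by (intro equivalence.intro symmetric.intro reflexive.intro transitive.intro) auto
*)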
text \<open>Some combinations lead to uninteresting relations.\<close>
context
fixes r :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<bowtie>" 50)
begin
proposition reflexive_irreflexive_is_empty:
assumes r: "reflexive A (\<bowtie>)" and ir: "irreflexive A (\<bowtie>)"
shows "A = {}"
proof (rule ccontr)
interpret irreflexive A "(\<bowtie>)" using ir.
interpret reflexive A "(\<bowtie>)" using r.
assume "A \<noteq> {}"
then obtain a where a: "a \<in> A" by auto
from a refl have "a \<bowtie> a" by auto
with irrefl a show False by auto
qed
proposition symmetric_antisymmetric_imp_eq:
assumes s: "symmetric A (\<bowtie>)" and as: "antisymmetric A (\<bowtie>)"
shows "(\<bowtie>)\<restriction>A \<le> (=)"
proof-
interpret symmetric A "(\<bowtie>)" + antisymmetric A "(\<bowtie>)" using assms by auto
show "?thesis" using antisym by (auto dest: sym)
qed
proposition nontolerance:
shows "irreflexive A (\<bowtie>) \<and> symmetric A (\<bowtie>) \<longleftrightarrow> tolerance A (\<lambda>x y. \<not> x \<bowtie> y)"
proof (intro iffI conjI, elim conjE)
assume "irreflexive A (\<bowtie>)" and "symmetric A (\<bowtie>)"
then interpret irreflexive A "(\<bowtie>)" + symmetric A "(\<bowtie>)".
show "tolerance A (\<lambda>x y. \<not> x \<bowtie> y)" by (unfold_locales, auto dest: sym irrefl)
next
assume "tolerance A (\<lambda>x y. \<not> x \<bowtie> y)"
then interpret tolerance A "\<lambda>x y. \<not> x \<bowtie> y".
show "irreflexive A (\<bowtie>)" by (auto simp: eq_implies)
show "symmetric A (\<bowtie>)" using sym by auto
qed
proposition irreflexive_transitive_symmetric_is_empty:
assumes irr: "irreflexive A (\<bowtie>)" and tr: "transitive A (\<bowtie>)" and sym: "symmetric A (\<bowtie>)"
shows "(\<bowtie>)\<restriction>A = bot"
proof (intro ext, unfold bot_fun_def bot_bool_def eq_False, rule notI, erule RestrpE)
interpret strict_ordered_set A "(\<bowtie>)" using assms by (unfold strict_ordered_set_def, auto)
interpret symmetric A "(\<bowtie>)" using assms by auto
fix x y assume x: "x \<in> A" and y: "y \<in> A"
assume xy: "x \<bowtie> y"
also note sym[OF xy x y]
finally have "x \<bowtie> x" using x y by auto
with x show False by auto
qed
end
subsection \<open>Totality\<close>
locale semiconnex = related_set A "(\<sqsubset>)" for A and less (infix "\<sqsubset>" 50) +
assumes semiconnex: "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<sqsubset> y \<or> x = y \<or> y \<sqsubset> x"
begin
lemma cases[consumes 2, case_names less eq greater]:
assumes "x \<in> A" and "y \<in> A" and "x \<sqsubset> y \<Longrightarrow> P" and "x = y \<Longrightarrow> P" and "y \<sqsubset> x \<Longrightarrow> P"
shows "P" using semiconnex assms by auto
lemma neqE:
assumes "x \<in> A" and "y \<in> A"
shows "x \<noteq> y \<Longrightarrow> (x \<sqsubset> y \<Longrightarrow> P) \<Longrightarrow> (y \<sqsubset> x \<Longrightarrow> P) \<Longrightarrow> P"
by (cases rule: cases[OF assms], auto)
lemma semiconnex_subset: "B \<subseteq> A \<Longrightarrow> semiconnex B (\<sqsubset>)"
apply (intro semiconnex.intro)
using semiconnex by auto
end
declare semiconnex.intro[intro]
text \<open>Totality is negated antisymmetry \<^cite>\<open>\<open>Proposition 2.2.4\<close> in "Schmidt1993"\<close>.\<close>
proposition semiconnex_iff_neg_antisymmetric:
fixes less (infix "\<sqsubset>" 50)
shows "semiconnex A (\<sqsubset>) \<longleftrightarrow> antisymmetric A (\<lambda>x y. \<not> x \<sqsubset> y)" (is "?l \<longleftrightarrow> ?r")
proof (intro iffI semiconnex.intro antisymmetric.intro)
assume ?l
then interpret semiconnex.
fix x y
assume "x \<in> A" "y \<in> A" "\<not> x \<sqsubset> y" and "\<not> y \<sqsubset> x"
then show "x = y" by (cases rule: cases, auto)
next
assume ?r
then interpret neg: antisymmetric A "(\<lambda>x y. \<not> x \<sqsubset> y)".
fix x y
show "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<sqsubset> y \<or> x = y \<or> y \<sqsubset> x" using neg.antisym by auto
qed
lemma semiconnex_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "semiconnex A r \<longleftrightarrow> semiconnex A r'"
by (simp add: semiconnex_iff_neg_antisymmetric r cong: antisymmetric_cong)
locale semiconnex_irreflexive = semiconnex + irreflexive
begin
lemma neq_iff: "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<noteq> y \<longleftrightarrow> x \<sqsubset> y \<or> y \<sqsubset> x" by (auto elim:neqE dest: irrefl)
lemma semiconnex_irreflexive_subset: "B \<subseteq> A \<Longrightarrow> semiconnex_irreflexive B (\<sqsubset>)"
apply (intro semiconnex_irreflexive.intro)
using semiconnex_subset irreflexive_subset by auto
end
lemma semiconnex_irreflexive_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "semiconnex_irreflexive A r \<longleftrightarrow> semiconnex_irreflexive A r'"
by (simp add: semiconnex_irreflexive_def r cong: semiconnex_cong irreflexive_cong)
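text \<open>The strict order on the naturals is semiconnex and irreflexive: any two distinct
naturals are comparable. A sketch, kept as a comment and not checked here (less_linear
names the standard HOL fact):\<close>
(*
lemma "semiconnex_irreflexive (UNIV :: nat set) (<)"
  by (intro semiconnex_irreflexive.intro semiconnex.intro irreflexive.intro)
     (auto simp: less_linear)
*)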
locale connex = related_set +
assumes comparable: "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<sqsubseteq> y \<or> y \<sqsubseteq> x"
begin
interpretation less_eq_notations.
sublocale reflexive apply unfold_locales using comparable by auto
lemma comparable_cases[consumes 2, case_names le ge]:
assumes "x \<in> A" and "y \<in> A" and "x \<sqsubseteq> y \<Longrightarrow> P" and "y \<sqsubseteq> x \<Longrightarrow> P" shows "P"
using assms comparable by auto
lemma comparable_three_cases[consumes 2, case_names less eq greater]:
assumes "x \<in> A" and "y \<in> A" and "x \<sqsubset> y \<Longrightarrow> P" and "x \<sim> y \<Longrightarrow> P" and "y \<sqsubset> x \<Longrightarrow> P" shows "P"
using assms comparable by auto
lemma
assumes x: "x \<in> A" and y: "y \<in> A"
shows not_iff_asym: "\<not>x \<sqsubseteq> y \<longleftrightarrow> y \<sqsubset> x"
and not_asym_iff[simp]: "\<not>x \<sqsubset> y \<longleftrightarrow> y \<sqsubseteq> x"
using comparable[OF x y] by auto
lemma connex_subset: "B \<subseteq> A \<Longrightarrow> connex B (\<sqsubseteq>)"
by (intro connex.intro comparable, auto)
end
lemmas connexE = connex.comparable_cases
lemmas connexI[intro] = connex.intro
context
fixes less_eq :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>" 50)
begin
lemma connex_iff_semiconnex_reflexive: "connex A (\<sqsubseteq>) \<longleftrightarrow> semiconnex A (\<sqsubseteq>) \<and> reflexive A (\<sqsubseteq>)"
(is "?c \<longleftrightarrow> ?t \<and> ?r")
proof (intro iffI conjI; (elim conjE)?)
assume ?c then interpret connex.
show ?t apply unfold_locales using comparable by auto
show ?r by unfold_locales
next
assume ?t then interpret semiconnex A "(\<sqsubseteq>)".
assume ?r then interpret reflexive.
from semiconnex show ?c by auto
qed
lemma chain_connect: "Complete_Partial_Order.chain r A \<equiv> connex A r"
by (auto intro!: ext simp: atomize_eq connex_def Complete_Partial_Order.chain_def)
lemma connex_union:
assumes "connex X (\<sqsubseteq>)" and "connex Y (\<sqsubseteq>)" and "\<forall>x \<in> X. \<forall>y \<in> Y. x \<sqsubseteq> y \<or> y \<sqsubseteq> x"
shows "connex (X\<union>Y) (\<sqsubseteq>)"
using assms by (auto simp: connex_def)
end
lemma connex_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "connex A r \<longleftrightarrow> connex A r'"
by (simp add: connex_iff_semiconnex_reflexive r cong: semiconnex_cong reflexive_cong)
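text \<open>On a linearly ordered type the weak order is connex on every subset, e.g. on the
naturals. A sketch, kept as a comment and not checked here (nat_le_linear names the
standard HOL fact):\<close>
(*
lemma "connex (A :: nat set) (\<le>)"
  by (intro connexI) (rule nat_le_linear)
*)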
locale total_pseudo_ordered_set = connex + antisymmetric
begin
sublocale pseudo_ordered_set ..
lemma not_weak_iff:
assumes x: "x \<in> A" and y: "y \<in> A" shows "\<not> y \<sqsubseteq> x \<longleftrightarrow> x \<sqsubseteq> y \<and> x \<noteq> y"
using x y by (cases rule: comparable_cases, auto intro:antisym)
lemma total_pseudo_ordered_subset: "B \<subseteq> A \<Longrightarrow> total_pseudo_ordered_set B (\<sqsubseteq>)"
apply (intro_locales)
using antisymmetric_subset connex_subset by auto
end
lemma total_pseudo_ordered_set_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "total_pseudo_ordered_set A r \<longleftrightarrow> total_pseudo_ordered_set A r'"
by (simp add: total_pseudo_ordered_set_def r cong: connex_cong antisymmetric_cong)
locale total_quasi_ordered_set = connex + transitive
begin
sublocale quasi_ordered_set ..
lemma total_quasi_ordered_subset: "B \<subseteq> A \<Longrightarrow> total_quasi_ordered_set B (\<sqsubseteq>)"
using transitive_subset connex_subset by intro_locales
end
lemma total_quasi_ordered_set_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "total_quasi_ordered_set A r \<longleftrightarrow> total_quasi_ordered_set A r'"
by (simp add: total_quasi_ordered_set_def r cong: connex_cong transitive_cong)
locale total_ordered_set = total_quasi_ordered_set + antisymmetric
begin
sublocale partially_ordered_set + total_pseudo_ordered_set ..
lemma total_ordered_subset: "B \<subseteq> A \<Longrightarrow> total_ordered_set B (\<sqsubseteq>)"
using total_quasi_ordered_subset antisymmetric_subset by (intro total_ordered_set.intro)
end
lemma total_ordered_set_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
shows "total_ordered_set A r \<longleftrightarrow> total_ordered_set A r'"
by (simp add: total_ordered_set_def r cong: total_quasi_ordered_set_cong antisymmetric_cong)
subsection \<open>Well-Foundedness\<close>
locale well_founded = related_set A "(\<sqsubset>)" for A and less (infix "\<sqsubset>" 50) +
assumes induct[consumes 1, case_names less, induct set]:
"a \<in> A \<Longrightarrow> (\<And>x. x \<in> A \<Longrightarrow> (\<And>y. y \<in> A \<Longrightarrow> y \<sqsubset> x \<Longrightarrow> P y) \<Longrightarrow> P x) \<Longrightarrow> P a"
begin
sublocale asymmetric
proof (intro asymmetric.intro notI)
fix x y
assume xA: "x \<in> A"
then show "y \<in> A \<Longrightarrow> x \<sqsubset> y \<Longrightarrow> y \<sqsubset> x \<Longrightarrow> False"
by (induct arbitrary: y rule: induct, auto)
qed
lemma prefixed_Imagep_imp_empty:
assumes a: "X \<subseteq> ((\<sqsubset>) ``` X) \<inter> A" shows "X = {}"
proof -
from a have XA: "X \<subseteq> A" by auto
have "x \<in> A \<Longrightarrow> x \<notin> X" for x
proof (induct x rule: induct)
case (less x)
with a show ?case by (auto simp: Imagep_def)
qed
with XA show ?thesis by auto
qed
lemma nonempty_imp_ex_extremal:
assumes QA: "Q \<subseteq> A" and Q: "Q \<noteq> {}"
shows "\<exists>z \<in> Q. \<forall>y \<in> Q. \<not> y \<sqsubset> z"
using Q prefixed_Imagep_imp_empty[of Q] QA by (auto simp: Imagep_def)
interpretation Restrp: well_founded UNIV "(\<sqsubset>)\<restriction>A"
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "(\<sqsubset>)\<restriction>A\<restriction>UNIV = (\<sqsubset>)\<restriction>A"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
proof -
have "(\<And>x. (\<And>y. ((\<sqsubset>) \<restriction> A) y x \<Longrightarrow> P y) \<Longrightarrow> P x) \<Longrightarrow> P a" for a P
using induct[of a P] by (auto simp: Restrp_def)
then show "well_founded UNIV ((\<sqsubset>)\<restriction>A)" apply unfold_locales by auto
qed auto
lemmas Restrp_well_founded = Restrp.well_founded_axioms
lemmas Restrp_induct[consumes 0, case_names less] = Restrp.induct
interpretation Restrp.tranclp: well_founded UNIV "((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+"
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+ \<restriction> UNIV = ((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+"
and "(((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+)\<^sup>+\<^sup>+ = ((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
proof-
{ fix P x
assume induct_step: "\<And>x. (\<And>y. ((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+ y x \<Longrightarrow> P y) \<Longrightarrow> P x"
have "P x"
proof (rule induct_step)
show "\<And>y. ((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+ y x \<Longrightarrow> P y"
proof (induct x rule: Restrp_induct)
case (less x)
from \<open>((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+ y x\<close>
show ?case
proof (cases rule: tranclp.cases)
case r_into_trancl
with induct_step less show ?thesis by auto
next
case (trancl_into_trancl b)
with less show ?thesis by auto
qed
qed
qed
}
then show "well_founded UNIV ((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+" by unfold_locales auto
qed auto
lemmas Restrp_tranclp_well_founded = Restrp.tranclp.well_founded_axioms
lemmas Restrp_tranclp_induct[consumes 0, case_names less] = Restrp.tranclp.induct
end
context
fixes A :: "'a set" and less :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubset>" 50)
begin
lemma well_foundedI_pf:
assumes pre: "\<And>X. X \<subseteq> A \<Longrightarrow> X \<subseteq> ((\<sqsubset>) ``` X) \<inter> A \<Longrightarrow> X = {}"
shows "well_founded A (\<sqsubset>)"
proof
fix P a assume aA: "a \<in> A" and Ind: "\<And>x. x \<in> A \<Longrightarrow> (\<And>y. y \<in> A \<Longrightarrow> y \<sqsubset> x \<Longrightarrow> P y) \<Longrightarrow> P x"
from Ind have "{a\<in>A. \<not>P a} \<subseteq> ((\<sqsubset>) ``` {a\<in>A. \<not>P a}) \<inter> A" by (auto simp: Imagep_def)
from pre[OF _ this] aA
show "P a" by auto
qed
lemma well_foundedI_extremal:
assumes a: "\<And>X. X \<subseteq> A \<Longrightarrow> X \<noteq> {} \<Longrightarrow> \<exists>x \<in> X. \<forall>y \<in> X. \<not> y \<sqsubset> x"
shows "well_founded A (\<sqsubset>)"
proof (rule well_foundedI_pf)
fix X assume XA: "X \<subseteq> A" and pf: "X \<subseteq> ((\<sqsubset>) ``` X) \<inter> A"
from a[OF XA] pf show "X = {}" by (auto simp: Imagep_def)
qed
lemma well_founded_iff_ex_extremal:
"well_founded A (\<sqsubset>) \<longleftrightarrow> (\<forall>X \<subseteq> A. X \<noteq> {} \<longrightarrow> (\<exists>x \<in> X. \<forall>z \<in> X. \<not> z \<sqsubset> x))"
using well_founded.nonempty_imp_ex_extremal well_foundedI_extremal by blast
end
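text \<open>As a concrete instance, the strict order on the naturals is well-founded over any
carrier set, since every nonempty set of naturals contains a least element. A sketch via
well_foundedI_extremal, kept as a comment and not checked here (LeastI_ex and
not_less_Least name the standard HOL facts):\<close>
(*
lemma "well_founded (A :: nat set) (<)"
proof (intro well_foundedI_extremal)
  fix X :: "nat set" assume "X \<subseteq> A" and "X \<noteq> {}"
  then have "Least (\<lambda>x. x \<in> X) \<in> X" by (auto intro: LeastI_ex)
  then show "\<exists>x\<in>X. \<forall>y\<in>X. \<not> y < x"
    by (auto intro!: bexI[of _ "Least (\<lambda>x. x \<in> X)"] dest: not_less_Least)
qed
*)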
lemma well_founded_cong:
assumes r: "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> r a b \<longleftrightarrow> r' a b"
and A: "\<And>a b. r' a b \<Longrightarrow> a \<in> A \<longleftrightarrow> a \<in> A'"
and B: "\<And>a b. r' a b \<Longrightarrow> b \<in> A \<longleftrightarrow> b \<in> A'"
shows "well_founded A r \<longleftrightarrow> well_founded A' r'"
proof (intro iffI)
assume wf: "well_founded A r"
show "well_founded A' r'"
proof (intro well_foundedI_extremal)
fix X
assume X: "X \<subseteq> A'" and X0: "X \<noteq> {}"
show "\<exists>x\<in>X. \<forall>y\<in>X. \<not> r' y x"
proof (cases "X \<inter> A = {}")
case True
from X0 obtain x where xX: "x \<in> X" by auto
with True have "x \<notin> A" by auto
with xX X have "\<forall>y\<in>X. \<not> r' y x" by (auto simp: B)
with xX show ?thesis by auto
next
case False
from well_founded.nonempty_imp_ex_extremal[OF wf _ this]
obtain x where x: "x \<in> X \<inter> A" and Ar: "\<And>y. y \<in> X \<Longrightarrow> y \<in> A \<Longrightarrow> \<not> r y x" by auto
have "\<forall>y \<in> X. \<not> r' y x"
proof (intro ballI notI)
fix y assume yX: "y \<in> X" and yx: "r' y x"
from yX X have yA': "y \<in> A'" by auto
show False
proof (cases "y \<in> A")
case True with x Ar[OF yX] yx r show ?thesis by auto
next
case False with yA' x A[OF yx] r X show ?thesis by auto
qed
qed
with x show "\<exists>x \<in> X. \<forall>y \<in> X. \<not> r' y x" by auto
qed
qed
next
assume wf: "well_founded A' r'"
show "well_founded A r"
proof (intro well_foundedI_extremal)
fix X
assume X: "X \<subseteq> A" and X0: "X \<noteq> {}"
show "\<exists>x\<in>X. \<forall>y\<in>X. \<not> r y x"
proof (cases "X \<inter> A' = {}")
case True
from X0 obtain x where xX: "x \<in> X" by auto
with True have "x \<notin> A'" by auto
with xX X B have "\<forall>y\<in>X. \<not> r y x" by (auto simp: r in_mono)
with xX show ?thesis by auto
next
case False
from well_founded.nonempty_imp_ex_extremal[OF wf _ this]
obtain x where x: "x \<in> X \<inter> A'" and Ar: "\<And>y. y \<in> X \<Longrightarrow> y \<in> A' \<Longrightarrow> \<not> r' y x" by auto
have "\<forall>y \<in> X. \<not> r y x"
proof (intro ballI notI)
fix y assume yX: "y \<in> X" and yx: "r y x"
from yX X have y: "y \<in> A" by auto
show False
proof (cases "y \<in> A'")
case True with x Ar[OF yX] yx r X y show ?thesis by auto
next
case False with y x A yx r X show ?thesis by auto
qed
qed
with x show "\<exists>x \<in> X. \<forall>y \<in> X. \<not> r y x" by auto
qed
qed
qed
lemma wfP_iff_well_founded_UNIV: "wfP r \<longleftrightarrow> well_founded UNIV r"
by (auto simp: wfP_def wf_def well_founded_def)
lemma well_founded_singleton:
assumes "\<not>r x x" shows "well_founded {x} r"
using assms by (auto simp: well_founded_iff_ex_extremal)
lemma well_founded_Restrp[simp]: "well_founded A (r\<restriction>B) \<longleftrightarrow> well_founded (A\<inter>B) r" (is "?l \<longleftrightarrow> ?r")
proof (intro iffI well_foundedI_extremal)
assume l: ?l
fix X assume XAB: "X \<subseteq> A \<inter> B" and X0: "X \<noteq> {}"
with l[THEN well_founded.nonempty_imp_ex_extremal]
have "\<exists>x\<in>X. \<forall>z\<in>X. \<not> (r \<restriction> B) z x" by auto
with XAB show "\<exists>x\<in>X. \<forall>y\<in>X. \<not> r y x" by (auto simp: Restrp_def)
next
assume r: ?r
fix X assume XA: "X \<subseteq> A" and X0: "X \<noteq> {}"
show "\<exists>x\<in>X. \<forall>y\<in>X. \<not> (r \<restriction> B) y x"
proof (cases "X \<subseteq> B")
case True
with r[THEN well_founded.nonempty_imp_ex_extremal, of X] XA X0
have "\<exists>z\<in>X. \<forall>y\<in>X. \<not> r y z" by auto
then show ?thesis by auto
next
case False
then obtain x where x: "x \<in> X - B" by auto
then have "\<forall>y\<in>X. \<not> (r \<restriction> B) y x" by auto
with x show ?thesis by auto
qed
qed
lemma (in well_founded) well_founded_subset:
assumes "B \<subseteq> A" shows "well_founded B (\<sqsubset>)"
using assms well_founded_axioms by (auto simp: well_founded_iff_ex_extremal)
lemma well_founded_extend:
fixes less (infix "\<sqsubset>" 50)
assumes A: "well_founded A (\<sqsubset>)"
assumes B: "well_founded B (\<sqsubset>)"
assumes AB: "\<forall>a \<in> A. \<forall>b \<in> B. \<not>b \<sqsubset> a"
shows "well_founded (A \<union> B) (\<sqsubset>)"
proof (intro well_foundedI_extremal)
interpret A: well_founded A "(\<sqsubset>)" using A.
interpret B: well_founded B "(\<sqsubset>)" using B.
fix X assume XAB: "X \<subseteq> A \<union> B" and X0: "X \<noteq> {}"
show "\<exists>x\<in>X. \<forall>y\<in>X. \<not> y \<sqsubset> x"
proof (cases "X \<inter> A = {}")
case True
with XAB have XB: "X \<subseteq> B" by auto
from B.nonempty_imp_ex_extremal[OF XB X0] show ?thesis.
next
case False
with A.nonempty_imp_ex_extremal[OF _ this]
obtain e where XAe: "e \<in> X \<inter> A" "\<forall>y\<in>X \<inter> A. \<not> y \<sqsubset> e" by auto
then have eX: "e \<in> X" and eA: "e \<in> A" by auto
{ fix x assume xX: "x \<in> X"
have "\<not>x \<sqsubset> e"
proof (cases "x \<in> A")
case True with XAe xX show ?thesis by auto
next
case False
with xX XAB have "x \<in> B" by auto
with AB eA show ?thesis by auto
qed
}
with eX show ?thesis by auto
qed
qed
lemma closed_UN_well_founded:
fixes r (infix "\<sqsubset>" 50)
assumes XX: "\<forall>X\<in>XX. well_founded X (\<sqsubset>) \<and> (\<forall>x\<in>X. \<forall>y\<in>\<Union>XX. y \<sqsubset> x \<longrightarrow> y \<in> X)"
shows "well_founded (\<Union>XX) (\<sqsubset>)"
proof (intro well_foundedI_extremal)
have *: "X \<in> XX \<Longrightarrow> x\<in>X \<Longrightarrow> y \<in> \<Union>XX \<Longrightarrow> y \<sqsubset> x \<Longrightarrow> y \<in> X" for X x y using XX by blast
fix S
assume S: "S \<subseteq> \<Union>XX" and S0: "S \<noteq> {}"
from S0 obtain x where xS: "x \<in> S" by auto
with S obtain X where X: "X \<in> XX" and xX: "x \<in> X" by auto
from xS xX have Sx0: "S \<inter> X \<noteq> {}" by auto
from X XX interpret well_founded X "(\<sqsubset>)" by auto
from nonempty_imp_ex_extremal[OF _ Sx0]
obtain z where zS: "z \<in> S" and zX: "z \<in> X" and min: "\<forall>y \<in> S \<inter> X. \<not> y \<sqsubset> z" by auto
show "\<exists>x\<in>S. \<forall>y\<in>S. \<not> y \<sqsubset> x"
proof (intro bexI[OF _ zS] ballI notI)
fix y
assume yS: "y \<in> S" and yz: "y \<sqsubset> z"
have yXX: "y \<in> \<Union> XX" using S yS by auto
from *[OF X zX yXX yz] yS have "y \<in> X \<inter> S" by auto
with min yz show False by auto
qed
qed
lemma well_founded_cmono:
assumes r': "r' \<le> r" and wf: "well_founded A r"
shows "well_founded A r'"
proof (intro well_foundedI_extremal)
fix X assume "X \<subseteq> A" and "X \<noteq> {}"
from well_founded.nonempty_imp_ex_extremal[OF wf this]
show "\<exists>x\<in>X. \<forall>y\<in>X. \<not> r' y x" using r' by auto
qed
locale well_founded_ordered_set = well_founded + transitive _ "(\<sqsubset>)"
begin
sublocale strict_ordered_set..
interpretation Restrp: strict_ordered_set UNIV "(\<sqsubset>)\<restriction>A" + Restrp: well_founded UNIV "(\<sqsubset>)\<restriction>A"
using Restrp_strict_order Restrp_well_founded .
lemma Restrp_well_founded_order: "well_founded_ordered_set UNIV ((\<sqsubset>)\<restriction>A)"..
lemma well_founded_ordered_subset: "B \<subseteq> A \<Longrightarrow> well_founded_ordered_set B (\<sqsubset>)"
apply intro_locales
using well_founded_subset transitive_subset by auto
end
lemma (in well_founded) Restrp_tranclp_well_founded_ordered: "well_founded_ordered_set UNIV ((\<sqsubset>)\<restriction>A)\<^sup>+\<^sup>+"
using Restrp_tranclp_well_founded tranclp_transitive by intro_locales
locale well_related_set = related_set +
assumes nonempty_imp_ex_extreme: "X \<subseteq> A \<Longrightarrow> X \<noteq> {} \<Longrightarrow> \<exists>e. extreme X (\<sqsubseteq>)\<^sup>- e"
begin
sublocale connex
proof
fix x y assume "x \<in> A" and "y \<in> A"
with nonempty_imp_ex_extreme[of "{x,y}"] show "x \<sqsubseteq> y \<or> y \<sqsubseteq> x" by auto
qed
lemmas connex_axioms = connex_axioms
interpretation less_eq_notations.
sublocale asym: well_founded A "(\<sqsubset>)"
proof (unfold well_founded_iff_ex_extremal, intro allI impI)
fix X
assume XA: "X \<subseteq> A" and X0: "X \<noteq> {}"
from nonempty_imp_ex_extreme[OF XA X0] obtain e where "extreme X (\<sqsubseteq>)\<^sup>- e" by auto
then show "\<exists>x\<in>X. \<forall>z\<in>X. \<not>z \<sqsubset> x" by (auto intro!: bexI[of _ e])
qed
lemma well_related_subset: "B \<subseteq> A \<Longrightarrow> well_related_set B (\<sqsubseteq>)"
by (auto intro!: well_related_set.intro nonempty_imp_ex_extreme)
end
context
fixes less_eq :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>" 50)
begin
lemma well_related_iff_neg_well_founded:
"well_related_set A (\<sqsubseteq>) \<longleftrightarrow> well_founded A (\<lambda>x y. \<not> y \<sqsubseteq> x)"
by (simp add: well_related_set_def well_founded_iff_ex_extremal extreme_def Bex_def)
lemma well_related_singleton_refl:
assumes "x \<sqsubseteq> x" shows "well_related_set {x} (\<sqsubseteq>)"
by (intro well_related_set.intro exI[of _ x], auto simp: subset_singleton_iff assms)
lemma closed_UN_well_related:
assumes XX: "\<forall>X\<in>XX. well_related_set X (\<sqsubseteq>) \<and> (\<forall>x\<in>X. \<forall>y\<in>\<Union>XX. \<not>x \<sqsubseteq> y \<longrightarrow> y \<in> X)"
shows "well_related_set (\<Union>XX) (\<sqsubseteq>)"
using XX
apply (unfold well_related_iff_neg_well_founded)
using closed_UN_well_founded[of _ "\<lambda>x y. \<not> y \<sqsubseteq> x"].
end
lemma well_related_extend:
fixes r (infix "\<sqsubseteq>" 50)
assumes "well_related_set A (\<sqsubseteq>)" and "well_related_set B (\<sqsubseteq>)" and "\<forall>a \<in> A. \<forall>b \<in> B. a \<sqsubseteq> b"
shows "well_related_set (A \<union> B) (\<sqsubseteq>)"
using well_founded_extend[of _ "\<lambda>x y. \<not> y \<sqsubseteq> x", folded well_related_iff_neg_well_founded]
using assms by auto
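text \<open>Since every nonempty set of naturals has a least element, the weak order makes
the naturals a well-related set: the least element is extreme for the dual order. A
sketch, kept as a comment and not checked here (extremeI is assumed to name the
introduction rule for extreme; LeastI_ex and Least_le name the standard HOL facts):\<close>
(*
lemma "well_related_set (UNIV :: nat set) (\<le>)"
proof (intro well_related_set.intro)
  fix X :: "nat set" assume "X \<noteq> {}"
  then show "\<exists>e. extreme X (\<le>)\<^sup>- e"
    by (intro exI[of _ "Least (\<lambda>x. x \<in> X)"] extremeI)
       (auto intro: LeastI_ex Least_le)
qed
*)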
locale pre_well_ordered_set = semiattractive + well_related_set
begin
interpretation less_eq_notations.
sublocale transitive
proof
fix x y z assume xy: "x \<sqsubseteq> y" and yz: "y \<sqsubseteq> z" and x: "x \<in> A" and y: "y \<in> A" and z: "z \<in> A"
from x y z have "\<exists>e. extreme {x,y,z} (\<sqsupseteq>) e" (is "\<exists>e. ?P e") by (auto intro!: nonempty_imp_ex_extreme)
then have "?P x \<or> ?P y \<or> ?P z" by auto
then show "x \<sqsubseteq> z"
proof (elim disjE)
assume "?P x"
then show ?thesis by auto
next
assume "?P y"
then have "y \<sqsubseteq> x" by auto
from attract[OF xy this yz] x y z show ?thesis by auto
next
assume "?P z"
then have zx: "z \<sqsubseteq> x" and zy: "z \<sqsubseteq> y" by auto
from attract[OF yz zy zx] x y z have yx: "y \<sqsubseteq> x" by auto
from attract[OF xy yx yz] x y z show ?thesis by auto
qed
qed
sublocale total_quasi_ordered_set..
lemmas connex_axioms = connex_axioms
lemma strict_weak_trans[trans]:
assumes xy: "x \<sqsubset> y" and yz: "y \<sqsubseteq> z" and x: "x \<in> A" and y: "y \<in> A" and z: "z \<in> A"
shows "x \<sqsubset> z"
proof (intro asympartpI notI)
from trans xy yz x y z show "x \<sqsubseteq> z" by auto
assume "z \<sqsubseteq> x"
from trans[OF yz this] y z x have "y \<sqsubseteq> x" by auto
with xy show False by auto
qed
lemma weak_strict_trans[trans]:
assumes xy: "x \<sqsubseteq> y" and yz: "y \<sqsubset> z" and x: "x \<in> A" and y: "y \<in> A" and z: "z \<in> A"
shows "x \<sqsubset> z"
proof (intro asympartpI notI)
from trans xy yz x y z show "x \<sqsubseteq> z" by auto
assume "z \<sqsubseteq> x"
from trans[OF this xy] z x y have "z \<sqsubseteq> y" by auto
with yz show False by auto
qed
end
lemma pre_well_ordered_iff:
"pre_well_ordered_set A r \<longleftrightarrow> total_quasi_ordered_set A r \<and> well_founded A (asympartp r)"
(is "?p \<longleftrightarrow> ?t \<and> ?w")
proof safe
assume ?p
then interpret pre_well_ordered_set A r.
show ?t ?w by unfold_locales
next
assume ?t
then interpret total_quasi_ordered_set A r.
assume ?w
then have "well_founded UNIV (asympartp r \<restriction> A)" by simp
also have "asympartp r \<restriction> A = (\<lambda>x y. \<not> r y x) \<restriction> A" by (intro ext, auto simp: not_iff_asym)
finally have "well_related_set A r" by (simp add: well_related_iff_neg_well_founded)
then show ?p by intro_locales
qed
lemma (in semiattractive) pre_well_ordered_iff_well_related:
assumes XA: "X \<subseteq> A"
shows "pre_well_ordered_set X (\<sqsubseteq>) \<longleftrightarrow> well_related_set X (\<sqsubseteq>)" (is "?l \<longleftrightarrow> ?r")
proof
interpret X: semiattractive X using semiattractive_subset[OF XA].
{ assume ?l
then interpret X: pre_well_ordered_set X.
show ?r by unfold_locales
}
assume ?r
then interpret X: well_related_set X.
show ?l by unfold_locales
qed
lemma semiattractive_extend:
fixes r (infix "\<sqsubseteq>" 50)
assumes A: "semiattractive A (\<sqsubseteq>)" and B: "semiattractive B (\<sqsubseteq>)"
and AB: "\<forall>a \<in> A. \<forall>b \<in> B. a \<sqsubseteq> b \<and> \<not> b \<sqsubseteq> a"
shows "semiattractive (A \<union> B) (\<sqsubseteq>)"
proof-
interpret A: semiattractive A "(\<sqsubseteq>)" using A.
interpret B: semiattractive B "(\<sqsubseteq>)" using B.
{
fix x y z
assume yB: "y \<in> B" and zA: "z \<in> A" and yz: "y \<sqsubseteq> z"
have False using AB[rule_format, OF zA yB] yz by auto
}
note * = this
show ?thesis
by (auto intro!: semiattractive.intro dest:* AB[rule_format] A.attract B.attract)
qed
lemma pre_well_order_extend:
fixes r (infix "\<sqsubseteq>" 50)
assumes A: "pre_well_ordered_set A (\<sqsubseteq>)" and B: "pre_well_ordered_set B (\<sqsubseteq>)"
and AB: "\<forall>a \<in> A. \<forall>b \<in> B. a \<sqsubseteq> b \<and> \<not> b \<sqsubseteq> a"
shows "pre_well_ordered_set (A\<union>B) (\<sqsubseteq>)"
proof-
interpret A: pre_well_ordered_set A "(\<sqsubseteq>)" using A.
interpret B: pre_well_ordered_set B "(\<sqsubseteq>)" using B.
show ?thesis
apply (intro pre_well_ordered_set.intro well_related_extend semiattractive_extend)
apply unfold_locales
by (auto dest: AB[rule_format])
qed
locale well_ordered_set = antisymmetric + well_related_set
begin
sublocale pre_well_ordered_set..
sublocale total_ordered_set..
lemma well_ordered_subset: "B \<subseteq> A \<Longrightarrow> well_ordered_set B (\<sqsubseteq>)"
using well_related_subset antisymmetric_subset by (intro well_ordered_set.intro)
lemmas connex_axioms = connex_axioms
end
lemma (in antisymmetric) well_ordered_iff_well_related:
assumes XA: "X \<subseteq> A"
shows "well_ordered_set X (\<sqsubseteq>) \<longleftrightarrow> well_related_set X (\<sqsubseteq>)" (is "?l \<longleftrightarrow> ?r")
proof
interpret X: antisymmetric X using antisymmetric_subset[OF XA].
{ assume ?l
then interpret X: well_ordered_set X.
show ?r by unfold_locales
}
assume ?r
then interpret X: well_related_set X.
show ?l by unfold_locales
qed
context
fixes A and less_eq (infix "\<sqsubseteq>" 50)
assumes A: "\<forall>a \<in> A. \<forall>b \<in> A. a \<sqsubseteq> b"
begin
interpretation well_related_set A "(\<sqsubseteq>)"
apply unfold_locales
using A by blast
lemmas trivial_well_related = well_related_set_axioms
lemma trivial_pre_well_order: "pre_well_ordered_set A (\<sqsubseteq>)"
apply unfold_locales
using A by blast
end
context
fixes less_eq :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>" 50)
begin
interpretation less_eq_notations.
lemma well_order_extend:
assumes A: "well_ordered_set A (\<sqsubseteq>)" and B: "well_ordered_set B (\<sqsubseteq>)"
and ABa: "\<forall>a \<in> A. \<forall>b \<in> B. a \<sqsubseteq> b \<longrightarrow> b \<sqsubseteq> a \<longrightarrow> a = b"
and AB: "\<forall>a \<in> A. \<forall>b \<in> B. a \<sqsubseteq> b"
shows "well_ordered_set (A\<union>B) (\<sqsubseteq>)"
proof-
interpret A: well_ordered_set A "(\<sqsubseteq>)" using A.
interpret B: well_ordered_set B "(\<sqsubseteq>)" using B.
show ?thesis
apply (intro well_ordered_set.intro antisymmetric_union well_related_extend ABa AB)
by unfold_locales
qed
interpretation singleton: antisymmetric "{a}" "(\<sqsubseteq>)" for a apply unfold_locales by auto
lemmas singleton_antisymmetric[intro!] = singleton.antisymmetric_axioms
lemma singleton_well_ordered[intro!]: "a \<sqsubseteq> a \<Longrightarrow> well_ordered_set {a} (\<sqsubseteq>)"
apply unfold_locales by auto
lemma closed_UN_well_ordered:
assumes anti: "antisymmetric (\<Union> XX) (\<sqsubseteq>)"
and XX: "\<forall>X\<in>XX. well_ordered_set X (\<sqsubseteq>) \<and> (\<forall>x\<in>X. \<forall>y\<in>\<Union>XX. \<not> x \<sqsubseteq> y \<longrightarrow> y \<in> X)"
shows "well_ordered_set (\<Union>XX) (\<sqsubseteq>)"
apply (intro well_ordered_set.intro closed_UN_well_related anti)
using XX well_ordered_set.axioms by fast
end
text \<open>Directed sets: a set is directed if every pair of its elements has an upper bound
within the set.\<close>
definition "directed A r \<equiv> \<forall>x \<in> A. \<forall>y \<in> A. \<exists>z \<in> A. r x z \<and> r y z"
lemmas directedI[intro] = directed_def[unfolded atomize_eq, THEN iffD2, rule_format]
lemmas directedD = directed_def[unfolded atomize_eq, THEN iffD1, rule_format]
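text \<open>For example, any finite set of naturals containing its maximum is directed by the
weak order; a minimal concrete sketch, kept as a comment and not checked here:\<close>
(*
lemma "directed {0, 1, 2 :: nat} (\<le>)"
  by (auto simp: directed_def)
*)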
context
fixes less_eq :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>" 50)
begin
lemma directedE:
assumes "directed A (\<sqsubseteq>)" and "x \<in> A" and "y \<in> A"
and "\<And>z. z \<in> A \<Longrightarrow> x \<sqsubseteq> z \<Longrightarrow> y \<sqsubseteq> z \<Longrightarrow> thesis"
shows "thesis"
using assms by (auto dest: directedD)
lemma directed_empty[simp]: "directed {} (\<sqsubseteq>)" by auto
lemma directed_union:
assumes dX: "directed X (\<sqsubseteq>)" and dY: "directed Y (\<sqsubseteq>)"
and XY: "\<forall>x\<in>X. \<forall>y\<in>Y. \<exists>z \<in> X \<union> Y. x \<sqsubseteq> z \<and> y \<sqsubseteq> z"
shows "directed (X \<union> Y) (\<sqsubseteq>)"
using directedD[OF dX] directedD[OF dY] XY
apply (intro directedI) by blast
lemma directed_extend:
assumes X: "directed X (\<sqsubseteq>)" and Y: "directed Y (\<sqsubseteq>)" and XY: "\<forall>x\<in>X. \<forall>y\<in>Y. x \<sqsubseteq> y"
shows "directed (X \<union> Y) (\<sqsubseteq>)"
proof -
{ fix x y
assume xX: "x \<in> X" and yY: "y \<in> Y"
let ?g = "\<exists>z\<in>X \<union> Y. x \<sqsubseteq> z \<and> y \<sqsubseteq> z"
from directedD[OF Y yY yY] obtain z where zY: "z \<in> Y" and yz: "y \<sqsubseteq> z" by auto
from xX XY zY yz have ?g by auto
}
then show ?thesis by (auto intro!: directed_union[OF X Y])
qed
end
context connex begin
lemma directed: "directed A (\<sqsubseteq>)"
proof
fix x y
assume x: "x \<in> A" and y: "y \<in> A"
then show "\<exists>z\<in>A. x \<sqsubseteq> z \<and> y \<sqsubseteq> z"
proof (cases rule: comparable_cases)
case le
with refl[OF y] y show ?thesis by (intro bexI[of _ y], auto)
next
case ge
with refl[OF x] x show ?thesis by (intro bexI[of _ x], auto)
qed
qed
end
context
fixes ir :: "'i \<Rightarrow> 'i \<Rightarrow> bool" (infix "\<preceq>" 50)
fixes r :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<sqsubseteq>" 50)
begin
lemma monotone_connex_image:
assumes mono: "monotone_on I (\<preceq>) (\<sqsubseteq>) f"
assumes connex: "connex I (\<preceq>)"
shows "connex (f ` I) (\<sqsubseteq>)"
proof (rule connexI)
fix x y
assume "x \<in> f ` I" and "y \<in> f ` I"
then obtain i j where ij: "i \<in> I" "j \<in> I" and [simp]: "x = f i" "y = f j" by auto
from connex ij have "i \<preceq> j \<or> j \<preceq> i" by (auto elim: connexE)
with ij mono show "x \<sqsubseteq> y \<or> y \<sqsubseteq> x" by (elim disjE, auto dest: monotone_onD)
qed
lemma monotone_directed_image:
assumes mono: "monotone_on I (\<preceq>) (\<sqsubseteq>) f"
assumes dir: "directed I (\<preceq>)" shows "directed (f ` I) (\<sqsubseteq>)"
proof (rule directedI, safe)
fix x y assume x: "x \<in> I" and y: "y \<in> I"
with dir obtain z where z: "z \<in> I" and "x \<preceq> z" and "y \<preceq> z" by (auto elim: directedE)
with mono x y have "f x \<sqsubseteq> f z" and "f y \<sqsubseteq> f z" by (auto dest: monotone_onD)
with z show "\<exists>fz \<in> f ` I. f x \<sqsubseteq> fz \<and> f y \<sqsubseteq> fz" by auto
qed
end
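text \<open>For instance, translation by a constant is monotone on the naturals and therefore
maps directed sets to directed sets. A sketch, kept as a comment and not checked here
(monotone_onI names the standard HOL introduction rule):\<close>
(*
lemma assumes "directed (I :: nat set) (\<le>)"
  shows "directed ((\<lambda>n. n + k) ` I) (\<le>)"
  by (rule monotone_directed_image[OF _ assms]) (auto intro!: monotone_onI)
*)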
subsection \<open>Order Pairs\<close>
locale compatible = related_set + related_set A "(\<sqsubset>)" for less (infix "\<sqsubset>" 50) +
assumes compat_right[trans]: "x \<sqsubseteq> y \<Longrightarrow> y \<sqsubset> z \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> z \<in> A \<Longrightarrow> x \<sqsubset> z"
assumes compat_left[trans]: "x \<sqsubset> y \<Longrightarrow> y \<sqsubseteq> z \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> z \<in> A \<Longrightarrow> x \<sqsubset> z"
locale compatible_ordering = reflexive + irreflexive + compatible +
assumes strict_implies_weak: "x \<sqsubset> y \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<sqsubseteq> y"
begin
text \<open>The strict part is necessarily transitive.\<close>
text \<open>The following sequence of declarations is made in order to obtain fact names in a
manner similar to the Isabelle/HOL facts about orders.\<close>
sublocale strict: transitive A "(\<sqsubset>)"
using compat_right[OF strict_implies_weak] by unfold_locales
sublocale strict_ordered_set A "(\<sqsubset>)" ..
thm strict.trans asym irrefl
lemma strict_implies_not_weak: "x \<sqsubset> y \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> \<not> y \<sqsubseteq> x"
using irrefl compat_left by blast
end
context transitive begin
interpretation less_eq_notations.
lemma asym_trans[trans]:
shows "x \<sqsubset> y \<Longrightarrow> y \<sqsubseteq> z \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> z \<in> A \<Longrightarrow> x \<sqsubset> z"
and "x \<sqsubseteq> y \<Longrightarrow> y \<sqsubset> z \<Longrightarrow> x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> z \<in> A \<Longrightarrow> x \<sqsubset> z"
by (auto 0 3 dest: trans)
end
locale attractive_ordering = compatible_ordering + attractive
locale pseudo_ordering = compatible_ordering + pseudo_ordered_set
begin
sublocale attractive_ordering ..
end
locale quasi_ordering = compatible_ordering + quasi_ordered_set
begin
sublocale attractive_ordering ..
end
locale partial_ordering = compatible_ordering + partially_ordered_set
begin
sublocale pseudo_ordering + quasi_ordering ..
end
locale well_founded_ordering = quasi_ordering + well_founded
locale total_ordering = compatible_ordering + total_ordered_set
begin
sublocale partial_ordering ..
end
locale strict_total_ordering = partial_ordering + semiconnex A "(\<sqsubset>)"
begin
sublocale semiconnex_irreflexive ..
sublocale connex
proof
fix x y assume x: "x \<in> A" and y: "y \<in> A"
then show "x \<sqsubseteq> y \<or> y \<sqsubseteq> x"
apply (cases rule: cases[OF x y])
by (auto dest: strict_implies_weak)
qed
sublocale total_ordering ..
(*
sublocale old: ordering "(\<sqsubseteq>)" "(\<sqsubset>)"
proof-
have "a \<sqsubseteq> b \<Longrightarrow> a \<noteq> b \<Longrightarrow> a \<sqsubset> b" for a b
by (cases a b rule: cases, auto dest: strict_implies_weak)
then show "ordering (\<sqsubseteq>) (\<sqsubset>)"
by (unfold_locales, auto dest:strict_implies_weak trans)
qed
*)
lemma not_weak[simp]:
assumes "x \<in> A" and "y \<in> A" shows "\<not> x \<sqsubseteq> y \<longleftrightarrow> y \<sqsubset> x"
using assms by (cases rule:cases, auto simp: strict_implies_not_weak dest: strict_implies_weak)
lemma not_strict[simp]: "x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> \<not> x \<sqsubset> y \<longleftrightarrow> y \<sqsubseteq> x"
using not_weak by blast
end
subsection \<open>Relating to Classes\<close>
text \<open>In Isabelle 2020, sublocales must be declared in classes before the dual
sublocales are declared; otherwise facts would be prefixed by ``dual.dual.''\<close>
context ord begin
abbreviation least where "least X \<equiv> extreme X (\<lambda>x y. y \<le> x)"
abbreviation greatest where "greatest X \<equiv> extreme X (\<le>)"
abbreviation supremum where "supremum X \<equiv> least (Collect (bound X (\<le>)))"
abbreviation infimum where "infimum X \<equiv> greatest (Collect (bound X (\<lambda>x y. y \<le> x)))"
lemma Least_eq_The_least: "Least P = The (least {x. P x})"
by (auto simp: Least_def extreme_def[unfolded atomize_eq, THEN ext])
lemma Greatest_eq_The_greatest: "Greatest P = The (greatest {x. P x})"
by (auto simp: Greatest_def extreme_def[unfolded atomize_eq, THEN ext])
end
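text \<open>These abbreviations make concrete statements immediate, e.g. the least element of
a small set of naturals. A sketch, kept as a comment and not checked here:\<close>
(*
lemma "least {2, 3, 5 :: nat} 2"
  by (auto simp: extreme_def)
*)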
lemma Ball_UNIV[simp]: "Ball UNIV = All" by auto
lemma Bex_UNIV[simp]: "Bex UNIV = Ex" by auto
class compat = ord + assumes "compatible_ordering UNIV (\<le>) (<)"
begin
sublocale order: compatible_ordering UNIV
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>X. X \<subseteq> UNIV \<equiv> True"
and "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>P. True \<and> P \<equiv> P"
and "Ball UNIV \<equiv> All"
and "Bex UNIV \<equiv> Ex"
and "sympartp (\<le>)\<^sup>- \<equiv> sympartp (\<le>)"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
using compat_axioms unfolding class.compat_def by (auto 0 4 simp:atomize_eq)
end
text \<open>The locale-based facts are now imported into classes, e.g.:\<close>
thm order.trans order.strict.trans order.refl order.irrefl order.asym order.extreme_bound_singleton
class attractive_order = ord + assumes "attractive_ordering UNIV (\<le>) (<)"
begin
text \<open>We need to declare subclasses before sublocales in order to preserve facts for superclasses.\<close>
interpretation attractive_ordering UNIV
using attractive_order_axioms unfolding class.attractive_order_def.
subclass compat ..
sublocale order: attractive_ordering UNIV
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>X. X \<subseteq> UNIV \<equiv> True"
and "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>P. True \<and> P \<equiv> P"
and "Ball UNIV \<equiv> All"
and "Bex UNIV \<equiv> Ex"
and "sympartp (\<le>)\<^sup>- \<equiv> sympartp (\<le>)"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
apply unfold_locales by (auto simp:atomize_eq)
end
thm order.extreme_bound_quasi_const
class psorder = ord + assumes "pseudo_ordering UNIV (\<le>) (<)"
begin
interpretation pseudo_ordering UNIV using psorder_axioms unfolding class.psorder_def.
subclass attractive_order ..
sublocale order: pseudo_ordering UNIV
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>X. X \<subseteq> UNIV \<equiv> True"
and "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>P. True \<and> P \<equiv> P"
and "Ball UNIV \<equiv> All"
and "Bex UNIV \<equiv> Ex"
and "sympartp (\<le>)\<^sup>- \<equiv> sympartp (\<le>)"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
apply unfold_locales by (auto simp:atomize_eq)
end
class qorder = ord + assumes "quasi_ordering UNIV (\<le>) (<)"
begin
interpretation quasi_ordering UNIV using qorder_axioms unfolding class.qorder_def.
subclass attractive_order ..
sublocale order: quasi_ordering UNIV
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>X. X \<subseteq> UNIV \<equiv> True"
and "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>P. True \<and> P \<equiv> P"
and "Ball UNIV \<equiv> All"
and "Bex UNIV \<equiv> Ex"
and "sympartp (\<le>)\<^sup>- \<equiv> sympartp (\<le>)"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
apply unfold_locales by (auto simp:atomize_eq)
end
class porder = ord + assumes "partial_ordering UNIV (\<le>) (<)"
begin
interpretation partial_ordering UNIV
using porder_axioms unfolding class.porder_def.
subclass psorder ..
subclass qorder ..
sublocale order: partial_ordering UNIV
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>X. X \<subseteq> UNIV \<equiv> True"
and "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>P. True \<and> P \<equiv> P"
and "Ball UNIV \<equiv> All"
and "Bex UNIV \<equiv> Ex"
and "sympartp (\<le>)\<^sup>- \<equiv> sympartp (\<le>)"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
apply unfold_locales by (auto simp:atomize_eq)
end
class wf_qorder = ord + assumes "well_founded_ordering UNIV (\<le>) (<)"
begin
interpretation well_founded_ordering UNIV
using wf_qorder_axioms unfolding class.wf_qorder_def.
subclass qorder ..
sublocale order: well_founded_ordering UNIV
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>X. X \<subseteq> UNIV \<equiv> True"
and "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>P. True \<and> P \<equiv> P"
and "Ball UNIV \<equiv> All"
and "Bex UNIV \<equiv> Ex"
and "sympartp (\<le>)\<^sup>- \<equiv> sympartp (\<le>)"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
apply unfold_locales by (auto simp:atomize_eq)
end
class totalorder = ord + assumes "total_ordering UNIV (\<le>) (<)"
begin
interpretation total_ordering UNIV
using totalorder_axioms unfolding class.totalorder_def.
subclass porder ..
sublocale order: total_ordering UNIV
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>X. X \<subseteq> UNIV \<equiv> True"
and "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>P. True \<and> P \<equiv> P"
and "Ball UNIV \<equiv> All"
and "Bex UNIV \<equiv> Ex"
and "sympartp (\<le>)\<^sup>- \<equiv> sympartp (\<le>)"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
apply unfold_locales by (auto simp:atomize_eq)
end
text \<open>Isabelle/HOL's @{class preorder} belongs to @{class qorder}, but not vice versa.\<close>
subclass (in preorder) qorder
apply unfold_locales
using order_refl apply assumption
apply simp
using le_less_trans apply assumption
using less_le_trans apply assumption
using less_imp_le apply assumption
using order_trans apply assumption
done
subclass (in order) porder by (unfold_locales, auto)
subclass (in wellorder) wf_qorder
apply (unfold_locales)
using less_induct by auto
text \<open>Isabelle/HOL's @{class linorder} is equivalent to our locale @{locale strict_total_ordering}.\<close>
context linorder begin
interpretation strict_total_ordering UNIV
apply unfold_locales by auto
subclass totalorder ..
sublocale order: strict_total_ordering UNIV
rewrites "\<And>x. x \<in> UNIV \<equiv> True"
and "\<And>X. X \<subseteq> UNIV \<equiv> True"
and "\<And>r. r \<restriction> UNIV \<equiv> r"
and "\<And>P. True \<and> P \<equiv> P"
and "Ball UNIV \<equiv> All"
and "Bex UNIV \<equiv> Ex"
and "sympartp (\<le>)\<^sup>- \<equiv> sympartp (\<le>)"
and "\<And>P1. (True \<Longrightarrow> PROP P1) \<equiv> PROP P1"
and "\<And>P1. (True \<Longrightarrow> P1) \<equiv> Trueprop P1"
and "\<And>P1 P2. (True \<Longrightarrow> PROP P1 \<Longrightarrow> PROP P2) \<equiv> (PROP P1 \<Longrightarrow> PROP P2)"
apply unfold_locales by (auto simp:atomize_eq)
end
text \<open>Tests: facts should be available in the most general classes.\<close>
thm order.strict.trans[where 'a="'a::compat"]
thm order.extreme_bound_quasi_const[where 'a="'a::attractive_order"]
thm order.extreme_bound_singleton_eq[where 'a="'a::psorder"]
thm order.trans[where 'a="'a::qorder"]
thm order.comparable_cases[where 'a="'a::totalorder"]
thm order.cases[where 'a="'a::linorder"]
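text \<open>For instance, transitivity can now be used at the general class qorder; a sketch,
kept as a comment and not checked here:\<close>
(*
lemma fixes x y z :: "'a :: qorder"
  shows "x \<le> y \<Longrightarrow> y \<le> z \<Longrightarrow> x \<le> z"
  by (fact order.trans)
*)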
subsection \<open>Declaring Duals\<close>
sublocale reflexive \<subseteq> sym: reflexive A "sympartp (\<sqsubseteq>)"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- \<equiv> sympartp (\<sqsubseteq>)"
and "\<And>r. sympartp (sympartp r) \<equiv> sympartp r"
and "\<And>r. sympartp r \<restriction> A \<equiv> sympartp (r \<restriction> A)"
by (auto 0 4 simp:atomize_eq)
sublocale quasi_ordered_set \<subseteq> sym: quasi_ordered_set A "sympartp (\<sqsubseteq>)"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
and "sympartp (sympartp (\<sqsubseteq>)) = sympartp (\<sqsubseteq>)"
apply unfold_locales by (auto 0 4 dest: trans)
text \<open>At this point, we declare the duals as sublocales.
In the following, the ``rewrites'' clauses clean up redundant facts.\<close>
sublocale reflexive \<subseteq> dual: reflexive A "(\<sqsubseteq>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- \<equiv> sympartp (\<sqsubseteq>)"
and "\<And>r. sympartp (r \<restriction> A) \<equiv> sympartp r \<restriction> A"
and "(\<sqsubseteq>)\<^sup>- \<restriction> A \<equiv> ((\<sqsubseteq>) \<restriction> A)\<^sup>-"
by (auto simp: atomize_eq)
context attractive begin
interpretation less_eq_notations.
sublocale dual: attractive A "(\<sqsupseteq>)"
rewrites "sympartp (\<sqsupseteq>) = (\<sim>)"
and "equivpartp (\<sqsupseteq>) \<equiv> (\<simeq>)"
and "\<And>r. sympartp (r \<restriction> A) \<equiv> sympartp r \<restriction> A"
and "\<And>r. sympartp (sympartp r) \<equiv> sympartp r"
and "(\<sqsubseteq>)\<^sup>- \<restriction> A \<equiv> ((\<sqsubseteq>) \<restriction> A)\<^sup>-"
apply unfold_locales by (auto intro!: ext dest: attract dual.attract simp: atomize_eq)
end
context irreflexive begin
sublocale dual: irreflexive A "(\<sqsubset>)\<^sup>-"
rewrites "(\<sqsubset>)\<^sup>- \<restriction> A \<equiv> ((\<sqsubset>) \<restriction> A)\<^sup>-"
apply unfold_locales by (auto dest: irrefl simp: atomize_eq)
end
sublocale transitive \<subseteq> dual: transitive A "(\<sqsubseteq>)\<^sup>-"
rewrites "(\<sqsubseteq>)\<^sup>- \<restriction> A \<equiv> ((\<sqsubseteq>) \<restriction> A)\<^sup>-"
and "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
and "asympartp (\<sqsubseteq>)\<^sup>- = (asympartp (\<sqsubseteq>))\<^sup>-"
apply unfold_locales by (auto dest: trans simp: atomize_eq intro!:ext)
sublocale antisymmetric \<subseteq> dual: antisymmetric A "(\<sqsubseteq>)\<^sup>-"
rewrites "(\<sqsubseteq>)\<^sup>- \<restriction> A \<equiv> ((\<sqsubseteq>) \<restriction> A)\<^sup>-"
and "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by (auto dest: antisym simp: atomize_eq)
sublocale semiconnex \<subseteq> dual: semiconnex A "(\<sqsubset>)\<^sup>-"
rewrites "sympartp (\<sqsubset>)\<^sup>- = sympartp (\<sqsubset>)"
using semiconnex by auto
sublocale connex \<subseteq> dual: connex A "(\<sqsubseteq>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by (auto intro!: chainI dest:comparable)
sublocale semiconnex_irreflexive \<subseteq> dual: semiconnex_irreflexive A "(\<sqsubset>)\<^sup>-"
rewrites "sympartp (\<sqsubset>)\<^sup>- = sympartp (\<sqsubset>)"
by unfold_locales auto
sublocale pseudo_ordered_set \<subseteq> dual: pseudo_ordered_set A "(\<sqsubseteq>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales (auto 0 4)
sublocale quasi_ordered_set \<subseteq> dual: quasi_ordered_set A "(\<sqsubseteq>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales auto
sublocale partially_ordered_set \<subseteq> dual: partially_ordered_set A "(\<sqsubseteq>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales (auto 0 4)
sublocale total_pseudo_ordered_set \<subseteq> dual: total_pseudo_ordered_set A "(\<sqsubseteq>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales (auto 0 4)
sublocale total_quasi_ordered_set \<subseteq> dual: total_quasi_ordered_set A "(\<sqsubseteq>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales auto
sublocale compatible_ordering \<subseteq> dual: compatible_ordering A "(\<sqsubseteq>)\<^sup>-" "(\<sqsubset>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
apply unfold_locales
by (auto dest: compat_left compat_right strict_implies_weak)
sublocale attractive_ordering \<subseteq> dual: attractive_ordering A "(\<sqsubseteq>)\<^sup>-" "(\<sqsubset>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales auto
sublocale pseudo_ordering \<subseteq> dual: pseudo_ordering A "(\<sqsubseteq>)\<^sup>-" "(\<sqsubset>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales (auto 0 4)
sublocale quasi_ordering \<subseteq> dual: quasi_ordering A "(\<sqsubseteq>)\<^sup>-" "(\<sqsubset>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales auto
sublocale partial_ordering \<subseteq> dual: partial_ordering A "(\<sqsubseteq>)\<^sup>-" "(\<sqsubset>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales (auto 0 4)
sublocale total_ordering \<subseteq> dual: total_ordering A "(\<sqsubseteq>)\<^sup>-" "(\<sqsubset>)\<^sup>-"
rewrites "sympartp (\<sqsubseteq>)\<^sup>- = sympartp (\<sqsubseteq>)"
by unfold_locales (auto 0 4)
lemma(in antisymmetric) monotone_extreme_imp_extreme_bound_iff:
fixes ir (infix "\<preceq>" 50)
assumes "f ` C \<subseteq> A" and "monotone_on C (\<preceq>) (\<sqsubseteq>) f" and i: "extreme C (\<preceq>) i"
shows "extreme_bound A (\<sqsubseteq>) (f ` C) x \<longleftrightarrow> f i = x"
using dual.extreme_unique monotone_extreme_extreme_boundI[OF assms] by auto
subsection \<open>Instantiations\<close>
text \<open>Finally, we instantiate our classes as a sanity check.\<close>
instance nat :: linorder ..
text \<open>The pointwise ordering of functions is compatible only if the weak part is transitive.\<close>
instance "fun" :: (type,qorder) compat
proof (intro_classes, unfold_locales)
note [simp] = le_fun_def less_fun_def
fix f g h :: "'a \<Rightarrow> 'b"
{ assume fg: "f \<le> g" and gh: "g < h"
show "f < h"
proof (unfold less_fun_def, intro conjI le_funI notI)
from fg have "f x \<le> g x" for x by auto
also from gh have "g x \<le> h x" for x by auto
finally (order.trans) show "f x \<le> h x" for x.
assume hf: "h \<le> f"
then have "h x \<le> f x" for x by auto
also from fg have "f x \<le> g x" for x by auto
finally have "h \<le> g" by auto
with gh show False by auto
qed
}
{ assume fg: "f < g" and gh: "g \<le> h"
show "f < h"
proof (unfold less_fun_def, intro conjI le_funI notI)
from fg have "f x \<le> g x" for x by auto
also from gh have "g x \<le> h x" for x by auto
finally show "f x \<le> h x" for x.
from gh have "g x \<le> h x" for x by auto
also assume hf: "h \<le> f"
then have "h x \<le> f x" for x by auto
finally have "g \<le> f" by auto
with fg show False by auto
qed
}
show "f < g \<Longrightarrow> f \<le> g" by auto
show "\<not>f < f" by auto
show "f \<le> f" by auto
qed
instance "fun" :: (type,qorder) qorder
apply intro_classes
apply unfold_locales
by (auto simp: le_fun_def dest: order.trans)
instance "fun" :: (type,porder) porder
apply intro_classes
apply unfold_locales
proof (intro ext)
fix f g :: "'a \<Rightarrow> 'b" and x :: 'a
assume fg: "f \<le> g" and gf: "g \<le> f"
then have "f x \<le> g x" and "g x \<le> f x" by (auto elim: le_funE)
from order.antisym[OF this] show "f x = g x" by auto
qed
end
|
module Language.Elab.Deriving.Eq
-- Public: Things here we actually use in deriving Eq need to be re-exported to
-- be available at the deriving use-site. A little odd but that's how it is.
-- Otherwise you get extremely vague "Can't reify as X" errors and chances are
-- you don't have an FC to track it down.
import public Language.Elab.Syntax
import public Language.Elab.Types
import public Util
import public Language.Elab.Deriving.Util
import public Language.Reflection
{-
interesting case, happens to work for Show already!
data TagField : Type where
TF : Eq a => Show a => (name : String) -> (value : a) -> TagField
-}
-- A regular instance might look like (Eq a, Eq b, Eq c) => ...
-- This pairing isn't actually necessary; it's just notational convenience and
-- we don't really have a reason to emulate it.
-- Eq a => Eq b => ... is just as valid as (Eq a, Eq b) => ...
-- It can also result in clearer errors.
addEqAutoImps : List String -> TTImp -> TTImp
addEqAutoImps xs retty
= foldr (\arg,tt => `(Eq ~(iBindVar arg) => ~(tt))) retty xs
-- I want better names here, including better index names.
-- e.g. we have this now:
{-
Language.Elab.Deriving.Eq.eqImplFoo6'Fun : Eq arg6281 => Eq arg6282
=> Eq arg6283 => Foo6' arg6281 arg6282 arg6283 arg6284
-> Foo6' arg6281 arg6282 arg6283 arg6284 -> Bool
-}
||| e.g. (==) : Foo a b -> Foo a b -> Bool
eqClaim : (opname : Name) -> TypeInfo -> Visibility -> Elab Decl
eqClaim op tyinfo vis = do
let conargs = pullExplicits tyinfo
varnames = map (show . name) conargs
params = map (extractNameStr . name) $ filter (not . isIndex) conargs
tysig = `(~(tyinfo.type) -> ~(tyinfo.type) -> Bool)
logMsg "eqClaim" 1 $ ("auto params: ") ++ show params
-- NB: I can't think of a reason not to Inline here
pure $ iClaim MW vis [Inline] (mkTy op (addEqAutoImps params tysig))
eqCon : (opname : Name) -> Constructor -> Elab Clause
eqCon op con = do
let vars = filter (isExplicitPi . piInfo) con.args
pats = makePatNames vars infVars
lhs = iVar op `iApp` (makePat con.name (map (map fst) pats))
`iApp` (makePat con.name (map (map snd) pats))
rhs = makeRhs (catMaybes pats)
logTerm "eqconlhs" 1 "" lhs
logTerm "eqconrhs" 1 "" rhs
pure $ patClause lhs rhs
where
-- Make our pat names; we use Just to flag the vars we want to use. We don't
-- compare indices since they're vacuously the same for an Eq interface.
makePatNames : List ArgInfo
-> Stream String -> (List (Maybe (Name,Name)))
makePatNames [] vs = []
makePatNames (a :: as) (v :: vs)
=
let xs = makePatNames as vs
basicname = UN v
in if isUse0 a.count || a.isIndex
then Nothing :: xs
else Just ((mapName (++ "_1") basicname), (mapName (++ "_2") basicname)) :: xs
-- The lhs of our function, fields we don't want to use are replaced with _
makePat : (con : Name) -> (vars : List (Maybe Name)) -> TTImp
makePat con vars = foldl
(\tt,v => `(~(tt) ~(maybe implicit' bindNameVar v))) (iVar con) vars
-- A little wordy here, it's set up this way instead of a fold to avoid an
-- extra True when building up our && chain. There's no reason to make the
-- user repeat work every time they use their derived implementation.
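-- e.g. for two used field pairs this builds `x_1 == y_1 && x_2 == y_2`
-- rather than `x_1 == y_1 && x_2 == y_2 && True`.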
makeRhs : List (Name,Name) -> TTImp
makeRhs [] = `(True)
makeRhs [(x,y)] = `( ~(iVar x) == ~(iVar y) )
makeRhs ((x,y) :: xs) = `( ~(iVar x) == ~(iVar y) && ~(makeRhs xs) )
||| The record that idris would make for you when you write an implementation.
eqObject : (decname : Name) -> (funname : Name) -> TypeInfo
-> Visibility -> Elab (Decl, Decl)
eqObject decname eqfun tyinfo vis = do
(qname,_) <- lookupName `{{Eq}}
[NS _ (DN _ eqcon)] <- getCons qname
| _ => fail "eqObject: error during Eq constructor lookup"
let conargs = pullExplicits tyinfo
varnames = map (show . name) conargs
varnames' = map (show . name) (filter (not . isIndex) conargs)
retty = `( Eq ~(appTyCon (map (show . name) conargs) tyinfo.name))
tysig = addEqAutoImps varnames' retty
claim = iClaim MW vis [Hint True] (mkTy decname tysig)
neqfun = `(\x,y => not (x == y))
rhs = `( ~(iVar eqcon) ~(iVar eqfun) ~(neqfun))
body = iDef decname [(patClause (iVar decname) rhs)]
pure $ (claim,body)
export
||| Usage: %runElab deriveEq Export `{{Foo}}
||| Currently, this adds eqImplFoo and eqImplFooFun to the module namespace.
||| This is likely to change in the future to only have eqImplFoo being added
||| to the namespace.
deriveEq : Visibility -> Name -> Elab ()
deriveEq vis eqname = do
(qname,_) <- lookupName eqname -- get the qualified name of our type
-- create human readable names for our instance components
let decn = mapName (\d => "eqImpl" ++ d) eqname
funn = mapName (\d => "eqImpl" ++ d ++ "Fun") eqname
-- Build general info about the type we're deriving (e.g. Foo) that we want
-- to keep around.
tyinfo <- makeTypeInfo qname
-- The components of our eq-ing function
funclaim <- eqClaim funn tyinfo Private -- NB private
funclauses <- traverse (eqCon funn) tyinfo.cons
-- Our function's complete definition
let catchall = patClause `(~(iVar funn) ~implicit' ~implicit') `(False)
fundecl = iDef funn (funclauses ++ [catchall])
-- The actual eqImplFoo : Eq Foo record.
(objclaim,objclause) <- eqObject decn funn tyinfo vis
-- Declare our things into the namespace
-- Both claims first, otherwise we won't find our own Eq in fundecl
declare [funclaim, objclaim]
declare [fundecl, objclause]
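-- Example (illustrative): for a hypothetical `data Foo = MkFoo Nat String`,
-- `%runElab deriveEq Export `{{Foo}}` declares `eqImplFooFun` (the comparison
-- function) and the hint `eqImplFoo : Eq Foo`.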
|
(* colimits are computed pointwise
*)
(**
It is the product of modules.
This then induces a morphism.
*)
(* TODO: do the same thing for limits of Modules *)
Require Import UniMath.Foundations.Propositions.
Require Import UniMath.Foundations.Sets.
Require Import UniMath.MoreFoundations.Tactics.
Require Import UniMath.CategoryTheory.Core.Prelude.
Require Import UniMath.CategoryTheory.FunctorCategory.
Require Import UniMath.CategoryTheory.whiskering.
Require Import UniMath.CategoryTheory.limits.graphs.limits.
Require Import UniMath.CategoryTheory.limits.graphs.colimits.
Require Import UniMath.CategoryTheory.Monads.Monads.
Require Import UniMath.CategoryTheory.Monads.LModules.
Require Import Modules.Signatures.Signature.
Require Import Modules.Prelims.LModulesColims.
Require Import Modules.Prelims.CoproductsComplements.
Local Open Scope cat.
(*
Lemma compfNat
{C : precategory} {g : graph} {d1 d2 d3 : diagram g C}
{f : β u : vertex g, C β¦ dob d1 u, dob d2 u β§}
(fNat : β (u v : vertex g) (e : edge u v), dmor d1 e Β· f v = f u Β· dmor d2 e)
{f2 : β u : vertex g, C β¦ dob d2 u, dob d3 u β§}
(fNat2 : β (u v : vertex g) (e : edge u v), dmor d2 e Β· f2 v = f2 u Β· dmor d3 e)
(f3 := fun u => f u Β· f2 u)
:
β (u v : vertex g) (e : edge u v), dmor d1 e Β· f3 v = f3 u Β· dmor d3 e .
intros u v e.
etrans;[apply assoc|].
etrans;[apply cancel_postcomposition; apply fNat|].
etrans;[|apply assoc].
etrans;[|apply cancel_precomposition; apply fNat2].
apply pathsinv0.
apply assoc.
Qed.
*)
(*
Lemma compColimOfArrows
(C : precategory) (g : graph) (d1 d2 d3 : diagram g C) (CC1 : ColimCocone d1)
(CC2 : ColimCocone d2)(CC3 : ColimCocone d3)
(f : β u : vertex g, C β¦ dob d1 u, dob d2 u β§)
(fNat : β (u v : vertex g) (e : edge u v), dmor d1 e Β· f v = f u Β· dmor d2 e)
(f2 : β u : vertex g, C β¦ dob d2 u, dob d3 u β§)
(fNat2 : β (u v : vertex g) (e : edge u v), dmor d2 e Β· f2 v = f2 u Β· dmor d3 e)
(x : C) (cc : cocone d2 x) :
colimOfArrows CC1 CC2 f fNat Β· colimOfArrows CC2 CC3 f2 fNat2 =
colimOfArrows CC1 CC3 (fun z => f z Β· f2 z) (compfNat fNat fNat2).
etrans;[apply precompWithColimOfArrows|].
cbn.
unfold colimOfArrows.
apply maponpaths.
Abort.
*)
(*
use map_on_two_paths.
maponpaths
apply maponpaths2.
reflexivity.
.fNat Β· colimOfArrows CC2 CC3 f2 fNat2 .
colimOfArrows CC1 CC3 (fun
colimArrow CC1 x
(make_cocone (Ξ» u : vertex g, f u Β· coconeIn cc u)
(preCompWithColimOfArrows_subproof CC1 CC2 f fNat x cc)).
*)
(* forgetful functor from Modules to functors *)
(* TODO: move this to Signature.v *)
Section ColimsSig.
Context
{C : category}
{g : graph} (colims_g : Colims_of_shape g C)
(lims_g : Lims_of_shape g C).
Let hsC := (homset_property C).
(* (hsB : has_homsets B) *)
Let coMod R := (LModule_Colims_of_shape C (B := C) R _ colims_g).
Let limMod R := (LModule_Lims_of_shape C (B := C) R _ lims_g).
(* Local Notation limMod := (LModule_Lims_of_shape _ B _ hsC colims_g). *)
(* Local Notation coFunc := (ColimsFunctorCategory_of_shape _ B _ hsC colims_g). *)
(* Local Notation limFunc := (LimsFunctorCategory_of_shape _ B _ hsC lims_g). *)
(* Local Notation bpFunct := *)
(* (BinProducts_functor_precat B C bpC hsC (M : functor _ _) (N : functor _ _)). *)
(* Definition LModule_colim_functor : functor _ _ := *)
(* BinProductObject _ bpFunct. *)
Local Notation MOD R := (precategory_LModule R C).
Local Notation HAR := (signature_precategory (C:=C)).
Variable (d : diagram g HAR).
(* TODO: generalize this kind of construction: composition of a diagram and a functor
(here the forgetful functor MOD --> [B , C])
*)
Let FORGET R := (forget_Sig (C:=C) R ).
Let d' R := ( mapdiagram (FORGET R) d : diagram g (MOD R) ).
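(* [d' R] is the image of the signature diagram [d] under the forgetful functor,
i.e. each signature in the diagram is evaluated at the monad [R], yielding a
diagram of [R]-modules. *)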
(* The natural candidate *)
Let F R := (colim (coMod _ (d' R)) : LModule _ _).
Let F' R := (lim (limMod _ (d' R)) : LModule _ _).
(* Local Notation BP := (binproduct_functor bpC). *)
(* Is there a lemma that states the existence of a natural transformation
(A x B) o R --> A o R x B o R ? *)
(* TODO define it without nat_trans *)
Definition Sig_colim_on_mor (R S : Monad C) (f : Monad_Mor R S) :
LModule_Mor _ (F R) (pb_LModule f (F S)).
Proof.
eapply (compose (C:= MOD _)); [| apply pb_LModule_colim_iso].
use colimOfArrows.
- intro u.
exact ((#(dob d u : signature _))%ar f).
- abstract (intros u v e;
apply LModule_Mor_equiv;[apply homset_property|];
apply pathsinv0;
apply signature_Mor_ax).
Defined.
Definition Sig_lim_on_mor (R S : Monad C) (f : Monad_Mor R S) :
LModule_Mor _ (F' R) (pb_LModule f (F' S)).
Proof.
eapply (compose (C:= MOD _)); [| apply pb_LModule_lim_iso].
use limOfArrows.
- intro u.
exact ((#(dob d u : signature _))%ar f).
- abstract(intros u v e;
apply LModule_Mor_equiv;[apply homset_property|];
apply signature_Mor_ax).
Defined.
Definition Sig_colim_signature_data : signature_data := _ ,, Sig_colim_on_mor.
Definition Sig_lim_signature_data : signature_data := _ ,, Sig_lim_on_mor.
Lemma Sig_colim_is_signature : is_signature Sig_colim_signature_data.
Proof.
split.
- intros R c.
apply pathsinv0.
apply colim_endo_is_identity.
intro u.
cbn.
rewrite id_right.
set (cc := colims_g _).
etrans;[apply (colimArrowCommutes cc)|].
cbn.
etrans;[|apply id_left].
apply cancel_postcomposition.
apply signature_id.
- intros U V W m n.
apply LModule_Mor_equiv;[apply homset_property|].
apply nat_trans_eq;[apply homset_property|].
intro c.
cbn.
repeat rewrite id_right.
apply pathsinv0.
apply colimArrowUnique.
intro u.
cbn.
etrans.
{
etrans;[apply assoc|].
apply cancel_postcomposition.
set (cc := colims_g _).
apply (colimArrowCommutes cc).
}
cbn.
etrans.
{
cbn.
rewrite <- assoc.
apply cancel_precomposition.
set (cc := colims_g _).
apply (colimArrowCommutes cc).
}
cbn.
rewrite assoc.
apply cancel_postcomposition.
apply pathsinv0.
etrans.
{
assert (h := signature_comp (dob d u) m n).
eapply LModule_Mor_equiv in h; try apply homset_property.
eapply nat_trans_eq_pointwise in h.
apply h.
}
cbn.
now rewrite id_right.
Qed.
Lemma Sig_lim_is_signature : is_signature Sig_lim_signature_data.
Proof.
split.
- intros R c.
apply pathsinv0.
apply lim_endo_is_identity.
intro u.
cbn.
rewrite id_right.
set (cc := lims_g _).
etrans;[apply (limArrowCommutes cc)|].
cbn.
etrans;[|apply id_right].
apply cancel_precomposition.
apply signature_id.
- intros U V W m n.
apply LModule_Mor_equiv;[apply homset_property|].
apply nat_trans_eq;[apply homset_property|].
intro c.
cbn.
repeat rewrite id_right.
apply pathsinv0.
apply limArrowUnique.
intro u.
cbn.
etrans.
{
rewrite <- assoc.
(* etrans;[apply assoc|]. *)
apply cancel_precomposition.
set (cc := lims_g _).
apply (limArrowCommutes cc).
}
cbn.
etrans.
{
cbn.
rewrite assoc.
apply cancel_postcomposition.
set (cc := lims_g _).
apply (limArrowCommutes cc).
}
cbn.
rewrite <- assoc.
apply cancel_precomposition.
apply pathsinv0.
etrans.
{
assert (h := signature_comp (dob d u) m n).
eapply LModule_Mor_equiv in h; try apply homset_property.
eapply nat_trans_eq_pointwise in h.
apply h.
}
cbn.
now rewrite id_right.
Qed.
Definition Sig_colim : signature _ :=
_ ,, Sig_colim_is_signature.
Definition Sig_lim : signature _ :=
_ ,, Sig_lim_is_signature.
Lemma Sig_coconeIn_laws v :
is_signature_Mor
(dob d v : signature _) Sig_colim
(fun R => (coconeIn (colimCocone (coMod R (d' R))) v )).
Proof.
intros X Y f.
(* not necessary but more convenient *)
apply nat_trans_eq;[apply homset_property|].
intro x.
cbn.
rewrite id_right.
set (cc1 := colims_g _).
set (cc2 := colims_g _).
apply pathsinv0.
cbn.
apply (colimArrowCommutes cc2).
Qed.
Lemma Sig_coneOut_laws v :
is_signature_Mor
Sig_lim (dob d v : signature _)
(fun R => (coneOut (limCone (limMod R (d' R))) v )).
Proof.
intros X Y f.
(* not necessary but more convenient *)
apply nat_trans_eq;[apply homset_property|].
intro x.
cbn.
rewrite id_right.
set (cc1 := lims_g _).
set (cc2 := lims_g _).
cbn.
apply (limArrowCommutes cc1).
Qed.
Definition Sig_coconeIn v : signature_precategory β¦ dob d v, Sig_colim β§ :=
_ ,, Sig_coconeIn_laws v.
Definition Sig_coneOut v : signature_precategory β¦ Sig_lim, dob d v β§ :=
_ ,, Sig_coneOut_laws v.
Lemma Sig_coconeIn_commutes (u v : vertex g) (e : edge u v) :
dmor d e Β· Sig_coconeIn v = Sig_coconeIn u.
Proof.
apply signature_Mor_eq.
intro R.
apply (coconeInCommutes (colimCocone (coMod R (d' R)))).
Defined.
Lemma Sig_coneOut_commutes (u v : vertex g) (e : edge u v) :
Sig_coneOut u Β· dmor d e = Sig_coneOut v.
Proof.
apply signature_Mor_eq.
intro R.
apply (coneOutCommutes (limCone (limMod _ (d' R)))).
Defined.
Definition Sig_colim_cocone : cocone d Sig_colim :=
make_cocone Sig_coconeIn Sig_coconeIn_commutes.
Definition Sig_lim_cone : cone d Sig_lim :=
make_cone Sig_coneOut Sig_coneOut_commutes.
Lemma Sig_colimArrow_laws {M : signature C} (cc : cocone d M) :
is_signature_Mor
( Sig_colim) (M)
(fun R => colimArrow (coMod R (d' R)) (M R) (mapcocone (FORGET R) d cc) ).
Proof.
intros R S f.
apply nat_trans_eq;[apply homset_property|].
intro c.
cbn.
repeat rewrite id_right.
apply pathsinv0.
etrans;[apply postcompWithColimArrow|].
apply pathsinv0.
apply colimArrowUnique.
intro u.
cbn.
etrans.
{
rewrite assoc.
apply cancel_postcomposition.
set (cc1 := colims_g _).
apply (colimArrowCommutes cc1).
}
cbn.
etrans.
{
rewrite <- assoc.
apply cancel_precomposition.
set (cc1 := colims_g _).
apply (colimArrowCommutes cc1).
}
cbn.
apply signature_Mor_ax_pw.
Qed.
Lemma Sig_limArrow_laws {M : signature C} (cc : cone d M) :
is_signature_Mor
M ( Sig_lim)
(fun R => limArrow (limMod R (d' R)) (M R) (mapcone (FORGET R) d cc) ).
Proof.
intros R S f.
apply nat_trans_eq;[apply homset_property|].
intro c.
cbn.
repeat rewrite id_right.
apply pathsinv0.
etrans;[apply postCompWithLimArrow|].
apply pathsinv0.
apply limArrowUnique.
intro u.
cbn.
etrans.
{
rewrite <- assoc.
apply cancel_precomposition.
set (cc1 := lims_g _).
apply (limArrowCommutes cc1).
}
cbn.
apply pathsinv0.
etrans.
{
rewrite assoc.
apply cancel_postcomposition.
set (cc1 := lims_g _).
apply (limArrowCommutes cc1).
}
cbn.
apply pathsinv0.
apply signature_Mor_ax_pw.
Qed.
Definition Sig_colimArrow {M : signature _} (cc : cocone d M) :
signature_Mor Sig_colim M := _ ,, Sig_colimArrow_laws cc.
Definition Sig_limArrow {M : signature C} (cc : cone d M) :
signature_Mor M Sig_lim := _ ,, Sig_limArrow_laws cc.
Lemma Sig_isColimCocone : isColimCocone _ _ Sig_colim_cocone.
intros M cc.
use unique_exists.
- exact (Sig_colimArrow cc).
- intro v.
apply signature_Mor_eq.
intro R.
apply (colimArrowCommutes (coMod _ (d' R))).
- intro y.
cbn -[isaprop].
apply impred_isaprop.
intro u.
use signature_category_has_homsets.
- intros y h.
apply signature_Mor_eq.
intro R.
apply (colimArrowUnique (coMod _ (d' R))).
intro u.
apply ( maponpaths (fun z => pr1 z R) (h u)).
Defined.
Lemma Sig_isLimCone : isLimCone _ _ Sig_lim_cone.
intros M cc.
use unique_exists.
- exact (Sig_limArrow cc).
- intro v.
apply signature_Mor_eq.
intro R.
apply (limArrowCommutes (limMod _ (d' R))).
- intro y.
cbn -[isaprop].
apply impred_isaprop.
intro u.
use signature_category_has_homsets.
- intros y h.
apply signature_Mor_eq.
intro R.
apply (limArrowUnique (limMod _ (d' R))).
intro u.
apply ( maponpaths (fun z => pr1 z R) (h u)).
Defined.
Definition Sig_ColimCocone : ColimCocone d :=
make_ColimCocone _ _ _ Sig_isColimCocone.
Definition Sig_LimCone : LimCone d :=
make_LimCone _ _ _ Sig_isLimCone.
End ColimsSig.
Definition Sig_Colims_of_shape {C : category}
(g : graph)
(colims_g : Colims_of_shape g C)
: Colims_of_shape g (signature_category) :=
Sig_ColimCocone (C:= C) (g := g) colims_g.
Definition Sig_Lims_of_shape {C : category}
(g : graph)
(lims_g : Lims_of_shape g C)
: Lims_of_shape g (signature_precategory) :=
Sig_LimCone (C:= C) (g := g) lims_g.
|
subsection \<open>Parametricity of the Heap Monad\<close>
theory DP_CRelVH
imports State_Heap
begin
locale dp_heap =
state_dp_consistency: dp_consistency lookup_st update_st P dp + heap_mem_defs Q lookup update
for P Q :: "heap \<Rightarrow> bool" and dp :: "'k \<Rightarrow> 'v" and lookup :: "'k \<Rightarrow> 'v option Heap"
and lookup_st update update_st +
assumes
rel_state_lookup: "rel_fun (=) (rel_state (=)) lookup_st lookup"
and
rel_state_update: "rel_fun (=) (rel_fun (=) (rel_state (=))) update_st update"
begin
context
includes lifting_syntax heap_monad_syntax
begin
definition "crel_vs R v f \<equiv>
\<forall>heap. P heap \<and> Q heap \<and> state_dp_consistency.cmem heap \<longrightarrow>
(case execute f heap of
None \<Rightarrow> False |
Some (v', heap') \<Rightarrow> P heap' \<and> Q heap' \<and> R v v' \<and> state_dp_consistency.cmem heap'
)
"
abbreviation rel_fun_lifted :: "('a \<Rightarrow> 'c \<Rightarrow> bool) \<Rightarrow> ('b \<Rightarrow> 'd \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> ('c ==H\<Longrightarrow> 'd) \<Rightarrow> bool" (infixr "===>\<^sub>T" 55) where
"rel_fun_lifted R R' \<equiv> R ===> crel_vs R'"
definition consistentDP :: "('k \<Rightarrow> 'v Heap) \<Rightarrow> bool" where
"consistentDP \<equiv> ((=) ===> crel_vs (=)) dp"
lemma consistentDP_intro:
assumes "\<And>param. Transfer.Rel (crel_vs (=)) (dp param) (dp\<^sub>T param)"
shows "consistentDP dp\<^sub>T"
using assms unfolding consistentDP_def Rel_def by blast
lemma crel_vs_execute_None:
False if "crel_vs R a b" "execute b heap = None" "P heap" "Q heap" "state_dp_consistency.cmem heap"
using that unfolding crel_vs_def by auto
lemma crel_vs_execute_Some:
assumes "crel_vs R a b" "P heap" "Q heap" "state_dp_consistency.cmem heap"
obtains x heap' where "execute b heap = Some (x, heap')" "P heap'" "Q heap'"
using assms unfolding crel_vs_def by (cases "execute b heap") auto
lemma crel_vs_executeD:
assumes "crel_vs R a b" "P heap" "Q heap" "state_dp_consistency.cmem heap"
obtains x heap' where
"execute b heap = Some (x, heap')" "P heap'" "Q heap'" "state_dp_consistency.cmem heap'" "R a x"
using assms unfolding crel_vs_def by (cases "execute b heap") auto
lemma crel_vs_success:
assumes "crel_vs R a b" "P heap" "Q heap" "state_dp_consistency.cmem heap"
shows "success b heap"
using assms unfolding success_def by (auto elim: crel_vs_executeD)
lemma crel_vsI: "crel_vs R a b" if "(state_dp_consistency.crel_vs R OO rel_state (=)) a b"
using that by (auto 4 3 elim: state_dp_consistency.crel_vs_elim rel_state_elim simp: crel_vs_def)
lemma transfer'_return[transfer_rule]:
"(R ===> crel_vs R) Wrap return"
proof -
have "(R ===> (state_dp_consistency.crel_vs R OO rel_state (=))) Wrap return"
by (rule rel_fun_comp1 state_dp_consistency.return_transfer transfer_return)+ auto
then show ?thesis
by (blast intro: rel_fun_mono crel_vsI)
qed
lemma crel_vs_return:
"Transfer.Rel (crel_vs R) (Wrap x) (return y)" if "Transfer.Rel R x y"
using that unfolding Rel_def by (rule transfer'_return[unfolded rel_fun_def, rule_format])
lemma crel_vs_return_ext:
"\<lbrakk>Transfer.Rel R x y\<rbrakk> \<Longrightarrow> Transfer.Rel (crel_vs R) x (Heap_Monad.return y)"
by (fact crel_vs_return[unfolded Wrap_def])
term 0 (**)
lemma bind_transfer[transfer_rule]:
"(crel_vs R0 ===> (R0 ===> crel_vs R1) ===> crel_vs R1) (\<lambda>v f. f v) (\<bind>)"
unfolding rel_fun_def bind_def
by safe (subst crel_vs_def, auto 4 4 elim: crel_vs_execute_Some elim!: crel_vs_executeD)
lemma crel_vs_update:
"crel_vs (=) () (update param (dp param))"
by (rule
crel_vsI relcomppI state_dp_consistency.crel_vs_update
rel_state_update[unfolded rel_fun_def, rule_format] HOL.refl
)+
lemma crel_vs_lookup:
"crel_vs
(\<lambda> v v'. case v' of None \<Rightarrow> True | Some v' \<Rightarrow> v = v' \<and> v = dp param) (dp param) (lookup param)"
by (rule
crel_vsI relcomppI state_dp_consistency.crel_vs_lookup
rel_state_lookup[unfolded rel_fun_def, rule_format] HOL.refl
)+
lemma crel_vs_eq_eq_onp:
"crel_vs (eq_onp (\<lambda> x. x = v)) v s" if "crel_vs (=) v s"
using that unfolding crel_vs_def by (auto split: option.split simp: eq_onp_def)
lemma crel_vs_checkmem:
"Transfer.Rel (crel_vs R) (dp param) (checkmem param s)" if "is_equality R" "Transfer.Rel (crel_vs R) (dp param) s"
unfolding checkmem_def Rel_def that(1)[unfolded is_equality_def]
by (rule bind_transfer[unfolded rel_fun_def, rule_format, OF crel_vs_lookup])
(auto 4 3 split: option.split_asm intro: crel_vs_bind_eq crel_vs_update crel_vs_return[unfolded Wrap_def Rel_def] that(2)[unfolded Rel_def that(1)[unfolded is_equality_def]])
lemma crel_vs_checkmem_tupled:
assumes "v = dp param"
shows "\<lbrakk>is_equality R; Transfer.Rel (crel_vs R) v s\<rbrakk>
\<Longrightarrow> Transfer.Rel (crel_vs R) v (checkmem param s)"
unfolding assms by (fact crel_vs_checkmem)
lemma transfer_fun_app_lifted[transfer_rule]:
"(crel_vs (R0 ===> crel_vs R1) ===> crel_vs R0 ===> crel_vs R1)
App Heap_Monad_Ext.fun_app_lifted"
unfolding Heap_Monad_Ext.fun_app_lifted_def App_def by transfer_prover
lemma crel_vs_fun_app:
"\<lbrakk>Transfer.Rel (crel_vs R0) x x\<^sub>T; Transfer.Rel (crel_vs (R0 ===>\<^sub>T R1)) f f\<^sub>T\<rbrakk> \<Longrightarrow> Transfer.Rel (crel_vs R1) (App f x) (f\<^sub>T . x\<^sub>T)"
unfolding Rel_def using transfer_fun_app_lifted[THEN rel_funD, THEN rel_funD] .
end (* Lifting Syntax *)
end (* Dynamic Programming Problem *)
locale dp_consistency_heap = heap_correct +
fixes dp :: "'a \<Rightarrow> 'b"
begin
interpretation state_mem_correct: mem_correct lookup' update' P
by (rule mem_correct_heap)
interpretation state_dp_consistency: dp_consistency lookup' update' P dp ..
lemma dp_heap: "dp_heap P P lookup lookup' update update'"
by (standard; rule transfer_lookup transfer_update)
sublocale dp_heap P P dp lookup lookup' update update'
by (rule dp_heap)
notation rel_fun_lifted (infixr "===>\<^sub>T" 55)
end
locale heap_correct_empty = heap_correct +
fixes empty
assumes empty_correct: "map_of_heap empty \<subseteq>\<^sub>m Map.empty" and P_empty: "P empty"
locale dp_consistency_heap_empty =
dp_consistency_heap + heap_correct_empty
begin
lemma cmem_empty:
"state_dp_consistency.cmem empty"
using empty_correct
unfolding state_dp_consistency.cmem_def
unfolding map_of_heap_def
unfolding state_dp_consistency.map_of_def
unfolding lookup'_def
unfolding map_le_def
by auto
corollary memoization_correct:
"dp x = v" "state_dp_consistency.cmem m" if
"consistentDP dp\<^sub>T" "Heap_Monad.execute (dp\<^sub>T x) empty = Some (v, m)"
using that unfolding consistentDP_def
by (auto dest!: rel_funD[where x = x] elim!: crel_vs_executeD intro: P_empty cmem_empty)
lemma memoized_success:
"success (dp\<^sub>T x) empty" if "consistentDP dp\<^sub>T"
using that cmem_empty P_empty
by (auto dest!: rel_funD intro: crel_vs_success simp: consistentDP_def)
lemma memoized:
"dp x = fst (the (Heap_Monad.execute (dp\<^sub>T x) empty))" if "consistentDP dp\<^sub>T"
using surjective_pairing memoization_correct(1)[OF that]
memoized_success[OF that, unfolded success_def]
by (cases "execute (dp\<^sub>T x) empty"; auto)
lemma cmem_result:
"state_dp_consistency.cmem (snd (the (Heap_Monad.execute (dp\<^sub>T x) empty)))" if "consistentDP dp\<^sub>T"
using surjective_pairing memoization_correct(2)[OF that(1)]
memoized_success[OF that, unfolded success_def]
by (cases "execute (dp\<^sub>T x) empty"; auto)
end
end (* Theory *)
|
/- Tactic : refl
## Summary
`refl` is a tactic which proves goals of the form `X = X`.
## Details
The `refl` tactic will close any goal of the form `A = B`
where `A` and `B` are *exactly the same thing*.
### Example:
If it looks like this in the top right hand box:
```
A B : set X
β’ A βͺ B = A βͺ B
```
then
`refl,`
will close the goal and solve the level. Don't forget the comma.
-/
/-
We will start by practising with the simplest tactic, namely *refl*. This just proves goals
of the form $A = A$, no matter how complicated $A$ is. Let's see it in action!
-/
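/-
As an extra illustration (an aside, not part of the level below), `refl` also closes
goals where both sides compute to the same value, such as `2 + 2 = 4`:
-/
example : 2 + 2 = 4 :=
begin
  refl,
end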
/- Hint : Click here for a hint, in case you get stuck.
Just delete `sorry` and type `refl,` (don't forget the comma!).
-/
variables {X : Type} -- hide
/- Lemma : no-side-bar
If A and B are sets, then A βͺ B = A βͺ B.
-/
lemma union_is_union (A B : set X) : A βͺ B = A βͺ B :=
begin
refl,
end
|
import ProofWidgets.Data.Html
namespace ProofWidgets
open Lean Server
/-- An `Expr` presenter is similar to a delaborator but outputs HTML trees instead of syntax, and
the output HTML can contain elements which interact with the original `Expr` in some way. We call
interactive outputs with a reference to the original input *presentations*. -/
structure ExprPresenter where
/-- A user-friendly name for this presenter. For example, "LaTeX". -/
userName : String
/- TODO: there is a general problem of writing env extensions which store an extendable list of
functions to run on `Expr`s, but not all of which are applicable to any single `Expr` (actually,
most are not). Invoking them in sequence is O(n); we would do better to use something like `DiscrTree`.
Registering new entries would need to extend the DiscrTree, perhaps like
`registerSelf : DiscrTree ExprPresenter β DiscrTree ExprPresenter`.
Dispatching on just one constant like e.g. delaborators (`app.MyType.myCtr`) does not appear
sufficient because one entry may apply to multiple expressions of a given form which could be
represented as a schematic with mvars, say `@ofNat ? 0 ?`.
TODO: actually, for most use cases name-based dispatch might be sufficient, and it's simple. -/
/-- Should quickly determine if the `Expr` is within this presenter's domain of applicability.
For example it could check for a constant like the `` `name `` in ``@[delab `name]``. -/
isApplicable : Expr β MetaM Bool
/-- Whether the output should use inline (think something which fits in the space normally
occupied by an `Expr`, e.g. LaTeX) or block (think large diagram which needs dedicated space)
HTML layout. -/
layoutKind : LayoutKind := .block
/-- *Must* return `some _` or throw when `isApplicable` is `true`. -/
present : Expr β MetaM (Option Html)
initialize exprPresenters : TagAttribute β
registerTagAttribute `expr_presenter
"Register an Expr presenter. It must have the type `ProofWidgets.ExprPresenter`."
(validate := fun nm => do
let const β getConstInfo nm
if !const.type.isConstOf ``ExprPresenter then
throwError m!"type mismatch, expected {mkConst ``ExprPresenter} but got {const.type}"
return ())
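/- A minimal illustrative presenter (a sketch, not part of this module). It is shown in a
comment because an attribute registered with `registerTagAttribute` only becomes usable in
files that import the registering module:

@[expr_presenter]
def debugStringPresenter : ExprPresenter where
  userName := "Debug string"
  isApplicable := fun _ => return true
  layoutKind := .inline
  present := fun e => return some <| .text e.dbgToString
-/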
private unsafe def evalExprPresenterUnsafe (env : Environment) (opts : Options)
(constName : Name) : Except String ExprPresenter :=
env.evalConstCheck ExprPresenter opts ``ExprPresenter constName
@[implemented_by evalExprPresenterUnsafe]
opaque evalExprPresenter (env : Environment) (opts : Options) (constName : Name) :
Except String ExprPresenter
structure ApplicableExprPresentersParams where
expr : WithRpcRef ExprWithCtx
#mkrpcenc ApplicableExprPresentersParams
structure ExprPresenterId where
name : Name
userName : String
deriving FromJson, ToJson
structure ApplicableExprPresenters where
presenters : Array ExprPresenterId
deriving FromJson, ToJson
@[server_rpc_method]
def applicableExprPresenters : ApplicableExprPresentersParams β
RequestM (RequestTask ApplicableExprPresenters)
| β¨β¨exprβ©β© => RequestM.asTask do
let mut presenters : Array ExprPresenterId := #[]
let ci := expr.ci
for nm in exprPresenters.ext.getState expr.ci.env do
match evalExprPresenter ci.env ci.options nm with
| .ok p =>
if β expr.runMetaM p.isApplicable then
presenters := presenters.push β¨nm, p.userNameβ©
| .error e =>
throw <| RequestError.internalError s!"Failed to evaluate Expr presenter '{nm}': {e}"
return { presenters }
structure GetExprPresentationParams where
expr : WithRpcRef ExprWithCtx
/-- Name of the presenter to use. -/
name : Name
#mkrpcenc GetExprPresentationParams
@[server_rpc_method]
def getExprPresentation : GetExprPresentationParams β
RequestM (RequestTask Html)
| { expr := β¨exprβ©, name } => RequestM.asTask do
let ci := expr.ci
if !exprPresenters.hasTag ci.env name then
throw <| RequestError.invalidParams s!"The constant '{name}' is not an Expr presenter."
match evalExprPresenter ci.env ci.options name with
| .ok p =>
let some ret β expr.runMetaM p.present
| throw <| RequestError.internalError <|
s!"Got none from {name}.present e, expected some _ because {name}.isApplicable e " ++
s!"returned true, where e := {expr.expr}"
return ret
| .error e =>
throw <| RequestError.internalError s!"Failed to evaluate Expr presenter '{name}': {e}"
structure ExprPresentationProps where
expr : WithRpcRef ExprWithCtx
#mkrpcenc ExprPresentationProps
/-- This component shows a selection of all known and applicable `ProofWidgets.ExprPresenter`s which
are used to render the expression when selected. By default `ProofWidgets.InteractiveExpr` is shown. -/
@[widget_module]
def ExprPresentation : Component ExprPresentationProps where
javascript := include_str ".." / ".." / "build" / "js" / "exprPresentation.js"
end ProofWidgets
|
module Issue1760c where
-- Skipping an old-style mutual block: Before the `mutual` keyword.
{-# NO_POSITIVITY_CHECK #-}
mutual
data D : Set where
lam : (D β D) β D
record U : Set where
field ap : U β U
|
So, apparently the info I found about how tables start was wrong. At least one game didn't start despite having over the min number of players... Sorry.
I started a new RftG, and invited the people I think were interested. If I invited you by mistake, sorry, don't feel obligated to play, and if I missed you, double sorry. I hope this system is the next thing the admins decide to overhaul.
I'll try the other failed ones again soon.
New game! Nippon is open for 3 others to join here. I've never played it before, but looking forward to learning.
Note: We figured out recently that RftG will let you create a six-player game, but will not let the sixth player join if you don't have the final expansion loaded.
Thank you for the note, I booted you, and moved it down to 5 instead.
Yep. I didn't join the second tourney because I lost all but one of the matches that I played because of running out of time.
I feel like the guy I was playing against was TRYING to make me run out of time, but it may have just been in my head.
There are absolutely people who do this.
https://en.boardgamearena.com/#!table?table=36649836 - 1 person come play Hanabi with me! |
# Looker API 4.0 (Beta) Reference
#
# Welcome to the future! API 4.0 co-exists with APIs 3.1 and 3.0. (3.0 should no longer be used.) The \"beta\" tag means updates for API 4.0 may include breaking changes, but as always we will work to minimize them. ### Authorization The classic method of API authorization uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. API 4.0 adds additional ways to authenticate API requests, including OAuth and CORS requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization). ### API Explorer The API Explorer is a Looker-provided utility with many new and unique features for learning and using the Looker API and SDKs. It is a replacement for the 'api-docs' page currently provided on Looker instances. For details, see the [API Explorer documentation](https://looker.com/docs/r/api/explorer). ### Looker Language SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. SDKs for a variety of programming languages are also provided to streamline using the API. Looker has an OpenSource [sdk-codegen project](https://github.com/looker-open-source/sdk-codegen) that provides several language SDKs. Language SDKs generated by `sdk-codegen` have an Authentication manager that can automatically authenticate API requests when needed. For details on available Looker SDKs, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks). ### API Versioning Future releases of Looker expand the latest API version release-by-release to securely expose more and more of the core power of the Looker platform to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning (but we will try to avoid doing that). Stable (non-beta) API endpoints should not receive breaking changes in future releases. For details, see [Looker API Versioning](https://looker.com/docs/r/api/versioning). ### In This Release API 4.0 version was introduced so we can make adjustments to API functions, parameters, and response types to fix bugs and inconsistencies. These changes fall outside the bounds of non-breaking additive changes we can make to our stable API 3.1. One benefit of these type adjustments in API 4.0 is dramatically better support for strongly typed languages like TypeScript, Kotlin, Swift, Go, C#, and more. While API 3.1 is still the de-facto Looker API (\"current\", \"stable\", \"default\", etc), the bulk of our development activity has shifted to API 4.0, where all new features are added. The API Explorer can be used to [interactively compare](https://looker.com/docs/r/api/explorer#comparing_api_versions) the differences between API 3.1 and 4.0. ### API and SDK Support Policies Looker API versions and language SDKs have varying support levels. Please read the API and SDK [support policies](https://looker.com/docs/r/api/support-policy) for more information.
#
# OpenAPI spec version: 4.0.21.18
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' BoardSection Class
#'
#' @field can
#' @field created_at
#' @field deleted_at
#' @field description
#' @field board_id
#' @field board_items
#' @field id
#' @field item_order
#' @field visible_item_order
#' @field title
#' @field updated_at
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite parse_json toJSON
#' @export
BoardSection <- R6::R6Class(
'BoardSection',
public = list(
`can` = NULL,
`created_at` = NULL,
`deleted_at` = NULL,
`description` = NULL,
`board_id` = NULL,
`board_items` = NULL,
`id` = NULL,
`item_order` = NULL,
`visible_item_order` = NULL,
`title` = NULL,
`updated_at` = NULL,
initialize = function(`can`, `created_at`, `deleted_at`, `description`, `board_id`, `board_items`, `id`, `item_order`, `visible_item_order`, `title`, `updated_at`){
if (!missing(`can`)) {
self$`can` <- `can`
}
if (!missing(`created_at`)) {
stopifnot(is.character(`created_at`), length(`created_at`) == 1)
self$`created_at` <- `created_at`
}
if (!missing(`deleted_at`)) {
stopifnot(is.character(`deleted_at`), length(`deleted_at`) == 1)
self$`deleted_at` <- `deleted_at`
}
if (!missing(`description`)) {
stopifnot(is.character(`description`), length(`description`) == 1)
self$`description` <- `description`
}
if (!missing(`board_id`)) {
stopifnot(is.numeric(`board_id`), length(`board_id`) == 1)
self$`board_id` <- `board_id`
}
if (!missing(`board_items`)) {
stopifnot(is.list(`board_items`), length(`board_items`) != 0)
lapply(`board_items`, function(x) stopifnot(R6::is.R6(x)))
self$`board_items` <- `board_items`
}
if (!missing(`id`)) {
stopifnot(is.numeric(`id`), length(`id`) == 1)
self$`id` <- `id`
}
if (!missing(`item_order`)) {
stopifnot(is.list(`item_order`), length(`item_order`) != 0)
lapply(`item_order`, function(x) stopifnot(is.character(x)))
self$`item_order` <- `item_order`
}
if (!missing(`visible_item_order`)) {
stopifnot(is.list(`visible_item_order`), length(`visible_item_order`) != 0)
lapply(`visible_item_order`, function(x) stopifnot(is.character(x)))
self$`visible_item_order` <- `visible_item_order`
}
if (!missing(`title`)) {
stopifnot(is.character(`title`), length(`title`) == 1)
self$`title` <- `title`
}
if (!missing(`updated_at`)) {
stopifnot(is.character(`updated_at`), length(`updated_at`) == 1)
self$`updated_at` <- `updated_at`
}
},
toJSON = function() {
BoardSectionObject <- list()
if (!is.null(self$`can`)) {
BoardSectionObject[['can']] <- self$`can`
}
if (!is.null(self$`created_at`)) {
BoardSectionObject[['created_at']] <- self$`created_at`
}
if (!is.null(self$`deleted_at`)) {
BoardSectionObject[['deleted_at']] <- self$`deleted_at`
}
if (!is.null(self$`description`)) {
BoardSectionObject[['description']] <- self$`description`
}
if (!is.null(self$`board_id`)) {
BoardSectionObject[['board_id']] <- self$`board_id`
}
if (!is.null(self$`board_items`)) {
BoardSectionObject[['board_items']] <- lapply(self$`board_items`, function(x) x$toJSON())
}
if (!is.null(self$`id`)) {
BoardSectionObject[['id']] <- self$`id`
}
if (!is.null(self$`item_order`)) {
BoardSectionObject[['item_order']] <- self$`item_order`
}
if (!is.null(self$`visible_item_order`)) {
BoardSectionObject[['visible_item_order']] <- self$`visible_item_order`
}
if (!is.null(self$`title`)) {
BoardSectionObject[['title']] <- self$`title`
}
if (!is.null(self$`updated_at`)) {
BoardSectionObject[['updated_at']] <- self$`updated_at`
}
BoardSectionObject
},
fromJSONObject = function(BoardSectionJsonObject) {
BoardSectionObject <- BoardSectionJsonObject #jsonlite::fromJSON(BoardSectionJson, simplifyVector = FALSE)
if (!is.null(BoardSectionObject$`can`)) {
self$`can` <- BoardSectionObject$`can`
}
if (!is.null(BoardSectionObject$`created_at`)) {
self$`created_at` <- BoardSectionObject$`created_at`
}
if (!is.null(BoardSectionObject$`deleted_at`)) {
self$`deleted_at` <- BoardSectionObject$`deleted_at`
}
if (!is.null(BoardSectionObject$`description`)) {
self$`description` <- BoardSectionObject$`description`
}
if (!is.null(BoardSectionObject$`board_id`)) {
self$`board_id` <- BoardSectionObject$`board_id`
}
if (!is.null(BoardSectionObject$`board_items`)) {
self$`board_items` <- lapply(BoardSectionObject$`board_items`, function(x) {
board_itemsObject <- BoardItem$new()
board_itemsObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE))
board_itemsObject
})
}
if (!is.null(BoardSectionObject$`id`)) {
self$`id` <- BoardSectionObject$`id`
}
if (!is.null(BoardSectionObject$`item_order`)) {
self$`item_order` <- BoardSectionObject$`item_order`
}
if (!is.null(BoardSectionObject$`visible_item_order`)) {
self$`visible_item_order` <- BoardSectionObject$`visible_item_order`
}
if (!is.null(BoardSectionObject$`title`)) {
self$`title` <- BoardSectionObject$`title`
}
if (!is.null(BoardSectionObject$`updated_at`)) {
self$`updated_at` <- BoardSectionObject$`updated_at`
}
},
fromJSON = function(BoardSectionJson) {
BoardSectionObject <- jsonlite::fromJSON(BoardSectionJson, simplifyVector = FALSE)
self$fromJSONObject(BoardSectionObject)
},
toJSONString = function() {
sprintf(
'{
"can": %s,
"created_at": %s,
"deleted_at": %s,
"description": %s,
"board_id": %d,
"board_items": [%s],
"id": %d,
"item_order": [%s],
"visible_item_order": [%s],
"title": %s,
"updated_at": %s
}',
self$`can`,
self$`created_at`,
self$`deleted_at`,
self$`description`,
self$`board_id`,
lapply(self$`board_items`, function(x) paste(x$toJSON(), sep=",")),
self$`id`,
lapply(self$`item_order`, function(x) paste(paste0('"', x, '"'), sep=",")),
lapply(self$`visible_item_order`, function(x) paste(paste0('"', x, '"'), sep=",")),
self$`title`,
self$`updated_at`
)
},
fromJSONString = function(BoardSectionJson) {
BoardSectionObject <- jsonlite::fromJSON(BoardSectionJson, simplifyVector = FALSE)
self$`can` <- BoardSectionObject$`can`
self$`created_at` <- BoardSectionObject$`created_at`
self$`deleted_at` <- BoardSectionObject$`deleted_at`
self$`description` <- BoardSectionObject$`description`
self$`board_id` <- BoardSectionObject$`board_id`
self$`board_items` <- lapply(BoardSectionObject$`board_items`, function(x) BoardItem$new()$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE)))
self$`id` <- BoardSectionObject$`id`
self$`item_order` <- BoardSectionObject$`item_order`
self$`visible_item_order` <- BoardSectionObject$`visible_item_order`
self$`title` <- BoardSectionObject$`title`
self$`updated_at` <- BoardSectionObject$`updated_at`
}
)
)
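# Example usage (illustrative sketch; the field values are hypothetical):
#   section <- BoardSection$new(title = "KPIs", board_id = 1)
#   section$toJSON()                                    # plain R list
#   section$fromJSONString('{"title":"KPIs","board_id":1}')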
|
\pagebreak
\section{Facade}
\subsection{Architecture overview}
Since most of the operations involve interaction with both the web3functions and IPFS packages, we provide a third package, facade, with the aim of hiding the web3 and IPFS layers. This package issues calls to the other two packages in the appropriate order, saving and retrieving data from both IPFS and Ethereum.
\begin{figure}[h]
\centering
\includegraphics[scale=0.6]{res/images/facade.png}
\caption{Package diagram with the interaction between facade-web3-IPFS}
\end{figure}
\subsection{Methods}
All the methods described in the picture below consist of two different steps:
\begin{itemize}
\item \textbf{setter}: first the data is stored on IPFS, then it is stored on Ethereum together with the related IPFS CID;
\item \textbf{getter}: the IPFS CID is retrieved from web3. The CID is then used to get the data from IPFS.
\end{itemize}
\noindent Since the methods are just a combination of the already described methods of the web3 and IPFS packages, we omit their description.
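\noindent As an illustration only, a setter/getter pair could be sketched as follows (the function and object names are placeholders, not the actual project API):
\begin{verbatim}
// setter: store the data on IPFS, then save the CID on Ethereum
async function setData(data) {
  const cid = await ipfs.add(data);             // IPFS package
  await contract.methods.setCID(cid).send();    // web3 package
}

// getter: read the CID from Ethereum, then fetch the data from IPFS
async function getData() {
  const cid = await contract.methods.getCID().call();
  return ipfs.get(cid);
}
\end{verbatim}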
\subsection{How to extend with new features}
This package should be used to organize information retrieved from different sources before passing it to the front-end. This package hides the interactions between the different technologies, but should not be used to interact directly with the blockchain or IPFS. Two important aspects the developer must keep in mind are:
\begin{itemize}
\item if new functionality is added in the Solidity back-end and in the web3 package, the facade package should be updated only if the user could interact directly with this functionality;
\item even if the purpose of this package is to transform and organize data to make them more readable and usable, it is not responsible for preparing data as they will be shown/used by the front-end, that is containers/actionCreators' responsibility.
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[scale=0.55]{res/images/facade-package.png}
\caption{Class diagram of the facade package}
\end{figure}
|
#include <boost/preprocessor/tuple/pop_front.hpp>
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- This module is DEPRECATED.
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
open import Relation.Binary using (Rel; Setoid; Substitutive; Symmetric; Total)
module Algebra.FunctionProperties.Consequences
{a β} (S : Setoid a β) where
{-# WARNING_ON_IMPORT
"Algebra.FunctionProperties.Consequences was deprecated in v1.3.
Use Algebra.Consequences.Setoid instead."
#-}
open import Algebra.Consequences.Setoid S public
|
cc Copyright (C) 2004-2009: Leslie Greengard and June-Yub Lee
cc Contact: [email protected]
cc
cc This program is free software; you can redistribute it and/or modify
cc it under the terms of the GNU General Public License as published by
cc the Free Software Foundation; either version 2 of the License, or
cc (at your option) any later version. This program is distributed in
cc the hope that it will be useful, but WITHOUT ANY WARRANTY; without
cc even the implied warranty of MERCHANTABILITY or FITNESS FOR A
cc PARTICULAR PURPOSE. See the GNU General Public License for more
cc details. You should have received a copy of the GNU General Public
cc License along with this program;
cc if not, see <http://www.gnu.org/licenses/>.
c
program testfft
implicit none
c
c --- local variables
c
integer i,ier,iflag,j,k1,mx,ms,nj
parameter (mx=10 000)
real(8):: xj(mx), sk(mx)
real(8):: err,eps,pi
parameter (pi=3.141592653589793d0)
complex(8) :: cj(mx),cj0(mx),cj1(mx)
complex(8) :: fk0(mx),fk1(mx)
c
c --------------------------------------------------
c create some test data
c --------------------------------------------------
ms = 90
nj = 128
do k1 = -nj/2, (nj-1)/2
j = k1+nj/2+1
xj(j) = pi * dcos(-pi*j/nj)
cj(j) = dcmplx( dsin(pi*j/nj), dcos(pi*j/nj))
enddo
c
c --------------------------------------------------
c start tests
c --------------------------------------------------
c
iflag = 1
print*,' Start 1D testing: ', ' nj =',nj, ' ms =',ms
do i = 1,3
if (i.eq.1) eps=1d-4
if (i.eq.2) eps=1d-8
if (i.eq.3) eps=1d-12
print*,' '
print*,' Requested precision eps =',eps
print*,' '
c
c -----------------------
c call 1D Type1 method
c -----------------------
c
call dirft1d1(nj,xj,cj,iflag, ms,fk0)
call nufft1d1f90(nj,xj,cj,iflag,eps, ms,fk1,ier)
call errcomp(fk0,fk1,ms,err)
print *,' ier = ',ier
print *,' type 1 error = ',err
c
c -----------------------
c call 1D Type2 method
c -----------------------
c
call dirft1d2(nj,xj,cj0,iflag, ms,fk0,ier)
call nufft1d2f90(nj,xj,cj1,iflag, eps, ms,fk0,ier)
call errcomp(cj0,cj1,nj,err)
print *,' ier = ',ier
print *,' type 2 error = ',err
c
c -----------------------
c call 1D Type3 method
c -----------------------
do k1 = 1, ms
sk(k1) = 48d0*dcos(k1*pi/ms)
enddo
call dirft1d3(nj,xj,cj,iflag, ms,sk,fk0)
call nufft1d3f90(nj,xj,cj,iflag,eps, ms,sk,fk1,ier)
call errcomp(cj0,cj1,nj,err)
print *,' ier = ',ier
print *,' type 3 error = ',err
enddo
stop
end
c
c
c
c
c
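c     errcomp computes the relative l2 error between the direct and
c     fast transforms: err = sqrt( sum |fk1-fk0|^2 / sum |fk0|^2 )
c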
subroutine errcomp(fk0,fk1,n,err)
implicit none
integer k,n
complex(8) :: fk0(n), fk1(n)
real(8) :: salg,ealg,err
c
ealg = 0d0
salg = 0d0
do k = 1, n
ealg = ealg + cdabs(fk1(k)-fk0(k))**2
salg = salg + cdabs(fk0(k))**2
enddo
err =sqrt(ealg/salg)
return
end
|
For all your gardening and lawn mowing needs in the Mount Barker area our local franchisee Greg Scholefield is here to help you.
Mr Clip franchisees are trained to industry standards in garden maintenance and are fully insured. At Mr Clip Lawn and Garden Services we strive to ensure all our customers have a professional yet personal experience when dealing with us. Call Mr Clip Lawn and Gardening Services for gardening services in the Mount Barker area.
Mr Clip Garden and Lawn Services have a franchisee available for all your garden maintenance needs across greater Adelaide, Adelaide Hills, Mount Barker, and even Victor Harbor. |
[STATEMENT]
lemma (in Module) mker_of_mpj:"submodule R M H \<Longrightarrow>
ker\<^bsub>M,(M /\<^sub>m H)\<^esub> (mpj M H) = H"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. submodule R M H \<Longrightarrow> ker\<^bsub>M,M /\<^sub>m H\<^esub> mpj M H = H
[PROOF STEP]
apply (simp add:ker_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. submodule R M H \<Longrightarrow> {a \<in> carrier M. mpj M H a = \<zero>\<^bsub>M /\<^sub>m H\<^esub>} = H
[PROOF STEP]
apply (rule equalityI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. submodule R M H \<Longrightarrow> {a \<in> carrier M. mpj M H a = \<zero>\<^bsub>M /\<^sub>m H\<^esub>} \<subseteq> H
2. submodule R M H \<Longrightarrow> H \<subseteq> {a \<in> carrier M. mpj M H a = \<zero>\<^bsub>M /\<^sub>m H\<^esub>}
[PROOF STEP]
apply (rule subsetI, simp, erule conjE)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. \<lbrakk>submodule R M H; x \<in> carrier M; mpj M H x = \<zero>\<^bsub>M /\<^sub>m H\<^esub>\<rbrakk> \<Longrightarrow> x \<in> H
2. submodule R M H \<Longrightarrow> H \<subseteq> {a \<in> carrier M. mpj M H a = \<zero>\<^bsub>M /\<^sub>m H\<^esub>}
[PROOF STEP]
apply (simp add:elem_mpj, simp add:qmodule_def)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. \<lbrakk>submodule R M H; x \<in> carrier M; x \<uplus>\<^bsub>M\<^esub> H = H\<rbrakk> \<Longrightarrow> x \<in> H
2. submodule R M H \<Longrightarrow> H \<subseteq> {a \<in> carrier M. mpj M H a = \<zero>\<^bsub>M /\<^sub>m H\<^esub>}
[PROOF STEP]
apply (frule_tac m = x in m_in_mr_coset [of H], assumption+)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. \<lbrakk>submodule R M H; x \<in> carrier M; x \<uplus>\<^bsub>M\<^esub> H = H; x \<in> x \<uplus>\<^bsub>M\<^esub> H\<rbrakk> \<Longrightarrow> x \<in> H
2. submodule R M H \<Longrightarrow> H \<subseteq> {a \<in> carrier M. mpj M H a = \<zero>\<^bsub>M /\<^sub>m H\<^esub>}
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. submodule R M H \<Longrightarrow> H \<subseteq> {a \<in> carrier M. mpj M H a = \<zero>\<^bsub>M /\<^sub>m H\<^esub>}
[PROOF STEP]
apply (rule subsetI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>submodule R M H; x \<in> H\<rbrakk> \<Longrightarrow> x \<in> {a \<in> carrier M. mpj M H a = \<zero>\<^bsub>M /\<^sub>m H\<^esub>}
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>submodule R M H; x \<in> H\<rbrakk> \<Longrightarrow> x \<in> carrier M \<and> mpj M H x = \<zero>\<^bsub>M /\<^sub>m H\<^esub>
[PROOF STEP]
apply (simp add:submodule_def, (erule conjE)+)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> H; H \<subseteq> carrier M; M +> H; \<forall>a m. a \<in> carrier R \<and> m \<in> H \<longrightarrow> a \<cdot>\<^sub>s m \<in> H\<rbrakk> \<Longrightarrow> x \<in> carrier M \<and> mpj M H x = \<zero>\<^bsub>M /\<^sub>m H\<^esub>
[PROOF STEP]
apply (simp add:subsetD)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> H; H \<subseteq> carrier M; M +> H; \<forall>a m. a \<in> carrier R \<and> m \<in> H \<longrightarrow> a \<cdot>\<^sub>s m \<in> H\<rbrakk> \<Longrightarrow> mpj M H x = \<zero>\<^bsub>M /\<^sub>m H\<^esub>
[PROOF STEP]
apply (subst elem_mpj,
simp add:subsetD, simp add:submodule_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> H; H \<subseteq> carrier M; M +> H; \<forall>a m. a \<in> carrier R \<and> m \<in> H \<longrightarrow> a \<cdot>\<^sub>s m \<in> H\<rbrakk> \<Longrightarrow> x \<uplus>\<^bsub>M\<^esub> H = \<zero>\<^bsub>M /\<^sub>m H\<^esub>
[PROOF STEP]
apply (simp add:qmodule_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> H; H \<subseteq> carrier M; M +> H; \<forall>a m. a \<in> carrier R \<and> m \<in> H \<longrightarrow> a \<cdot>\<^sub>s m \<in> H\<rbrakk> \<Longrightarrow> x \<uplus>\<^bsub>M\<^esub> H = H
[PROOF STEP]
apply (rule mr_cos_h_stable[THEN sym],
simp add:submodule_def, assumption)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
(* *********************************************************************)
(* *)
(* The CertiKOS Certified Kit Operating System *)
(* *)
(* The FLINT Group, Yale University *)
(* *)
(* Copyright The FLINT Group, Yale University. All rights reserved. *)
(* This file is distributed under the terms of the Yale University *)
(* Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(* *********************************************************************)
(* *)
(* Layers of VMM *)
(* *)
(* Refinement proof for PTIntro layer *)
(* *)
(* Ronghui Gu <[email protected]> *)
(* *)
(* Yale Flint Group *)
(* *)
(* *********************************************************************)
(** This file provides the contextual refinement proof between the MAL layer and the MPTIntro layer *)
Require Import PTIntroGenDef.
Require Import PTIntroGenSpec.
(** * Definition of the refinement relation*)
Section Refinement.
Context `{real_params: RealParams}.
Section WITHMEM.
Context `{Hstencil: Stencil}.
Context `{Hmem: Mem.MemoryModel}.
Context `{Hmwd: UseMemWithData mem}.
(** ** The low-level specifications exist *)
Section Exists.
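(** [setPT'_spec n] at the abstract level is matched at the concrete level by
loading [CR3] with a pointer to the [n]-th page table inside the global
block [PTPool_LOC]. *)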
Lemma setCR3_exist:
forall habd habd' labd m n s f,
setPT'_spec n habd = Some habd'
-> high_level_invariant habd
-> relate_RData f habd labd
-> match_RData s habd m f
-> exists labd', setCR30_spec labd (GLOBP PTPool_LOC (Int.repr (n * PgSize)))
= Some labd' /\ relate_RData f habd' labd'
/\ PT habd' = n
/\ ptpool habd' = ptpool habd
/\ pperm habd' = pperm habd
/\ idpde habd' = idpde habd
/\ CR3 labd' = (GLOBP PTPool_LOC (Int.repr (n * PgSize)))
/\ 0 <= n < num_proc.
Proof.
unfold setPT'_spec, setCR30_spec; intros until f.
intros HP HINV HR HM; pose proof HR as HR'; inv HR; revert HP;
subrewrite'; intros HQ; inv HINV; subdestruct;
destruct (CR3_valid_dec
(GLOBP PTPool_LOC (Int.repr (n * 4096)))).
- (* ipt = true /\ valid CR3*)
inv HQ; refine_split'; eauto.
inv HR'. econstructor; eauto; simpl.
econstructor; eauto.
rewrite Int.unsigned_repr. omega.
rewrite int_max. omega.
- elim n0; unfold CR3_valid; eauto.
- (* ipt = false /\ valid CR3 *)
inv HQ; refine_split'; eauto.
inv HR'. econstructor; eauto; simpl.
econstructor; eauto.
rewrite Int.unsigned_repr. omega.
rewrite int_max. omega.
- elim n0; unfold CR3_valid; eauto.
Qed.
End Exists.
Ltac pattern2_refinement_simpl:=
pattern2_refinement_simpl' (@relate_AbData).
Section FRESH_PRIM.
Lemma setPT_spec_ref:
compatsim (crel HDATA LDATA) (gensem setPT'_spec) setPT_spec_low.
Proof.
compatsim_simpl (@match_AbData).
assert(HOS: kernel_mode d2).
{
simpl; inv match_related.
functional inversion H1; subst;
refine_split'; trivial; congruence.
}
exploit setCR3_exist; eauto; intros (labd' & HP & HM & HN1 & HN2 & HN3 & Hide & HCR3 & Hrange).
refine_split; eauto.
econstructor; eauto.
pose proof match_related as match_relate'.
inv match_related.
split; eauto; pattern2_refinement_simpl.
econstructor; eauto; subrewrite'.
Qed.
Lemma getPDE_spec_ref:
compatsim (crel HDATA LDATA) (gensem getPDE_spec) getPDE_spec_low.
Proof.
compatsim_simpl (@match_AbData).
assert(HOS: kernel_mode d2
/\ 0 <= Int.unsigned i < num_proc
/\ 0 <= Int.unsigned i0 <= PDX Int.max_unsigned).
{
simpl; inv match_related.
unfold getPDE_spec in *.
unfold PDE_Arg in *.
subdestruct; refine_split'; trivial; congruence.
}
destruct HOS as [Hkern [Hrange Hrange']].
generalize H; intros HMAT. inv H. specialize (H1 _ Hrange).
inv H1. specialize (H _ Hrange').
destruct H as (v & HLD & _ & HM).
assert (HVint: exists v', Vint v' = v /\ Int.unsigned z = Int.unsigned v' / PgSize).
{
functional inversion H2.
- subst pt. rewrite H8 in HM. inv HM.
refine_split'; trivial. subrewrite'.
rewrite Zplus_comm.
rewrite Z_div_plus. reflexivity.
omega.
- subst pt. rewrite H8 in HM. inv HM.
refine_split'; trivial.
}
destruct HVint as (v' & Heq & Heq'); subst.
refine_split; eauto.
econstructor; eauto.
Qed.
Lemma getPTE_spec_ref:
compatsim (crel HDATA LDATA) (gensem getPTE_spec) getPTE_spec_low.
Proof.
compatsim_simpl (@match_AbData).
assert(HOS: kernel_mode d2
/\ 0 <= Int.unsigned i < num_proc
/\ 0 <= Int.unsigned i0 <= PDX Int.max_unsigned
/\ 0 <= Int.unsigned i1 <= PTX Int.max_unsigned).
{
simpl; inv match_related.
unfold getPTE_spec in *.
unfold PTE_Arg in *. unfold PDE_Arg in *.
subdestruct; refine_split'; trivial; congruence.
}
destruct HOS as (Hkern & Hrange & Hrange' & Hrange'').
generalize H; intros HMAT. inv H. specialize (H1 _ Hrange).
inv H1. specialize (H _ Hrange').
destruct H as (v & HLD & _ & HM).
assert (HVint: exists v1 v2, Vint v1 = v /\ Int.unsigned v1 = v2 * PgSize + PT_PERM_PTU /\
fload'_spec (v2 * one_k + Int.unsigned i1) d2 = Some (Int.unsigned z)).
{
unfold getPTE_spec in *.
destruct (ikern d1') eqn: Hik; contra_inv.
destruct (ihost d1') eqn: Hih; contra_inv.
destruct (init d1') eqn: Hii; contra_inv.
destruct (ipt d1') eqn: Hit; contra_inv.
destruct (PTE_Arg (Int.unsigned i) (Int.unsigned i0) (Int.unsigned i1)); contra_inv.
destruct (ZMap.get (Int.unsigned i0) (ZMap.get (Int.unsigned i) (ptpool d1'))) eqn: HT; contra_inv.
inv HM. exploit relate_PMap_re; eauto 1. intros HPMap.
inv HPMap. specialize (H _ Hrange _ Hrange' _ _ HT _ Hrange'').
destruct H as (v' & HLD' & HM).
assert (HLD'': fload'_spec (pi * one_k + Int.unsigned i1) d2 = Some (Int.unsigned z)).
{
unfold fload'_spec. inv match_related. rewrite <- ikern_re, <- ihost_re.
rewrite Hik, Hih.
assert (HOS: 0 <= pi * 4096 + 7 <= Int.max_unsigned).
{
specialize (Int.unsigned_range_2 v0). subrewrite'.
}
rewrite zle_lt_true.
- replace ((pi * 1024 + Int.unsigned i1) * 4) with (pi * 4096 + Int.unsigned i1 * 4) by omega.
destruct (ZMap.get (Int.unsigned i1) pte); contra_inv; inv HM; inv H2.
+ refine_split'; trivial.
rewrite H7. apply PermZ_eq in H6. rewrite H6. trivial.
+ refine_split'; trivial.
- revert HOS Hrange''. clear.
intros. rewrite_omega.
}
refine_split'; eauto 1.
}
destruct HVint as (v1 & v2 & Heq1 & Heq2 & Heq4).
refine_split; eauto 1.
econstructor; eauto.
Qed.
Lemma setPTE_spec_ref:
compatsim (crel HDATA LDATA) (gensem setPTE_spec) setPTE_spec_low.
Proof.
compatsim_simpl (@match_AbData).
assert (Hkern: exists p0,
kernel_mode d2
/\ 0 <= Int.unsigned i < num_proc
/\ 0 <= Int.unsigned i0 <= PDX Int.max_unsigned
/\ 0 <= Int.unsigned i1 <= PTX Int.max_unsigned
/\ 0 < Int.unsigned i2 < nps d2
/\ ZtoPerm (Int.unsigned i3) = Some p0
/\ PT d1' = PT d1).
{
inv match_related. functional inversion H1; subst.
unfold PTE_Arg, PDE_Arg in *. subdestruct.
refine_split'; try congruence; eauto.
}
destruct Hkern as (perm & Hkern & Hrange & Hrange' & Hrange'' & Hrange''' & HPerm & HPT).
inv H. generalize H2; intros HMAT; specialize (H2 _ Hrange); inv H2.
specialize (H _ Hrange'); destruct H as [v[HLD [HV HM]]].
assert (HVint: exists v1 v2, Vint v1 = v /\ Int.unsigned v1 = v2 * PgSize + PT_PERM_PTU
/\ match_RData s d1' m2 ΞΉ
/\ init d2 = true
/\ exists d2', fstore0_spec (v2 * one_k + Int.unsigned i1)
(Int.unsigned i2 * PgSize + Int.unsigned i3) d2
= Some d2'
/\ relate_RData ΞΉ d1' d2').
{
unfold setPTE_spec in *. subdestruct.
inv HM. esplit; esplit. split; [reflexivity|]. split; [eassumption|]. split; [|split].
- (* match_RData *)
constructor; inv H1; simpl; trivial.
econstructor; eauto 1; intros.
destruct (zeq n0 (Int.unsigned i)); subst.
+ (* n = Int.unsigned i *)
rewrite ZMap.gss. constructor; intros.
destruct (zeq i4 (Int.unsigned i0)); subst.
* (* i4 = Int.unsigned i0 *)
rewrite ZMap.gss. refine_split'; eauto 1.
constructor; eauto; intros.
* (* i4 <> Int.unsigned i0 *)
rewrite ZMap.gso; eauto 2.
specialize (HMAT _ Hrange). inv HMAT.
eapply H2; eauto 2.
+ (* n <> int.unsigned i*)
rewrite ZMap.gso; eauto 2.
- inv match_related. congruence.
- (* fstore /\ relate RData *)
unfold fstore0_spec. inv match_related.
rewrite <- ikern_re, <- ihost_re. rewrite Hdestruct, Hdestruct0.
assert (HOS: 0<= pi * 4096 + 7 <= Int.max_unsigned).
{
specialize (Int.unsigned_range_2 v0). subrewrite'.
}
rewrite zle_lt_true.
+ replace ((pi * 1024 + Int.unsigned i1) * 4) with (pi * 4096 + Int.unsigned i1 * 4) by omega.
unfold flatmem_store. unfold PageI.
replace ((pi * 4096 + Int.unsigned i1 * 4) / 4096) with pi.
* pose proof (pperm_re pi) as HPP.
rewrite H10 in HPP. inv HPP. trivial.
refine_split'; eauto 1.
inv H1. constructor; trivial; simpl; try congruence.
{ (* FlatMem *)
unfold FlatMem.flatmem_inj in *.
intros; rewrite valid_dirty; [| assumption| eassumption].
eapply FlatMem.store_unmapped_inj; eauto 1.
simpl. rewrite_omega.
}
{ (* PMap *)
constructor; intros. inv relate_PMap_re.
destruct (zeq n0 (Int.unsigned i)); subst.
- (* n = Int.unsigned i *)
rewrite ZMap.gss in H4.
destruct (zeq i4 (Int.unsigned i0)); subst.
+ (* i4 = Int.unsigned i0 *)
rewrite ZMap.gss in H4. inv H4.
destruct (zeq vadr (Int.unsigned i1)); subst.
* (* vadr = Int.unsigned i1 *)
rewrite ZMap.gss. rewrite FlatMem.load_store_same.
refine_split'; trivial.
econstructor; eauto. apply Int.unsigned_repr.
apply ZtoPerm_range in Hdestruct6.
exploit valid_nps; eauto. rewrite nps_re.
rewrite_omega.
* (* vadr <> Int.unsigned i1 *)
rewrite ZMap.gso; auto. erewrite FlatMem.load_store_other; eauto 2.
simpl. destruct (zle (vadr + 1) (Int.unsigned i1)).
left; omega. right; omega.
+ (* i4 <> Int.unsigned i0 *)
rewrite ZMap.gso in H4; auto.
assert (Hneq: pi <> pi0).
{
red; intros; subst.
specialize (HMAT _ Hrange). inv HMAT.
specialize (H8 _ H2). destruct H8 as (v & _ & _ & HMAT).
rewrite H4 in HMAT. inv HMAT. congruence.
}
erewrite FlatMem.load_store_other; eauto 2. simpl.
destruct (zle (pi0 + 1) pi); rewrite_omega.
- (* n <> int.unsigned i*)
rewrite ZMap.gso in H4; auto.
assert (Hneq: pi <> pi0).
{
red; intros; subst.
specialize (HMAT _ H1). inv HMAT.
specialize (H8 _ H2). destruct H8 as (v & _ & _ & HMAT).
unfold PMap, ZMap.t, PMap.t in HMAT.
rewrite H4 in HMAT. inv HMAT. congruence.
}
erewrite FlatMem.load_store_other; eauto 2. simpl.
destruct (zle (pi0 + 1) pi); rewrite_omega.
}
* rewrite Zplus_comm.
rewrite Z_div_plus. rewrite Zdiv_small. reflexivity.
revert Hrange''. clear; intros. rewrite_omega. omega.
+ rewrite_omega.
}
destruct HVint as (v1 & v2 & Heq1 & Heq2 & Hma & Hinit & d2' & HST & Hre); subst.
exists ΞΉ, Vundef, (fst (m2, d2)), d2'.
refine_split; eauto 1.
- econstructor; eauto 1.
- split; eauto; pattern2_refinement_simpl.
Qed.
Lemma rmvPTE_spec_ref:
compatsim (crel HDATA LDATA) (gensem rmvPTE_spec) rmvPTE_spec_low.
Proof.
compatsim_simpl (@match_AbData).
assert (Hkern: kernel_mode d2
/\ 0 <= Int.unsigned i < num_proc
/\ 0 <= Int.unsigned i0 <= PDX Int.max_unsigned
/\ 0 <= Int.unsigned i1 <= PTX Int.max_unsigned
/\ PT d1' = PT d1).
{
inv match_related. functional inversion H1; subst.
unfold PTE_Arg, PDE_Arg in *. subdestruct.
refine_split'; try congruence; eauto.
}
destruct Hkern as [Hkern [Hrange [Hrange' [Hrange'' HPT]]]].
inv H. generalize H2; intros HMAT; specialize (H2 _ Hrange); inv H2.
specialize (H _ Hrange'); destruct H as [v[HLD [HV HM]]].
assert (HVint: exists v1 v2, Vint v1 = v /\ Int.unsigned v1 = v2 * PgSize + PT_PERM_PTU
/\ match_RData s d1' m2 ΞΉ
/\ exists d2', fstore0_spec (v2 * one_k + Int.unsigned i1) 0 d2 = Some d2'
/\ relate_RData ΞΉ d1' d2').
{
unfold rmvPTE_spec in *. subdestruct.
inv HM. esplit; esplit. split; [reflexivity|]. split; [eassumption|]. split.
- (* match_RData *)
constructor; inv H1; simpl; trivial. econstructor; eauto 1; intros.
destruct (zeq n0 (Int.unsigned i)); subst.
+ (* n = Int.unsigned i *)
rewrite ZMap.gss. constructor; intros.
destruct (zeq i2 (Int.unsigned i0)); subst.
* (* i2 = Int.unsigned i0 *)
rewrite ZMap.gss. refine_split'; eauto 1.
constructor; eauto; intros.
* (* i2 <> Int.unsigned i0 *)
rewrite ZMap.gso; eauto 2.
specialize (HMAT _ Hrange). inv HMAT.
eauto 2.
+ (* n <> int.unsigned i*)
rewrite ZMap.gso; eauto 2.
- (* fstore /\ relate RData *)
unfold fstore0_spec. inv match_related.
rewrite <- ikern_re, <- ihost_re. rewrite Hdestruct, Hdestruct0.
assert (HOS: 0<= pi * 4096 + 7 <= Int.max_unsigned).
{
specialize (Int.unsigned_range_2 v0). subrewrite'.
}
rewrite zle_lt_true.
+ replace ((pi * 1024 + Int.unsigned i1) * 4) with (pi * 4096 + Int.unsigned i1 * 4) by omega.
unfold flatmem_store. unfold PageI.
replace ((pi * 4096 + Int.unsigned i1 * 4) / 4096) with pi.
* pose proof (pperm_re pi) as HPP.
rewrite H10 in HPP. inv HPP.
refine_split'; eauto 1.
inv H1. constructor; trivial; simpl; try congruence.
{ (* FlatMem *)
unfold FlatMem.flatmem_inj in *.
intros; rewrite valid_dirty; [| assumption| eassumption].
eapply FlatMem.store_unmapped_inj; eauto 1.
simpl. rewrite_omega.
}
{ (* PMap *)
constructor; intros. inv relate_PMap_re.
destruct (zeq n0 (Int.unsigned i)); subst.
- (* n = Int.unsigned i *)
rewrite ZMap.gss in H4.
destruct (zeq i2 (Int.unsigned i0)); subst.
+ (* i2 = Int.unsigned i0 *)
rewrite ZMap.gss in H4. inv H4.
destruct (zeq vadr (Int.unsigned i1)); subst.
* (* vadr = Int.unsigned i1 *)
rewrite ZMap.gss. rewrite FlatMem.load_store_same.
refine_split'; trivial.
econstructor; eauto.
* (* vadr <> Int.unsigned i1 *)
rewrite ZMap.gso; auto. erewrite FlatMem.load_store_other; eauto 2.
simpl. destruct (zle (vadr + 1) (Int.unsigned i1)).
left; omega. right; omega.
+ (* i2 <> Int.unsigned i0 *)
rewrite ZMap.gso in H4; auto.
assert (Hneq: pi <> pi0).
{
red; intros; subst.
specialize (HMAT _ Hrange). inv HMAT.
specialize (H8 _ H2). destruct H8 as (v & _ & _ & HMAT).
rewrite H4 in HMAT. inv HMAT. congruence.
}
erewrite FlatMem.load_store_other; eauto 2.
simpl. destruct (zle (pi0 + 1) pi); rewrite_omega.
- (* n <> int.unsigned i*)
rewrite ZMap.gso in H4; auto.
assert (Hneq: pi <> pi0).
{
red; intros; subst.
specialize (HMAT _ H1). inv HMAT.
specialize (H8 _ H2). destruct H8 as (v & _ & _ & HMAT).
unfold PMap, ZMap.t, PMap.t in HMAT.
rewrite H4 in HMAT. inv HMAT. congruence.
}
erewrite FlatMem.load_store_other; eauto 2.
simpl. destruct (zle (pi0 + 1) pi); rewrite_omega.
}
* rewrite Zplus_comm.
rewrite Z_div_plus. rewrite Zdiv_small. reflexivity.
revert Hrange''. clear; intros. rewrite_omega. omega.
+ rewrite_omega.
}
destruct HVint as (v1 & v2 & Heq1 & Heq2 & Hma & d2' & HST & Hre); subst.
exists ΞΉ, Vundef, (fst (m2, d2)), d2'.
refine_split; eauto 1.
- econstructor; eauto 1.
- split; eauto; pattern2_refinement_simpl.
Qed.
Lemma setPDEU_spec_ref:
compatsim (crel HDATA LDATA) (gensem setPDEU_spec) setPDEU_spec_low.
Proof.
compatsim_simpl (@match_AbData).
assert (Hkern: kernel_mode d2
/\ 0 <= Int.unsigned i < num_proc
/\ 0 <= Int.unsigned i0 <= PDX Int.max_unsigned
/\ 0 < Int.unsigned i1 < nps d2
/\ PT d1' = PT d1
/\ init d2 = true).
{
inv match_related.
unfold setPDEU_spec in *.
unfold PDE_Arg in *.
subdestruct; refine_split'; trivial; try congruence.
inv H1. reflexivity.
}
destruct Hkern as (Hkern & Hrange & Hrange' & Hrange'' & HPT & Hinit).
inv H. generalize H2; intros HMAT; specialize (H2 _ Hrange); inv H2.
specialize (H _ Hrange'); destruct H as [_[_ [HV _]]].
specialize (Mem.valid_access_store _ _ _ _
(Vint (Int.repr (Int.unsigned i1 * PgSize + PT_PERM_PTU)))
HV); intros [m0 HST].
refine_split; eauto.
- econstructor; eauto.
simpl; lift_trivial. rewrite HST. reflexivity.
- pose proof H1 as Hspec.
functional inversion Hspec; subst; simpl in *.
split; eauto 1; pattern2_refinement_simpl.
+ inv match_related; simpl in *; split; simpl; try eassumption.
{ (* Flatmem *)
apply FlatMem.free_page_inj. assumption.
}
{ (* PPermT_les *)
intros j. specialize (pperm_re j).
destruct (zeq j (Int.unsigned i1)); subst.
- rewrite ZMap.gss. rewrite H10 in pperm_re.
inv pperm_re. constructor.
- rewrite ZMap.gso; eauto 2.
}
{ (* PMapPool *)
subst pt'. constructor; intros.
destruct (zeq n (Int.unsigned i)); subst.
{ (* n = int.unsigned i*)
rewrite ZMap.gss in H12.
destruct (zeq i2 (Int.unsigned i0)); subst.
{ (* i2 = Int.unsigned i0 *)
rewrite ZMap.gss in H12.
inv H12. rewrite ZMap.gi.
refine_split'; eauto 2. constructor.
}
{ (* i2 <> Int.unsigned i0 *)
rewrite ZMap.gso in H12; eauto 1.
inv relate_PMap_re. eauto.
}
}
{ (* n <> int.unsigned i*)
rewrite ZMap.gso in H12; eauto 1.
inv relate_PMap_re. eauto.
}
}
+ econstructor; eauto 1; simpl in *.
{
econstructor; eauto 1; intros.
* destruct (zeq n (Int.unsigned i)); subst.
{ (* n = int.unsigned i*)
rewrite ZMap.gss.
specialize (HMAT _ H). inv HMAT.
constructor; intros.
specialize (H11 _ H12).
destruct (zeq i2 (Int.unsigned i0)); subst.
{ (* i2 = Int.unsigned i0 *)
refine_split'.
- eapply Mem.load_store_same; eauto.
- eapply Mem.store_valid_access_1; eauto.
- subst pt'; repeat rewrite ZMap.gss.
constructor; intros.
+ rewrite Int.unsigned_repr. reflexivity.
exploit valid_nps; eauto 1.
intros. rewrite_omega.
+ rewrite ZMap.gss. reflexivity.
}
{ (* i1 <> Int.unsigned i0 *)
destruct H11 as [v1[HL1[HV1 HM1]]].
refine_split'.
- erewrite Mem.load_store_other; eauto.
right; simpl.
destruct (zle (i2 + 1) (Int.unsigned i0)).
+ left. revert n l. clear. intros. omega.
+ right. revert n g. clear. intros. omega.
- eapply Mem.store_valid_access_1; eauto.
- subst pt'. rewrite ZMap.gso; auto.
inv HM1; constructor; intros; eauto 1.
+ destruct (zeq pi (Int.unsigned i1)); subst.
* congruence.
* rewrite ZMap.gso; eauto 1.
}
}
{ (* n <> int.unsigned i*)
constructor; intros. rewrite ZMap.gso; auto.
specialize (HMAT _ H). inv HMAT.
specialize (H12 _ H11); destruct H12 as [v1[HL1[HV1 HM1]]].
refine_split'.
- erewrite Mem.load_store_other; eauto.
right; simpl.
destruct (zle (n + 1) (Int.unsigned i)).
+ left. rewrite_omega.
+ right. rewrite_omega.
- eapply Mem.store_valid_access_1; eauto.
- unfold PMap, ZMap.t, PMap.t in HM1.
inv HM1; econstructor; intros; eauto 1.
+ destruct (zeq pi (Int.unsigned i1)); subst.
* congruence.
* rewrite ZMap.gso; eauto 1.
}
}
{
inv H0. esplit; eauto. intros.
specialize (H _ H0 _ H12).
destruct H as (v & HLD & HV' & HM).
erewrite Mem.load_store_other; eauto.
- refine_split'; eauto.
eapply Mem.store_valid_access_1; eauto.
- left. red; intros; subst.
specialize (genv_vars_inj _ _ _ _ H3 H11).
intros. inv H.
}
Qed.
Lemma rmvPDE_spec_ref:
compatsim (crel HDATA LDATA) (gensem rmvPDE_spec) rmvPDE_spec_low.
Proof.
compatsim_simpl (@match_AbData).
assert (Hkern: kernel_mode d2
/\ 0 <= Int.unsigned i < num_proc
/\ 0 <= Int.unsigned i0 <= PDX Int.max_unsigned
/\ PT d1' = PT d1
/\ idpde d1' = idpde d1).
{
inv match_related.
unfold rmvPDE_spec in *.
unfold PDE_Arg in *.
subdestruct; refine_split'; trivial; try congruence;
inv H1; reflexivity.
}
destruct Hkern as (Hkern & Hrange & Hrange' & HPT & Hipde).
inv H. generalize H2; intros HMAT; specialize (H2 _ Hrange); inv H2.
specialize (H _ Hrange'); destruct H as [_[_ [HV _]]].
specialize (Mem.valid_access_store _ _ _ _
(Vint (Int.repr PT_PERM_UP))
HV); intros [m0 HST].
refine_split; eauto.
- econstructor; eauto.
simpl; lift_trivial. rewrite HST. reflexivity.
- pose proof H1 as Hspec.
assert (Hre_ab: relate_AbData s ΞΉ d1' d2).
{
inv match_related.
assert (HP: relate_PMapPool
(ZMap.set (Int.unsigned i)
(ZMap.set (Int.unsigned i0) PDEUnPresent
(ZMap.get (Int.unsigned i) (ptpool d1)))
(ptpool d1)) (HP d2)).
{
constructor; intros.
destruct (zeq n (Int.unsigned i)); subst.
{ (* n = int.unsigned i*)
rewrite ZMap.gss in H4.
destruct (zeq i1 (Int.unsigned i0)); subst.
{ (* i1 = Int.unsigned i0 *)
rewrite ZMap.gss in H4. inv H4.
}
{ (* i2 <> Int.unsigned i0 *)
rewrite ZMap.gso in H4; eauto 1.
inv relate_PMap_re. eauto.
}
}
{ (* n <> int.unsigned i*)
rewrite ZMap.gso in H4; eauto 1.
inv relate_PMap_re. eauto.
}
}
functional inversion Hspec; subst;
split; trivial; simpl in *; contra_inv.
{ (* PPermT_les *)
intros j. specialize (pperm_re j).
destruct (zeq j pi); subst.
- rewrite ZMap.gss. rewrite H12 in pperm_re.
inv pperm_re. constructor.
- rewrite ZMap.gso; eauto 2.
}
{ (* PPermT_les *)
intros j. specialize (pperm_re j).
destruct (zeq j pi); subst.
- rewrite ZMap.gss. rewrite H11 in pperm_re.
inv pperm_re. constructor.
- rewrite ZMap.gso; eauto 2.
}
}
assert (Hma_ab: match_AbData s d1' m0 ΞΉ).
{
econstructor; eauto 1; simpl in *.
{
econstructor; eauto 1; intros. pose proof HMAT as HMAT'.
specialize (HMAT _ H).
assert(HP: forall pp,
(forall pi0 a1 a2, ZMap.get pi0 (pperm d1) = PGHide (PGPMap a1 a2) ->
(a1 <> Int.unsigned i \/ a2 <> Int.unsigned i0) ->
ZMap.get pi0 pp = PGHide (PGPMap a1 a2)) ->
match_PMap s (ZMap.get n (ZMap.set (Int.unsigned i)
(ZMap.set (Int.unsigned i0) PDEUnPresent
(ZMap.get (Int.unsigned i) (ptpool d1)))
(ptpool d1))) pp m0 b n).
{
intros.
* destruct (zeq n (Int.unsigned i)); subst.
{ (* n = int.unsigned i*)
rewrite ZMap.gss. inv HMAT.
constructor; intros.
specialize (H4 _ H5).
destruct (zeq i1 (Int.unsigned i0)); subst.
{ (* i1 = Int.unsigned i0 *)
refine_split'.
- eapply Mem.load_store_same; eauto.
- eapply Mem.store_valid_access_1; eauto.
- repeat rewrite ZMap.gss.
constructor.
}
{ (* i1 <> Int.unsigned i0 *)
destruct H4 as [v1[HL1[HV1 HM1]]].
refine_split'.
- erewrite Mem.load_store_other; eauto.
right; simpl.
destruct (zle (i1 + 1) (Int.unsigned i0)).
+ left. omega.
+ right. omega.
- eapply Mem.store_valid_access_1; eauto.
- rewrite ZMap.gso; auto.
inv HM1; constructor; intros; eauto.
}
}
{ (* n <> int.unsigned i*)
constructor; intros. rewrite ZMap.gso; auto.
inv HMAT. specialize (H5 _ H4); destruct H5 as [v1[HL1[HV1 HM1]]].
refine_split'.
- erewrite Mem.load_store_other; eauto.
right; simpl.
destruct (zle (n + 1) (Int.unsigned i)).
+ left. rewrite_omega.
+ right. rewrite_omega.
- eapply Mem.store_valid_access_1; eauto.
- unfold PMap, ZMap.t, PMap.t in HM1.
inv HM1; econstructor; intros; eauto.
}
}
functional inversion Hspec; subst; simpl in *;
subst pt'; eapply HP; eauto 1; contra_inv.
{
intros. destruct (zeq pi0 pi); subst.
- specialize (HMAT' _ Hrange). inv HMAT'.
specialize (H15 _ Hrange').
destruct H15 as (? & _ & _ & HM).
rewrite H12 in HM. inv HM.
destruct H14; congruence.
- rewrite ZMap.gso; eauto 1.
}
{
intros. destruct (zeq pi0 pi); subst.
- specialize (HMAT' _ Hrange). inv HMAT'.
specialize (H14 _ Hrange').
destruct H14 as (? & _ & _ & HM).
rewrite H11 in HM. inv HM.
destruct H13; congruence.
- rewrite ZMap.gso; eauto 1.
}
}
{
inv H0. rewrite Hipde. esplit; eauto. intros.
specialize (H _ H0 _ H4).
destruct H as (v & HLD & HV' & HM).
erewrite Mem.load_store_other; eauto.
- refine_split'; eauto.
eapply Mem.store_valid_access_1; eauto.
- left. red; intros; subst.
specialize (genv_vars_inj _ _ _ _ H3 H2).
intros. inv H.
}
}
split; eauto 1; pattern2_refinement_simpl.
Qed.
Lemma setPDE_spec_ref:
compatsim (crel HDATA LDATA) (gensem setPDE_spec) setPDE_spec_low.
Proof.
compatsim_simpl (@match_AbData).
assert (Hkern: kernel_mode d2
/\ 0 <= Int.unsigned i < num_proc
/\ 0 <= Int.unsigned i0 <= PDX Int.max_unsigned
/\ PT d1' = PT d1).
{
inv match_related.
unfold setPDE_spec in *.
unfold PDE_Arg in *.
subdestruct; refine_split'; trivial; try congruence.
inv H1. reflexivity.
}
destruct Hkern as (Hkern & Hrange & Hrange' & HPT).
inv H. generalize H2; intros HMAT; specialize (H2 _ Hrange); inv H2.
specialize (H _ Hrange'); destruct H as [_[_ [HV _]]]. inv H0.
specialize (Mem.valid_access_store _ _ _ _
(Vptr b0 (Int.repr (Int.unsigned i0 * PgSize + PT_PERM_PTU)))
HV); intros [m0 HST].
refine_split; eauto.
- econstructor; eauto.
simpl; lift_trivial. rewrite HST. reflexivity.
- pose proof H1 as Hspec.
functional inversion Hspec; subst; simpl in *.
split; eauto 1; pattern2_refinement_simpl.
+ inv match_related; simpl in *; split; simpl; try eassumption.
{ (* PMapPool *)
subst pt'. constructor; intros.
destruct (zeq n (Int.unsigned i)); subst.
{ (* n = int.unsigned i*)
rewrite ZMap.gss in H11.
destruct (zeq i1 (Int.unsigned i0)); subst.
{ (* i1 = Int.unsigned i0 *)
rewrite ZMap.gss in H11.
inv H11.
}
{ (* i1 <> Int.unsigned i0 *)
rewrite ZMap.gso in H11; eauto 1.
inv relate_PMap_re. eauto.
}
}
{ (* n <> int.unsigned i*)
rewrite ZMap.gso in H11; eauto 1.
inv relate_PMap_re. eauto.
}
}
+ econstructor; eauto 1; simpl in *.
{
econstructor; eauto 1; intros.
* destruct (zeq n (Int.unsigned i)); subst.
{ (* n = int.unsigned i*)
rewrite ZMap.gss.
specialize (HMAT _ H0). inv HMAT.
constructor; intros.
specialize (H10 _ H11).
destruct (zeq i1 (Int.unsigned i0)); subst.
{ (* i1 = Int.unsigned i0 *)
refine_split'.
- eapply Mem.load_store_same; eauto.
- eapply Mem.store_valid_access_1; eauto.
- subst pt'; repeat rewrite ZMap.gss.
constructor; intros; eauto.
rewrite Int.unsigned_repr. reflexivity.
rewrite_omega.
}
{ (* i1 <> Int.unsigned i0 *)
destruct H10 as [v1[HL1[HV1 HM1]]].
refine_split'.
- erewrite Mem.load_store_other; eauto.
right; simpl.
destruct (zle (i1 + 1) (Int.unsigned i0)).
+ left. omega.
+ right. omega.
- eapply Mem.store_valid_access_1; eauto.
- subst pt'. rewrite ZMap.gso; auto.
}
}
{ (* n <> int.unsigned i*)
constructor; intros. rewrite ZMap.gso; auto.
specialize (HMAT _ H0). inv HMAT.
specialize (H11 _ H10); destruct H11 as [v1[HL1[HV1 HM1]]].
refine_split'.
- erewrite Mem.load_store_other; eauto.
right; simpl.
destruct (zle (n + 1) (Int.unsigned i)).
+ left. rewrite_omega.
+ right. rewrite_omega.
- eapply Mem.store_valid_access_1; eauto.
- unfold PMap, ZMap.t, PMap.t in HM1.
inv HM1; econstructor; intros; eauto 1.
}
}
{
esplit; eauto. intros.
specialize (H _ H0 _ H10).
destruct H as (v & HLD & HV' & HM).
erewrite Mem.load_store_other; eauto.
- refine_split'; eauto.
eapply Mem.store_valid_access_1; eauto.
- left. red; intros; subst.
specialize (genv_vars_inj _ _ _ _ H3 H2).
intros. inv H.
}
Qed.
Lemma pt_in_spec_ref:
compatsim (crel HDATA LDATA) (primcall_general_compatsem'
ptin'_spec (prim_ident:= pt_in)) pt_in_spec_low.
Proof.
compatsim_simpl (@match_AbData); intros.
inv match_extcall_states.
assert(HOS: kernel_mode d2).
{
simpl; inv match_related.
functional inversion H8; subst;
refine_split'; trivial; try congruence.
}
refine_split'; eauto.
econstructor; eauto.
- specialize (match_reg PC). unfold Pregmap.get in *.
rewrite H7 in match_reg.
inv match_reg.
exploit inject_forward_equal'; eauto.
intros HW; inv HW.
rewrite Int.add_zero. reflexivity.
- functional inversion H8; subst.
pose proof match_related as match_relate'.
inv match_related.
split; eauto; pattern2_refinement_simpl.
+ econstructor; eauto.
econstructor; eauto.
inv match_match.
econstructor; eauto.
+ val_inject_simpl.
Qed.
Lemma pt_out_spec_ref:
compatsim (crel HDATA LDATA) (primcall_general_compatsem'
ptout_spec (prim_ident:= pt_out)) pt_out_spec_low.
Proof.
compatsim_simpl (@match_AbData); intros.
inv match_extcall_states.
assert(HOS: kernel_mode d2).
{
simpl; inv match_related.
functional inversion H8; subst;
refine_split'; trivial; try congruence.
}
refine_split'; eauto.
econstructor; eauto.
- eapply reg_symbol_inject; eassumption.
- functional inversion H8; subst.
pose proof match_related as match_relate'.
inv match_related.
split; eauto; pattern2_refinement_simpl.
+ econstructor; eauto.
econstructor; eauto.
inv match_match.
econstructor; eauto.
+ val_inject_simpl.
Qed.
Lemma setIDPTE_spec_ref:
compatsim (crel HDATA LDATA) (gensem setIDPTE_spec) setIDPTE_spec_low.
Proof.
compatsim_simpl (@match_AbData).
assert (Hkern: kernel_mode d2
/\ 0 <= Int.unsigned i <= PDX Int.max_unsigned
/\ 0 <= Int.unsigned i0 <= PTX Int.max_unsigned
/\ PT d1' = PT d1
/\ exists p0, ZtoPerm (Int.unsigned i1) = Some p0).
{
inv match_related.
unfold setIDPTE_spec in *.
unfold IDPTE_Arg in *.
subdestruct; refine_split'; trivial; try congruence.
inv H1. reflexivity.
}
destruct Hkern as (Hkern & Hrange & Hrange' & HPT & p0 & Hipde).
inv H0. generalize H2; intros HMAT. specialize (H2 _ Hrange _ Hrange').
destruct H2 as [_[_ [HV _]]].
specialize (Mem.valid_access_store _ _ _ _
(Vint (Int.repr ((Int.unsigned i * 1024 + Int.unsigned i0) * 4096 +
Int.unsigned i1)))
HV); intros [m0 HST].
refine_split; eauto.
- econstructor; eauto.
simpl; lift_trivial. rewrite HST. reflexivity.
- pose proof H1 as Hspec.
functional inversion Hspec; subst; simpl in *.
split; eauto 1; pattern2_refinement_simpl.
+ inv match_related; simpl in *; split; simpl; try eassumption.
+ econstructor; eauto 1; simpl in *.
{
inv H. esplit; eauto. intros.
specialize (H0 _ H). inv H0.
split; intros. specialize (H11 _ H0).
destruct H11 as (v & HLD & HV' & HM).
erewrite Mem.load_store_other; eauto.
- refine_split'; eauto.
eapply Mem.store_valid_access_1; eauto.
- left. red; intros; subst.
specialize (genv_vars_inj _ _ _ _ H3 H10).
intros. contra_inv.
}
{
econstructor; eauto 1; intros. subst pde'.
* destruct (zeq i2 (Int.unsigned i)); subst.
{ (* i2 = int.unsigned i*)
rewrite ZMap.gss.
specialize (HMAT _ H0).
destruct (zeq j (Int.unsigned i0)); subst.
{ (* j = Int.unsigned i0 *)
rewrite ZMap.gss. refine_split'.
- eapply Mem.load_store_same; eauto.
- eapply Mem.store_valid_access_1; eauto.
- simpl. econstructor; eauto.
rewrite Int.unsigned_repr. reflexivity.
exploit ZtoPerm_range; eauto. intros.
rewrite_omega.
}
{ (* j <> Int.unsigned i0 *)
specialize (HMAT _ H10).
destruct HMAT as [v1[HL1[HV1 HM1]]].
refine_split'.
- erewrite Mem.load_store_other; eauto.
right; simpl.
destruct (zle (j + 1) (Int.unsigned i0)).
+ left. omega.
+ right. omega.
- eapply Mem.store_valid_access_1; eauto.
- rewrite ZMap.gso; auto.
}
}
{ (* i2 <> int.unsigned i*)
rewrite ZMap.gso; auto.
specialize (HMAT _ H0 _ H10).
destruct HMAT as [v1[HL1[HV1 HM1]]].
refine_split'; eauto.
- erewrite Mem.load_store_other; eauto.
right; simpl.
destruct (zle (i2 + 1) (Int.unsigned i)).
+ left. rewrite_omega.
+ right. rewrite_omega.
- eapply Mem.store_valid_access_1; eauto.
}
}
Qed.
End FRESH_PRIM.
End WITHMEM.
End Refinement.
|
### 1. Vector Spaces
The MML textbook defines vector spaces as follows.
"A real-valued vector space $V = (\mathcal{V}, +, \cdot)$ is a set $\mathcal{V}$ with two operations"
$$ + : \mathcal{V} \times \mathcal{V} \to \mathcal{V} $$
$$ \cdot : \mathbb{R} \times \mathcal{V} \to \mathcal{V} $$
So the definition of a space is stated in terms of a group.
A group must satisfy the four properties **closure, associativity, neutral element, and inverse**,
so the vectors inside a vector space must satisfy these properties as well!
To put it a little more simply:
**Vector space**: the set of everything that vectors living in $n$ dimensions can produce via the linear-combination operations ( $+$ : addition, $\cdot$ : multiplication by scalar ) can be understood as a group!
Put very simply, all the vectors living in $n$-dimensional space form a vector space. Conceptually, it is just expressed as a group built from linear combinations. **Of course, the zero vector is contained in the vector space**, since the linear-combination operations always produce it!
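To make the closure property concrete, here is a minimal sketch (it uses NumPy, which is an assumption on my part; the original notes only use matplotlib): any linear combination of vectors in $\mathbb{R}^{2}$ lands back in $\mathbb{R}^{2}$, and choosing all coefficients to be zero yields the zero vector.
```python
import numpy as np

v1 = np.array([1.0, 2.0])
v2 = np.array([3.0, -1.0])

# Closure: any linear combination c1*v1 + c2*v2 is again a vector in R^2.
c1, c2 = 2.0, -0.5
w = c1 * v1 + c2 * v2
print(w, w.shape)           # [0.5 4.5] (2,), still a 2-dimensional vector

# The zero vector is always reachable: take every coefficient to be zero.
print(0.0 * v1 + 0.0 * v2)  # [0. 0.]
```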
### 2. Vector subspaces
Vector subspaces are defined as follows.
"Given $V = (\mathcal{V}, +, \cdot)$ and $\mathcal{U} \subseteq \mathcal{V}$, $U = (\mathcal{U}, +, \cdot)$ is a **vector subspace**."
"**Vector subspace**: a smaller vector space that can be built from the vectors of a vector space via the linear-combination operations (addition, multiplication by scalar)" is roughly the right way to understand it.
A vector subspace can be as small as a single point.
For example, within the two-dimensional real coordinate space ($\mathbb{R}^{2}$),
1. the origin $(0, 0)$,
2. any line passing through the origin,
3. $\mathbb{R}^{2}$ itself (every representable 2-dimensional vector)
can each be a vector subspace. **Note that the origin belongs to every subspace; no subspace exists that does not contain the origin.** If the origin were missing, the set could not be a group closed under the operations. (This would violate the group's "closure" property: for a subset to be a subspace, the results of the $+$ and $\cdot$ operations must stay inside it, and a set without the zero vector cannot contain the result whenever a linear combination produces the zero vector.)
A line like the one below is *not* a subspace. It is of course a "space" formed from vectors living in the two-dimensional plane, but because it does not pass through (does not contain) the zero vector, it is not closed under all linear-combination operations and therefore cannot be regarded as a vector subspace! The plot below illustrates this.
```python
%matplotlib inline
import matplotlib.pyplot as plt

plt.xlim(-1.4, 4.4)
plt.ylim(-0.6, 5.2)
# The plotted line y = x + 2 misses the origin, so it cannot be a subspace.
plt.plot([-2, -1, 0, 1, 2, 3, 4], [0, 1, 2, 3, 4, 5, 6])
plt.plot(0, 0, 'ro', ms=10)  # mark the origin
plt.title("Not Subspace", fontsize=15)
plt.show()
```
As another example, consider the three-dimensional real coordinate space ($\mathbb{R}^{3}$):
1. the origin $(0, 0, 0)$,
2. any line passing through the origin,
3. any plane containing the origin,
4. $\mathbb{R}^{3}$ itself (every representable 3-dimensional vector)
can each be a vector subspace. A quick numerical check follows.
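Here is a small sketch (again assuming NumPy, which the original notes do not import): a plane through the origin in $\mathbb{R}^{3}$, such as $\{(x, y, z) : z = x + y\}$, is closed under linear combinations, while the shifted plane $z = x + y + 1$ is not.
```python
import numpy as np

def on_plane(v, shift=0.0):
    # Membership test for the plane z = x + y + shift.
    return np.isclose(v[2], v[0] + v[1] + shift)

u = np.array([1.0, 0.0, 1.0])   # lies on z = x + y
v = np.array([0.0, 2.0, 2.0])   # lies on z = x + y
w = 3.0 * u - 1.0 * v           # an arbitrary linear combination

print(on_plane(w))              # True: the plane through the origin is closed

u1 = np.array([1.0, 0.0, 2.0])  # lies on the shifted plane z = x + y + 1
v1 = np.array([0.0, 1.0, 2.0])  # lies on the shifted plane
print(on_plane(u1 + v1, 1.0))   # False: u1 + v1 = (1, 1, 4), but 1 + 1 + 1 = 3
```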
### 3. Matrix subspace (column space)
Matrices, not just vectors, have their own subspaces.
Just as with vectors, such a subspace is a space that can be assembled by the linear-combination operations (addition, multiplication by scalar); the only difference is that it is generated by the column vectors of the matrix.
The subspace spanned by linear combinations of a matrix's columns is precisely the "column space", the subspace of the matrix!
For example, for a matrix like the one below,
$$
\begin{align}
\begin{bmatrix}
10 & 11 & 12\\
11 & 12 & 13\\
12 & 13 & 14\\
\end{bmatrix}
\end{align}
$$
$$
\begin{align}
c_{1}
\begin{bmatrix}
10\\
11\\
12\\
\end{bmatrix}
+
c_{2}
\begin{bmatrix}
11\\
12\\
13\\
\end{bmatrix}
+
c_{3}
\begin{bmatrix}
12\\
13\\
14\\
\end{bmatrix}
\end{align}
$$
we can form linear combinations of the column vectors as above, and these combinations make up the column space, that is, the subspace of the matrix.
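As a closing sketch (NumPy assumed, as before): taking $c_1$ times the first column plus $c_2$ times the second plus $c_3$ times the third is exactly the matrix-vector product $Ac$, so every element of the column space has the form $Ac$ for some coefficient vector $c$.
```python
import numpy as np

A = np.array([[10, 11, 12],
              [11, 12, 13],
              [12, 13, 14]], dtype=float)

c = np.array([1.0, -2.0, 1.0])  # arbitrary coefficients c1, c2, c3

# The explicit linear combination of the columns ...
combo = c[0] * A[:, 0] + c[1] * A[:, 1] + c[2] * A[:, 2]
# ... equals the matrix-vector product A @ c.
print(combo, A @ c)              # both [0. 0. 0.] for this choice of c

# For this particular A the columns are linearly dependent
# (col0 - 2*col1 + col2 = 0), so its column space is only a
# 2-dimensional subspace of R^3.
print(np.linalg.matrix_rank(A))  # 2
```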
|
Require Import ExtLib.Structures.Maps.
Require Import List.
Require Import ExtLib.Core.RelDec.
Require Import ExtLib.Structures.Monads.
Set Implicit Arguments.
Set Strict Implicit.
Section keyed.
Variable K : Type.
Variable K_le : K -> K -> Prop.
Variable RD_K : K -> K -> comparison.
Inductive twothree (T : Type) : Type :=
| Leaf
| Two : twothree T -> K -> T -> twothree T -> twothree T
| Three : twothree T -> K -> T -> twothree T -> K -> T -> twothree T -> twothree T.
Arguments Leaf {T}.
Arguments Two {T} _ _ _ _.
Arguments Three {T} _ _ _ _ _ _ _.
Section modify.
Variable V : Type.
Variable k : K.
Variable upd : V -> option V.
Variable def : option V.
Fixpoint remove_greatest (m : twothree V) {T} (k_oops : unit -> T) (k_ok : K -> V -> twothree V -> T) : T :=
match m with
| Leaf => k_oops tt
| Two l k v r =>
remove_greatest r (fun _ => k_ok k v l) (fun k' v' r' => k_ok k' v' (Two l k v r'))
| Three l k v m k' v' r =>
remove_greatest r (fun _ => k_ok k' v' (Two l k v m)) (fun k'' v'' r'' => k_ok k'' v'' (Three l k v m k' v' r''))
end.
Fixpoint twothree_modify (m : twothree V) {T} (k_ok : twothree V -> T) (k_splice_up : twothree V -> K -> V -> twothree V -> T) {struct m} : T :=
match m with
| Leaf =>
match def with
| Some v => k_splice_up Leaf k v Leaf
| None => k_ok Leaf
end
| Two l k' v' r =>
match RD_K k k' with
| Eq =>
match upd v' with
| Some v' => k_ok (Two l k v' r)
| None => remove_greatest l (fun _ => k_ok r) (fun k v l => k_ok (Two l k v r))
end
| Lt =>
twothree_modify l (fun l => k_ok (Two l k' v' r))
(fun l'' k'' v'' r'' => k_ok (Three l'' k'' v'' r'' k' v' r))
| Gt =>
twothree_modify r (fun r => k_ok (Two l k' v' r))
(fun l'' k'' v'' r'' => k_ok (Three l k' v' l'' k'' v'' r''))
end
| Three l k1 v1 m k2 v2 r =>
match RD_K k k1 with
| Eq =>
match upd v1 with
| Some v' => k_ok (Three l k v' m k2 v2 r)
| None =>
remove_greatest l (fun _ => k_ok (Two m k2 v2 r)) (fun k1 v1 l => k_ok (Three l k1 v1 m k2 v2 r))
end
| Lt =>
twothree_modify l (fun l' => k_ok (Three l' k1 v1 m k2 v2 r))
(fun l' k' v' r' => k_splice_up (Two l' k' v' r') k1 v1 (Two m k2 v2 r))
| Gt =>
match RD_K k k2 with
| Eq =>
match upd v2 with
| Some v2 => k_ok (Three l k1 v1 m k v2 r)
| None =>
remove_greatest m (fun _ => k_ok (Two l k1 v1 r))
(fun k' v' m' => k_ok (Three l k1 v1 m' k' v' r))
end
| Lt =>
twothree_modify m (fun m' => k_ok (Three l k1 v1 m' k2 v2 r))
(fun l' k' v' r' => k_splice_up (Two l k1 v1 l') k' v' (Two r' k2 v2 r))
| Gt =>
twothree_modify r (fun r' => k_ok (Three l k1 v1 m k2 v2 r'))
(fun l' k' v' r' => k_splice_up (Two l k1 v1 m) k2 v2 (Two l' k' v' r'))
end
end
end.
End modify.
Definition twothree_add {V} (k : K) (v : V) (m : twothree V) : twothree V :=
twothree_modify k (fun _ => Some v) (Some v) m (fun m => m) Two.
Definition twothree_remove {V} (k : K) (m : twothree V) : twothree V :=
twothree_modify k (fun _ => None) None m (fun m => m) Two.
Fixpoint twothree_find {V} (k : K) (m : twothree V) : option V :=
match m with
| Leaf => None
| Two l k' v' r =>
match RD_K k k' with
| Eq => Some v'
| Lt => twothree_find k l
| Gt => twothree_find k r
end
| Three l k1 v1 m k2 v2 r =>
match RD_K k k1 with
| Eq => Some v1
| Lt => twothree_find k l
| Gt => match RD_K k k2 with
| Eq => Some v2
| Lt => twothree_find k m
| Gt => twothree_find k r
end
end
end.
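(* Usage sketch (hypothetical, assuming [RD_K] is instantiated with a standard
comparison such as [Compare_dec.nat_compare] for [K = nat]):
[twothree_find k (twothree_add k v m)] should yield [Some v], and
[twothree_find k (twothree_remove k m)] should yield [None]. The commented-out
TEST module at the end of this file shows a concrete instantiation via the
[Map] instance below. *)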
Section fold.
Import MonadNotation.
Local Open Scope monad_scope.
Variables V T : Type.
Variable f : K -> V -> T -> T.
Fixpoint twothree_fold (acc : T) (map : twothree V) : T :=
match map with
| Leaf => acc
| Two l k v r =>
let acc := twothree_fold acc l in
let acc := f k v acc in
twothree_fold acc r
| Three l k1 v1 m k2 v2 r =>
let acc := twothree_fold acc l in
let acc := f k1 v1 acc in
let acc := twothree_fold acc m in
let acc := f k2 v2 acc in
twothree_fold acc r
end.
End fold.
Definition twothree_union {V} (m1 m2 : twothree V) : twothree V :=
twothree_fold twothree_add m2 m1.
Global Instance Map_twothree V : Map K V (twothree V) :=
{ empty := Leaf
; add := twothree_add
; remove := twothree_remove
; lookup := twothree_find
; union := twothree_union
}.
Require Import ExtLib.Structures.Reducible.
Global Instance Foldable_twothree V : Foldable (twothree V) (K * V) :=
fun _ f b x => twothree_fold (fun k v => f (k,v)) b x.
End keyed.
(** Performance Test **)
(*
Module TEST.
Definition m := twothree nat nat.
Instance Map_m : Map nat (twothree nat).
apply Map_twothree.
apply Compare_dec.nat_compare.
Defined.
Definition z : m :=
(fix fill n acc : m :=
let acc := add n n acc in
match n with
| 0 => acc
| S n => fill n acc
end) 500 empty.
Time Eval vm_compute in
let z := z in
(fix find_all n : unit :=
let _ := lookup n z in
match n with
| 0 => tt
| S n => find_all n
end) 500.
End TEST.
*) |
lemma Reals_mult [simp]: "a \<in> \<real> \<Longrightarrow> b \<in> \<real> \<Longrightarrow> a * b \<in> \<real>" |
Require Import Coq.Strings.String Coq.omega.Omega Coq.Lists.List Coq.Logic.FunctionalExtensionality Coq.Sets.Ensembles
Fiat.Common.List.ListFacts
Fiat.Computation
Fiat.Computation.Refinements.Iterate_Decide_Comp
Fiat.ADT
Fiat.ADTRefinement Fiat.ADTNotation
Fiat.QueryStructure.Specification.Representation.Schema
Fiat.QueryStructure.Specification.Representation.QueryStructureSchema
Fiat.ADTRefinement.BuildADTRefinements
Fiat.QueryStructure.Specification.Representation.QueryStructure
Fiat.Common.Ensembles.IndexedEnsembles
Fiat.QueryStructure.Specification.Operations.Query
Fiat.QueryStructure.Specification.Operations.Delete
Fiat.QueryStructure.Specification.Operations.Mutate
Fiat.QueryStructure.Implementation.Constraints.ConstraintChecksRefinements
Fiat.Common.IterateBoundedIndex
Fiat.Common.DecideableEnsembles
Fiat.Common.List.PermutationFacts
Fiat.QueryStructure.Implementation.Operations.General.QueryRefinements
Fiat.QueryStructure.Implementation.Operations.General.MutateRefinements
Fiat.Common.Ensembles.EnsembleListEquivalence.
(* Facts about implementing delete operations. *)
Section DeleteRefinements.
Hint Resolve crossConstr.
Hint Unfold SatisfiesCrossRelationConstraints
SatisfiesAttributeConstraints
SatisfiesTupleConstraints.
Arguments GetUnConstrRelation : simpl never.
Arguments UpdateUnConstrRelation : simpl never.
Arguments replace_BoundedIndex : simpl never.
Arguments BuildQueryStructureConstraints : simpl never.
Arguments BuildQueryStructureConstraints' : simpl never.
Local Transparent QSDelete.
Definition QSDeletedTuples
{qsSchema}
(qs : UnConstrQueryStructure qsSchema) Ridx
(DeletedTuples : Ensemble RawTuple) :=
(UnIndexedEnsembleListEquivalence
(Intersection _
(GetUnConstrRelation qs Ridx)
(Complement _ (EnsembleDelete (GetUnConstrRelation qs Ridx) DeletedTuples)))).
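(* Reading of the definition (our gloss, not from the original source):
[QSDeletedTuples qs Ridx DeletedTuples l] holds when the list [l] enumerates
exactly the tuples of relation [Ridx] that satisfy [DeletedTuples], i.e. the
tuples the delete operation removes. The lemma [DeletedTuplesIntersection]
below makes this equivalence precise. *)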
Lemma QSDeleteSpec_UnConstr_refine_AttributeConstraints :
forall qsSchema qs Ridx
(DeletedTuples : Ensemble RawTuple)
or,
@DropQSConstraints_AbsR qsSchema or qs ->
refine
{b : bool |
(forall tup,
GetUnConstrRelation qs Ridx tup ->
SatisfiesAttributeConstraints Ridx (indexedElement tup)) ->
decides b
(MutationPreservesAttributeConstraints
(EnsembleDelete (GetRelation or Ridx) DeletedTuples)
(SatisfiesAttributeConstraints Ridx))}
(ret true).
Proof.
unfold MutationPreservesAttributeConstraints; intros * AbsR_or_qs v Comp_v.
computes_to_econstructor; intros; computes_to_inv; subst; simpl; intros.
unfold DropQSConstraints_AbsR in *; eapply H; inversion H0; subst;
rewrite GetRelDropConstraints; eauto.
Qed.
Lemma QSDeleteSpec_UnConstr_refine_CrossConstraints' :
forall qsSchema qs Ridx
(DeletedTuples : Ensemble RawTuple)
or,
@DropQSConstraints_AbsR qsSchema or qs ->
refine
{b : bool |
(forall Ridx',
Ridx' <> Ridx ->
forall tup',
GetUnConstrRelation qs Ridx tup' ->
SatisfiesCrossRelationConstraints Ridx Ridx' (indexedElement tup')
(GetUnConstrRelation qs Ridx')) ->
decides b
(forall Ridx',
Ridx' <> Ridx ->
MutationPreservesCrossConstraints
(EnsembleDelete (GetRelation or Ridx) DeletedTuples)
(GetUnConstrRelation qs Ridx')
(SatisfiesCrossRelationConstraints Ridx Ridx'))}
(ret true).
Proof.
unfold MutationPreservesCrossConstraints; intros * AbsR_or_qs v Comp_v.
computes_to_econstructor; intros; computes_to_inv; subst; simpl; intros.
unfold DropQSConstraints_AbsR in *; eapply H; inversion H1; subst; eauto.
rewrite GetRelDropConstraints; eauto.
Qed.
Lemma QSDeleteSpec_UnConstr_refine_opt :
forall qsSchema qs Ridx DeletedTuples or,
@DropQSConstraints_AbsR qsSchema or qs ->
refine
(or' <- (QSDelete or Ridx DeletedTuples);
nr' <- {nr' | DropQSConstraints_AbsR (fst or') nr'};
ret (nr', snd or'))
match (tupleConstraints (GetNRelSchema (qschemaSchemas qsSchema) Ridx)) with
| Some tConstr =>
tupleConstr <- {b | (forall tup tup',
elementIndex tup <> elementIndex tup'
-> GetUnConstrRelation qs Ridx tup
-> GetUnConstrRelation qs Ridx tup'
-> tConstr (indexedElement tup) (indexedElement tup'))
-> decides b (MutationPreservesTupleConstraints
(EnsembleDelete (GetRelation or Ridx) DeletedTuples)
tConstr) };
crossConstr <- (Iterate_Decide_Comp_opt_Pre _
(fun Ridx' =>
if fin_eq_dec Ridx Ridx'
then None
else
match
BuildQueryStructureConstraints qsSchema Ridx'
Ridx
with
| Some CrossConstr =>
Some
((MutationPreservesCrossConstraints
(GetUnConstrRelation qs Ridx')
(EnsembleDelete (GetRelation or Ridx) DeletedTuples)
CrossConstr))
| None => None
end)
(@Iterate_Ensemble_BoundedIndex_filter
_
(fun Ridx' =>
forall tup',
(GetUnConstrRelation qs Ridx') tup'
-> SatisfiesCrossRelationConstraints
Ridx' Ridx (indexedElement tup') (GetUnConstrRelation qs Ridx))
(fun idx =>
if (fin_eq_dec Ridx idx)
then false else true)
));
match tupleConstr, crossConstr with
| true, true =>
deleted <- Pick (QSDeletedTuples qs Ridx DeletedTuples);
ret (UpdateUnConstrRelation qs Ridx (EnsembleDelete (GetUnConstrRelation qs Ridx) DeletedTuples), deleted)
| _, _ => ret (qs, [])
end
| None =>
crossConstr <- (Iterate_Decide_Comp_opt_Pre _
(fun Ridx' =>
if fin_eq_dec Ridx Ridx'
then None
else
match
BuildQueryStructureConstraints qsSchema Ridx'
Ridx
with
| Some CrossConstr =>
Some
((MutationPreservesCrossConstraints
(GetUnConstrRelation qs Ridx')
(EnsembleDelete (GetRelation or Ridx) DeletedTuples)
CrossConstr))
| None => None
end)
(@Iterate_Ensemble_BoundedIndex_filter
_
(fun Ridx' =>
forall tup',
(GetUnConstrRelation qs Ridx') tup'
-> SatisfiesCrossRelationConstraints
Ridx' Ridx (indexedElement tup') (GetUnConstrRelation qs Ridx))
(fun idx =>
if (fin_eq_dec Ridx idx)
then false else true)));
match crossConstr with
| true =>
deleted <- Pick (QSDeletedTuples qs Ridx DeletedTuples);
ret (UpdateUnConstrRelation qs Ridx (EnsembleDelete (GetUnConstrRelation qs Ridx) DeletedTuples), deleted)
| _ => ret (qs, [])
end
end.
Proof.
unfold QSDelete.
intros; rewrite QSMutateSpec_UnConstr_refine;
eauto using
QSDeleteSpec_UnConstr_refine_AttributeConstraints,
refine_SatisfiesTupleConstraintsMutate,
refine_SatisfiesCrossConstraintsMutate,
QSDeleteSpec_UnConstr_refine_CrossConstraints'.
simplify with monad laws.
unfold SatisfiesTupleConstraints.
case_eq (tupleConstraints (GetNRelSchema (qschemaSchemas qsSchema) Ridx)); intros;
[eapply refine_under_bind; intros
| simplify with monad laws].
simpl; unfold DropQSConstraints_AbsR, QSDeletedTuples in *; subst.
f_equiv; unfold pointwise_relation; intros;
repeat find_if_inside; try simplify with monad laws; try reflexivity.
rewrite GetRelDropConstraints, get_update_unconstr_eq; f_equiv.
f_equiv; unfold pointwise_relation; intros; eauto.
simpl; unfold DropQSConstraints_AbsR, QSDeletedTuples in *; subst.
repeat find_if_inside; try simplify with monad laws; try reflexivity.
rewrite GetRelDropConstraints, get_update_unconstr_eq; f_equiv.
Qed.
Lemma EnsembleComplementIntersection {A}
: forall E (P : Ensemble A),
DecideableEnsemble P
-> forall (a : @IndexedElement A),
(In _ (Intersection _ E
(Complement _ (EnsembleDelete E P))) a
<-> In _ (Intersection _ E
(fun itup => P (indexedElement itup))) a).
Proof.
unfold EnsembleDelete, Complement, In in *; intuition;
destruct H; constructor; eauto; unfold In in *.
- case_eq (DecideableEnsembles.dec (indexedElement x)); intros.
+ eapply dec_decides_P; eauto.
+ exfalso; apply H0; constructor; unfold In; eauto.
intros H'; apply dec_decides_P in H'; congruence.
- intros H'; destruct H'; unfold In in *; eauto.
Qed.
Lemma DeletedTuplesIntersection {qsSchema}
: forall qs Ridx (P : Ensemble RawTuple),
DecideableEnsemble P
-> refine {x | @QSDeletedTuples qsSchema qs Ridx P x}
{x | UnIndexedEnsembleListEquivalence
(Intersection _ (GetUnConstrRelation qs Ridx)
(fun itup => P (indexedElement itup))) x}.
Proof.
intros qs Ridx P P_dec v Comp_v; computes_to_inv.
computes_to_constructor.
unfold QSDeletedTuples, UnIndexedEnsembleListEquivalence in *; destruct_ex;
intuition; subst.
eexists; intuition.
unfold EnsembleListEquivalence in *; intuition; eauto with typeclass_instances.
+ eapply H; eapply EnsembleComplementIntersection; eauto with typeclass_instances.
+ eapply EnsembleComplementIntersection; eauto with typeclass_instances.
eapply H; eauto.
Qed.
Definition UpdateUnConstrRelationDeleteC {qsSchema} (qs : UnConstrQueryStructure qsSchema) Ridx DeletedTuples :=
ret (UpdateUnConstrRelation qs Ridx (EnsembleDelete (GetUnConstrRelation qs Ridx) DeletedTuples)).
Lemma QSDeleteSpec_refine_subgoals ResultT :
forall qsSchema (qs : QueryStructure qsSchema) qs' Ridx
default success
refined_schConstr refined_qsConstr
(DeletedTuples : Ensemble RawTuple)
(k : _ -> Comp ResultT),
DropQSConstraints_AbsR qs qs'
-> refine match tupleConstraints (GetNRelSchema (qschemaSchemas qsSchema) Ridx) with
| Some Constr =>
{b | (forall tup tup',
elementIndex tup <> elementIndex tup'
-> GetUnConstrRelation qs' Ridx tup
-> GetUnConstrRelation qs' Ridx tup'
-> Constr (indexedElement tup)
(indexedElement tup'))
-> decides b
(MutationPreservesTupleConstraints
(EnsembleDelete (GetUnConstrRelation qs' Ridx) DeletedTuples)
Constr) }
| None => ret true
end
refined_schConstr
-> refine (Iterate_Decide_Comp_opt_Pre _
(fun Ridx' =>
if fin_eq_dec Ridx Ridx'
then None
else
match BuildQueryStructureConstraints qsSchema Ridx' Ridx with
| Some CrossConstr =>
Some
(MutationPreservesCrossConstraints (GetUnConstrRelation qs' Ridx')
(EnsembleDelete (GetUnConstrRelation qs' Ridx) DeletedTuples)
CrossConstr)
| None => None
end)
(@Iterate_Ensemble_BoundedIndex_filter
_
(fun Ridx' =>
forall tup',
GetUnConstrRelation qs' Ridx' tup'
-> SatisfiesCrossRelationConstraints
Ridx' Ridx (indexedElement tup') (GetUnConstrRelation qs' Ridx))
(fun idx =>
if (fin_eq_dec Ridx idx)
then false else true)
)) refined_qsConstr
-> (forall qs'' qs''' mutated,
DropQSConstraints_AbsR qs'' qs'''
-> (forall Ridx',
Ridx <> Ridx' ->
GetRelation qs Ridx' =
GetRelation qs'' Ridx')
-> (forall t,
GetRelation qs'' Ridx t <-> EnsembleDelete (GetRelation qs Ridx) DeletedTuples t)
-> QSDeletedTuples qs' Ridx DeletedTuples mutated
-> refine (k (qs'', mutated))
(success qs''' mutated))
-> refine (k (qs, [ ])) default
-> refine
(qs' <- QSDelete qs Ridx DeletedTuples; k qs')
( schConstr <- refined_schConstr;
qsConstr <- refined_qsConstr;
match schConstr, qsConstr with
| true, true =>
mutated <- Pick (QSDeletedTuples qs' Ridx DeletedTuples);
qs'' <- UpdateUnConstrRelationDeleteC qs' Ridx DeletedTuples;
success qs'' mutated
| _, _ => default
end).
Proof.
intros.
unfold QSDelete.
rewrite QSMutateSpec_refine_subgoals' with (refined_schConstr_self := ret true)
(refined_qsConstr' := ret true);
try first [eassumption | reflexivity ].
simplify with monad laws.
repeat (f_equiv; unfold pointwise_relation; intros).
rewrite <- H0, <- (GetRelDropConstraints qs Ridx), <- H.
rewrite refine_SatisfiesTupleConstraintsMutate; eauto.
destruct (tupleConstraints (GetNRelSchema (qschemaSchemas qsSchema) Ridx));
f_equiv.
rewrite refine_SatisfiesCrossConstraintsMutate; eauto.
rewrite <- (GetRelDropConstraints qs Ridx).
rewrite <- H1, H; f_equiv.
repeat find_if_inside.
unfold QSDeletedTuples.
f_equiv.
rewrite <- (GetRelDropConstraints qs Ridx).
unfold GetUnConstrRelation, UpdateUnConstrRelation.
rewrite ilist2.ith_replace2_Index_eq, <- H; reflexivity.
unfold UpdateUnConstrRelationDeleteC, UpdateUnConstrRelationMutateC;
rewrite <- H, GetRelDropConstraints; reflexivity.
reflexivity.
reflexivity.
eauto using QSDeleteSpec_UnConstr_refine_AttributeConstraints.
eauto using QSDeleteSpec_UnConstr_refine_CrossConstraints'.
intros; eapply H2; eauto.
unfold QSDeletedTuples.
unfold GetUnConstrRelation, UpdateUnConstrRelation in H7.
rewrite ilist2.ith_replace2_Index_eq in H7.
rewrite <- GetRelDropConstraints, H in H7; eauto.
Qed.
Local Transparent Query_For.
Lemma DeletedTuplesFor {qsSchema}
: forall qs Ridx P,
DecideableEnsemble P
-> refine {x | @QSDeletedTuples qsSchema qs Ridx P x}
(For (UnConstrQuery_In qs Ridx
(fun tup => Where (P tup) Return tup))).
Proof.
intros qs Ridx P P_dec v Comp_v; rewrite DeletedTuplesIntersection by auto.
computes_to_constructor.
unfold UnIndexedEnsembleListEquivalence.
unfold Query_For in *.
computes_to_inv.
destruct Comp_v as [l [Perm_l_v Comp_v] ].
unfold UnConstrQuery_In, QueryResultComp in *; computes_to_inv.
remember (GetUnConstrRelation qs Ridx); clear Heqi.
revert P_dec i v v0 Perm_l_v Comp_v Comp_v'; clear; induction l; simpl; intros.
- apply Return_inv in Comp_v; subst.
eexists nil; simpl; split; eauto.
rewrite Permutation_nil by eauto; reflexivity.
+ unfold EnsembleListEquivalence in *; intuition.
* destruct H; intuition.
apply Pick_inv in Perm_l_v; inversion Perm_l_v.
unfold In in *; intuition; subst.
apply H1 in H.
eapply (@FlattenCompList.flatten_CompList_nil _ P x0); eauto.
destruct x0; simpl in *; try discriminate; computes_to_econstructor.
* constructor.
- apply Pick_inv in Perm_l_v.
unfold UnConstrRelation in i.
destruct Perm_l_v as [ [ | [a' x'] ] [x_eq [equiv_u_x' NoDup_x'] ] ].
destruct l; simpl in *; try discriminate.
unfold In in Comp_v; pose (Bind_inv Comp_v); destruct_ex; intuition; subst; computes_to_inv; subst.
simpl in x_eq; injections.
case_eq (@DecideableEnsembles.dec _ P P_dec a); intros.
+ apply Pick_inv in H0; intuition.
apply dec_decides_P in H2. apply H3 in H2.
apply Return_inv in H2; simpl in *; subst; simpl in *.
pose proof (PermutationConsSplit _ _ _ Comp_v'); destruct_ex; subst.
unfold UnIndexedEnsembleListEquivalence in *.
destruct (H (fun x => i x /\ x <> {|indexedElement := a; elementIndex := a' |}) (app x x0) v1); intuition eauto.
apply PickComputes.
computes_to_inv; injections.
eexists _; intuition eauto.
unfold In in *; intuition.
rewrite equiv_u_x' in H2; destruct H2; subst; eauto; congruence.
unfold In; intuition.
apply equiv_u_x'; simpl; intuition.
inversion NoDup_x'; subst; eauto.
apply H7; apply in_map_iff; eexists; split; eauto; simpl; eauto.
inversion NoDup_x'; subst; eauto.
eapply Permutation_cons_inv; rewrite Permutation_middle; eassumption.
* symmetry in H2; pose proof (app_map_inv _ _ _ _ H2); destruct_ex;
intuition; subst.
eexists (app x2 ({|indexedElement := a; elementIndex := a' |} :: x3));
simpl; rewrite map_app.
{ simpl; intuition; computes_to_inv; injections.
- destruct H5; unfold In in *; apply equiv_u_x' in H5; simpl in *; intuition.
apply in_or_app; simpl; eauto.
assert (i x) as u_x by (apply equiv_u_x'; eauto).
assert (List.In x (x2 ++ x3)) as In_x by
(apply H0; constructor; unfold In; intuition; subst;
inversion NoDup_x'; subst; eapply H10; apply in_map_iff; eexists;
split; cbv beta; simpl; eauto; reflexivity).
apply in_or_app; simpl; apply in_app_or in In_x; intuition.
- unfold In.
assert (List.In x (x2 ++ x3) \/ x = {|indexedElement := a; elementIndex := a' |})
as In_x0
by (apply in_app_or in H5; simpl in H5; intuition).
intuition.
+ apply H0 in H7; destruct H7; unfold In in *; intuition.
constructor; eauto.
+ subst; constructor; eauto.
apply equiv_u_x'; simpl; eauto.
case_eq (@DecideableEnsembles.dec _ P P_dec a); intros.
apply dec_decides_P; eauto.
assert (~ P a) as H''
by (unfold not; intros H'; apply dec_decides_P in H'; congruence);
apply H4 in H''; discriminate.
- rewrite map_app; apply NoDup_app_swap; simpl; constructor; eauto.
inversion NoDup_x'; subst; unfold not; intros; apply H8.
rewrite <- map_app in H5; apply in_map_iff in H5; destruct_ex; intuition.
assert (List.In x (x2 ++ x3)) as In_a by
(apply in_or_app; apply in_app_or in H10; intuition).
apply H0 in In_a; destruct In_a; unfold In in *; intuition.
apply equiv_u_x' in H12; simpl in *; intuition.
destruct x; simpl in *; subst.
apply in_map_iff; eexists; split; eauto; simpl; eauto.
apply NoDup_app_swap; rewrite <- map_app; eauto.
}
+ unfold Query_Where, Query_Return in H0;
computes_to_inv; intuition.
assert (~ P a) as H''
by (unfold not; intros H'; apply dec_decides_P in H'; congruence
).
apply H4 in H''; subst; simpl in *; subst.
destruct (H (fun x => i x /\ x <> {|indexedElement := a; elementIndex := a' |}) v v1); intuition eauto.
* computes_to_econstructor.
eexists; intuition; eauto.
unfold In in *; intuition.
apply equiv_u_x' in H5; destruct H5; subst; eauto.
congruence.
unfold In; intuition.
unfold In; intuition.
subst.
apply equiv_u_x'; simpl; intuition.
inversion NoDup_x'; subst; eauto.
apply H8; apply in_map_iff; eexists; split; eauto; simpl; eauto.
inversion NoDup_x'; subst; eauto.
* unfold In.
eexists; split; eauto.
unfold UnIndexedEnsembleListEquivalence in *; intuition.
destruct H6; intuition.
eapply H0; constructor; unfold In in *; subst; intuition.
subst; apply_in_hyp dec_decides_P; simpl in *; congruence.
constructor;
apply H0 in H6; destruct H6; unfold In in *; intuition.
Qed.
End DeleteRefinements.
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
chapter "Machine Operations"
theory MachineOps
imports
"Word_Lib.WordSetup"
"Lib.NonDetMonad"
"../MachineMonad"
begin
section "Wrapping and Lifting Machine Operations"
text \<open>
Most of the machine operations below work on the underspecified
part of the machine state @{typ machine_state_rest} and cannot fail.
We could express the latter by type (leaving out the failure flag),
but if we later wanted to implement them,
we'd have to set up a new hoare-logic
framework for that type. So instead, we provide a wrapper for these
operations that explicitly ignores the fail flag and sets it to
False. Similarly, these operations never return an empty set of
follow-on states, which would require the operation to fail.
So we explicitly make this (non-existing) case a null operation.
All this is done only to avoid a large number of axioms (2 for each operation).
\<close>
context Arch begin global_naming X64
section "The Operations"
consts'
memory_regions :: "(paddr \<times> paddr) list" (* avail_p_regs *)
device_regions :: "(paddr \<times> paddr) list" (* dev_p_regs *)
definition
getMemoryRegions :: "(paddr * paddr) list machine_monad"
where "getMemoryRegions \<equiv> return memory_regions"
consts'
getDeviceRegions_impl :: "unit machine_rest_monad"
getDeviceRegions_val :: "machine_state \<Rightarrow> (paddr * paddr) list"
definition
getDeviceRegions :: "(paddr * paddr) list machine_monad"
where
"getDeviceRegions \<equiv> return device_regions"
consts'
getKernelDevices_impl :: "unit machine_rest_monad"
getKernelDevices_val :: "machine_state \<Rightarrow> (paddr * machine_word) list"
definition
getKernelDevices :: "(paddr * machine_word) list machine_monad"
where
"getKernelDevices \<equiv> do
machine_op_lift getKernelDevices_impl;
gets getKernelDevices_val
od"
definition
loadWord :: "machine_word \<Rightarrow> machine_word machine_monad"
where "loadWord p \<equiv> do m \<leftarrow> gets underlying_memory;
assert (p && mask 3 = 0);
return (word_rcat (map (\<lambda>i. m (p + (7 - of_int i))) [0 .. 7]))
od"
definition
storeWord :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_monad"
where "storeWord p w \<equiv> do
assert (p && mask 3 = 0);
modify (underlying_memory_update (
fold (\<lambda>i m. m((p + (of_int i)) := word_rsplit w ! (7 - nat i))) [0 .. 7]))
od"
lemma upto0_7_def:
"[0..7] = [0,1,2,3,4,5,6,7]" by eval
lemma loadWord_storeWord_is_return:
"p && mask 3 = 0 \<Longrightarrow> (do w \<leftarrow> loadWord p; storeWord p w od) = return ()"
apply (rule ext)
by (simp add: loadWord_def storeWord_def bind_def assert_def return_def
modify_def gets_def get_def eval_nat_numeral put_def upto0_7_def
word_rsplit_rcat_size word_size)
text \<open>This instruction is required only in the simulator.\<close>
definition
storeWordVM :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_monad"
where "storeWordVM w p \<equiv> return ()"
consts'
configureTimer_impl :: "unit machine_rest_monad"
configureTimer_val :: "machine_state \<Rightarrow> irq"
definition
configureTimer :: "irq machine_monad"
where
"configureTimer \<equiv> do
machine_op_lift configureTimer_impl;
gets configureTimer_val
od"
consts' (* XXX: replaces configureTimer in new boot code
TODO: remove configureTimer when haskell updated *)
initTimer_impl :: "unit machine_rest_monad"
definition
initTimer :: "unit machine_monad"
where "initTimer \<equiv> machine_op_lift initTimer_impl"
consts'
resetTimer_impl :: "unit machine_rest_monad"
definition
resetTimer :: "unit machine_monad"
where "resetTimer \<equiv> machine_op_lift resetTimer_impl"
consts'
invalidateTLB_impl :: "unit machine_rest_monad"
definition
invalidateTLB :: "unit machine_monad"
where "invalidateTLB \<equiv> machine_op_lift invalidateTLB_impl"
lemmas cache_machine_op_defs = invalidateTLB_def
definition
debugPrint :: "unit list \<Rightarrow> unit machine_monad"
where
debugPrint_def[simp]:
"debugPrint \<equiv> \<lambda>message. return ()"
\<comment> \<open>Interrupt controller operations\<close>
text \<open>
Interrupts that cannot occur while the kernel is running (e.g. at preemption points),
but that can occur from user mode. Empty on plain x86-64.
\<close>
definition
"non_kernel_IRQs = {}"
text \<open>
@{term getActiveIRQ} is now deterministic.
It 'updates' the irq state to reflect the passage of
time since the last IRQ was retrieved, then it gets the active
IRQ (if there is one).
\<close>
definition
getActiveIRQ :: "bool \<Rightarrow> (irq option) machine_monad"
where
"getActiveIRQ in_kernel \<equiv> do
is_masked \<leftarrow> gets $ irq_masks;
modify (\<lambda>s. s \<lparr> irq_state := irq_state s + 1 \<rparr>);
active_irq \<leftarrow> gets $ irq_oracle \<circ> irq_state;
if is_masked active_irq \<or> active_irq = 0xFF \<or> (in_kernel \<and> active_irq \<in> non_kernel_IRQs)
then return None
else return ((Some active_irq) :: irq option)
od"
definition
maskInterrupt :: "bool \<Rightarrow> irq \<Rightarrow> unit machine_monad"
where
"maskInterrupt m irq \<equiv>
modify (\<lambda>s. s \<lparr> irq_masks := (irq_masks s) (irq := m) \<rparr>)"
text \<open>Does nothing on this platform\<close>
definition
ackInterrupt :: "irq \<Rightarrow> unit machine_monad"
where
"ackInterrupt \<equiv> \<lambda>irq. return ()"
text \<open>Does nothing on this platform\<close>
definition
setInterruptMode :: "irq \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> unit machine_monad"
where
"setInterruptMode \<equiv> \<lambda>irq levelTrigger polarityLow. return ()"
section "Memory Clearance"
text \<open>Clear memory contents to recycle it as user memory\<close>
definition
clearMemory :: "machine_word \<Rightarrow> nat \<Rightarrow> unit machine_monad"
where
"clearMemory ptr bytelength \<equiv> mapM_x (\<lambda>p. storeWord p 0) [ptr, ptr + word_size .e. ptr + (of_nat bytelength) - 1]"
definition
clearMemoryVM :: "machine_word \<Rightarrow> nat \<Rightarrow> unit machine_monad"
where
"clearMemoryVM ptr bits \<equiv> return ()"
text \<open>
Initialize memory to be used as user memory.
Note that zeroing out the memory is redundant in the specifications.
In any case, we cannot abstract from the call to cleanCacheRange,
which appears in the implementation.
\<close>
abbreviation (input) "initMemory == clearMemory"
text \<open>
Free memory that had been initialized as user memory.
  While freeing memory is a no-op in the implementation,
we zero out the underlying memory in the specifications to avoid garbage.
If we know that there is no garbage,
we can compute from the implementation state
what the exact memory content in the specifications is.
\<close>
definition
freeMemory :: "machine_word \<Rightarrow> nat \<Rightarrow> unit machine_monad"
where
"freeMemory ptr bits \<equiv>
mapM_x (\<lambda>p. storeWord p 0) [ptr, ptr + word_size .e. ptr + 2 ^ bits - 1]"
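text \<open>For example, @{term "freeMemory ptr 12"} zeroes one 4KiB frame word by word:
  it stores @{term 0} at @{term ptr}, @{term "ptr + 8"}, \<dots>, @{term "ptr + 4088"}.\<close>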
section "User Monad"
text \<open> There are 576 bytes of FPU state. Since there are no operations on this state apart from bulk
save/restore, we abstract from names and just say how many bytes there are. \<close>
type_synonym fpu_bytes = 576
type_synonym fpu_state = "fpu_bytes \<Rightarrow> 8 word"
type_synonym user_regs = "register \<Rightarrow> machine_word"
datatype user_context = UserContext (fpu_state : fpu_state) (user_regs : user_regs)
type_synonym 'a user_monad = "(user_context, 'a) nondet_monad"
definition
getRegister :: "register \<Rightarrow> machine_word user_monad"
where
"getRegister r \<equiv> gets (\<lambda>s. user_regs s r)"
definition
"modify_registers f uc \<equiv> UserContext (fpu_state uc) (f (user_regs uc))"
definition
setRegister :: "register \<Rightarrow> machine_word \<Rightarrow> unit user_monad"
where
"setRegister r v \<equiv> modify (\<lambda>s. UserContext (fpu_state s) ((user_regs s) (r := v)))"
definition
"getRestartPC \<equiv> getRegister FaultIP"
definition
"setNextPC \<equiv> setRegister NextIP"
definition
getFPUState :: "fpu_state user_monad"
where
"getFPUState \<equiv> gets fpu_state"
definition
setFPUState :: "fpu_state \<Rightarrow> unit user_monad"
where
"setFPUState fc \<equiv> modify (\<lambda>s. UserContext fc (user_regs s))"
(* The FPU state is opaque; the null state is a constant snapshot taken after initialisation *)
consts'
FPUNullState :: fpu_state
consts'
nativeThreadUsingFPU_impl :: "machine_word \<Rightarrow> unit machine_rest_monad"
nativeThreadUsingFPU_val :: "machine_state \<Rightarrow> bool"
definition
nativeThreadUsingFPU :: "machine_word \<Rightarrow> bool machine_monad"
where
"nativeThreadUsingFPU thread_ptr \<equiv> do
machine_op_lift (nativeThreadUsingFPU_impl thread_ptr);
gets nativeThreadUsingFPU_val
od"
consts'
switchFpuOwner_impl :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_rest_monad"
definition
switchFpuOwner :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_monad"
where
"switchFpuOwner new_owner cpu \<equiv> machine_op_lift (switchFpuOwner_impl new_owner cpu)"
consts'
initL2Cache_impl :: "unit machine_rest_monad"
definition
initL2Cache :: "unit machine_monad"
where "initL2Cache \<equiv> machine_op_lift initL2Cache_impl"
consts'
writeCR3_impl :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_rest_monad"
definition writeCR3 :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_monad"
where
"writeCR3 vspace pcid \<equiv> machine_op_lift (writeCR3_impl vspace pcid)"
consts'
mfence_impl :: "unit machine_rest_monad"
definition
mfence :: "unit machine_monad"
where
"mfence \<equiv> machine_op_lift mfence_impl"
consts'
invalidateTLBEntry_impl :: "word64 \<Rightarrow> unit machine_rest_monad"
definition
invalidateTLBEntry :: "word64 \<Rightarrow> unit machine_monad"
where
"invalidateTLBEntry vptr \<equiv> machine_op_lift (invalidateTLBEntry_impl vptr)"
consts'
invalidateTranslationSingleASID_impl :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_rest_monad"
definition
invalidateTranslationSingleASID :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_monad"
where
"invalidateTranslationSingleASID vptr asid \<equiv> machine_op_lift (invalidateTranslationSingleASID_impl vptr asid)"
consts'
invalidateASID_impl :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_rest_monad"
definition
invalidateASID :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_monad"
where
"invalidateASID vspace asid \<equiv> machine_op_lift (invalidateASID_impl vspace asid)"
consts'
invalidateLocalPageStructureCacheASID_impl :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_rest_monad"
definition
invalidateLocalPageStructureCacheASID :: "machine_word \<Rightarrow> machine_word \<Rightarrow> unit machine_monad"
where
"invalidateLocalPageStructureCacheASID vspace asid \<equiv>
machine_op_lift (invalidateLocalPageStructureCacheASID_impl vspace asid)"
(* FIXME x64: VT-d
definition
firstValidIODomain :: "word16"
where
"firstValidIODomain \<equiv> undefined"
definition
numIODomainIDBits :: "nat"
where
"numIODomainIDBits \<equiv> undefined"
*)
definition
hwASIDInvalidate :: "word64 \<Rightarrow> machine_word \<Rightarrow> unit machine_monad"
where
"hwASIDInvalidate \<equiv> invalidateASID"
consts'
getFaultAddress_val :: "machine_state \<Rightarrow> machine_word"
definition
getFaultAddress :: "word64 machine_monad"
where
"getFaultAddress \<equiv> gets getFaultAddress_val"
consts'
irqIntOffset_val :: "machine_state \<Rightarrow> machine_word"
definition
irqIntOffset :: "machine_word"
where
"irqIntOffset \<equiv> 0x20"
definition
maxPCIBus :: "machine_word"
where
"maxPCIBus \<equiv> 0xFF"
definition
maxPCIDev :: "machine_word"
where
"maxPCIDev \<equiv> 31"
definition
maxPCIFunc :: "machine_word"
where
"maxPCIFunc \<equiv> 7"
definition
ioapicIRQLines :: "machine_word"
where
"ioapicIRQLines \<equiv> 24"
consts'
ioapicMapPinToVector_impl :: "machine_word \<Rightarrow> machine_word \<Rightarrow> machine_word \<Rightarrow> machine_word \<Rightarrow>
machine_word \<Rightarrow> unit machine_rest_monad"
definition
ioapicMapPinToVector :: "machine_word \<Rightarrow> machine_word \<Rightarrow> machine_word \<Rightarrow> machine_word \<Rightarrow>
machine_word \<Rightarrow> unit machine_monad"
where
"ioapicMapPinToVector ioapic pin level polarity vector \<equiv>
machine_op_lift (ioapicMapPinToVector_impl ioapic pin level polarity vector)"
definition IRQ :: "word8 \<Rightarrow> irq"
where
"IRQ \<equiv> id"
consts'
in8_impl :: "word16 \<Rightarrow> unit machine_rest_monad"
in8_val :: "machine_state \<Rightarrow> machine_word"
definition
in8 :: "word16 \<Rightarrow> machine_word machine_monad"
where
"in8 port \<equiv> do machine_op_lift $ in8_impl port; gets in8_val od"
consts'
in16_impl :: "word16 \<Rightarrow> unit machine_rest_monad"
in16_val :: "machine_state \<Rightarrow> machine_word"
definition
in16 :: "word16 \<Rightarrow> machine_word machine_monad"
where
"in16 port \<equiv> do machine_op_lift $ in16_impl port; gets in16_val od"
consts'
in32_impl :: "word16 \<Rightarrow> unit machine_rest_monad"
in32_val :: "machine_state \<Rightarrow> machine_word"
definition
in32 :: "word16 \<Rightarrow> machine_word machine_monad"
where
"in32 port \<equiv> do machine_op_lift $ in32_impl port; gets in32_val od"
consts'
out8_impl :: "word16 \<Rightarrow> word8 \<Rightarrow> unit machine_rest_monad"
definition
out8 :: "word16 \<Rightarrow> word8 \<Rightarrow> unit machine_monad"
where
"out8 port dat \<equiv> machine_op_lift $ out8_impl port dat"
consts'
out16_impl :: "word16 \<Rightarrow> word16 \<Rightarrow> unit machine_rest_monad"
definition
out16 :: "word16 \<Rightarrow> word16 \<Rightarrow> unit machine_monad"
where
"out16 port dat \<equiv> machine_op_lift $ out16_impl port dat"
consts'
out32_impl :: "word16 \<Rightarrow> word32 \<Rightarrow> unit machine_rest_monad"
definition
out32 :: "word16 \<Rightarrow> word32 \<Rightarrow> unit machine_monad"
where
"out32 port dat \<equiv> machine_op_lift $ out32_impl port dat"
end
end
|
(* Title: HOL/Auth/Recur.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1996 University of Cambridge
*)
section\<open>The Otway-Bull Recursive Authentication Protocol\<close>
theory Recur imports Public begin
text\<open>End marker for message bundles\<close>
abbreviation
END :: "msg" where
"END == Number 0"
(*Two session keys are distributed to each agent except for the initiator,
who receives one.
Perhaps the two session keys could be bundled into a single message.
*)
inductive_set (*Server's response to the nested message*)
respond :: "event list \<Rightarrow> (msg*msg*key)set"
for evs :: "event list"
where
One: "Key KAB \<notin> used evs
==> (Hash[Key(shrK A)] \<lbrace>Agent A, Agent B, Nonce NA, END\<rbrace>,
\<lbrace>Crypt (shrK A) \<lbrace>Key KAB, Agent B, Nonce NA\<rbrace>, END\<rbrace>,
KAB) \<in> respond evs"
(*The most recent session key is passed up to the caller*)
| Cons: "[| (PA, RA, KAB) \<in> respond evs;
Key KBC \<notin> used evs; Key KBC \<notin> parts {RA};
PA = Hash[Key(shrK A)] \<lbrace>Agent A, Agent B, Nonce NA, P\<rbrace> |]
==> (Hash[Key(shrK B)] \<lbrace>Agent B, Agent C, Nonce NB, PA\<rbrace>,
\<lbrace>Crypt (shrK B) \<lbrace>Key KBC, Agent C, Nonce NB\<rbrace>,
Crypt (shrK B) \<lbrace>Key KAB, Agent A, Nonce NB\<rbrace>,
RA\<rbrace>,
KBC)
\<in> respond evs"
(*Induction over "respond" can be difficult due to the complexity of the
subgoals. Set "responses" captures the general form of certificates.
*)
inductive_set
responses :: "event list => msg set"
for evs :: "event list"
where
(*Server terminates lists*)
Nil: "END \<in> responses evs"
| Cons: "[| RA \<in> responses evs; Key KAB \<notin> used evs |]
==> \<lbrace>Crypt (shrK B) \<lbrace>Key KAB, Agent A, Nonce NB\<rbrace>,
RA\<rbrace> \<in> responses evs"
inductive_set recur :: "event list set"
where
(*Initial trace is empty*)
Nil: "[] \<in> recur"
(*The spy MAY say anything he CAN say. Common to
all similar protocols.*)
| Fake: "[| evsf \<in> recur; X \<in> synth (analz (knows Spy evsf)) |]
==> Says Spy B X # evsf \<in> recur"
(*Alice initiates a protocol run.
END is a placeholder to terminate the nesting.*)
| RA1: "[| evs1 \<in> recur; Nonce NA \<notin> used evs1 |]
==> Says A B (Hash[Key(shrK A)] \<lbrace>Agent A, Agent B, Nonce NA, END\<rbrace>)
# evs1 \<in> recur"
(*Bob's response to Alice's message. C might be the Server.
We omit PA = \<lbrace>XA, Agent A, Agent B, Nonce NA, P\<rbrace> because
it complicates proofs, so B may respond to any message at all!*)
| RA2: "[| evs2 \<in> recur; Nonce NB \<notin> used evs2;
Says A' B PA \<in> set evs2 |]
==> Says B C (Hash[Key(shrK B)] \<lbrace>Agent B, Agent C, Nonce NB, PA\<rbrace>)
# evs2 \<in> recur"
(*The Server receives Bob's message and prepares a response.*)
| RA3: "[| evs3 \<in> recur; Says B' Server PB \<in> set evs3;
(PB,RB,K) \<in> respond evs3 |]
==> Says Server B RB # evs3 \<in> recur"
(*Bob receives the returned message and compares the Nonces with
those in the message he previously sent the Server.*)
| RA4: "[| evs4 \<in> recur;
Says B C \<lbrace>XH, Agent B, Agent C, Nonce NB,
XA, Agent A, Agent B, Nonce NA, P\<rbrace> \<in> set evs4;
Says C' B \<lbrace>Crypt (shrK B) \<lbrace>Key KBC, Agent C, Nonce NB\<rbrace>,
Crypt (shrK B) \<lbrace>Key KAB, Agent A, Nonce NB\<rbrace>,
RA\<rbrace> \<in> set evs4 |]
==> Says B A RA # evs4 \<in> recur"
(*No "oops" message can easily be expressed. Each session key is
associated--in two separate messages--with two nonces. This is
one try, but it isn't that useful. Re domino attack, note that
Recur.thy proves that each session key is secure provided the two
peers are, even if there are compromised agents elsewhere in
the chain. Oops cases proved using parts_cut, Key_in_keysFor_parts,
etc.
Oops: "[| evso \<in> recur; Says Server B RB \<in> set evso;
RB \<in> responses evs'; Key K \<in> parts {RB} |]
==> Notes Spy \<lbrace>Key K, RB\<rbrace> # evso \<in> recur"
*)
declare Says_imp_knows_Spy [THEN analz.Inj, dest]
declare parts.Body [dest]
declare analz_into_parts [dest]
declare Fake_parts_insert_in_Un [dest]
(** Possibility properties: traces that reach the end
ONE theorem would be more elegant and faster!
By induction on a list of agents (no repetitions)
**)
text\<open>Simplest case: Alice goes directly to the server\<close>
lemma "Key K \<notin> used []
==> \<exists>NA. \<exists>evs \<in> recur.
Says Server A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent Server, Nonce NA\<rbrace>,
END\<rbrace> \<in> set evs"
apply (intro exI bexI)
apply (rule_tac [2] recur.Nil [THEN recur.RA1,
THEN recur.RA3 [OF _ _ respond.One]])
apply (possibility, simp add: used_Cons)
done
text\<open>Case two: Alice, Bob and the server\<close>
lemma "[| Key K \<notin> used []; Key K' \<notin> used []; K \<noteq> K';
Nonce NA \<notin> used []; Nonce NB \<notin> used []; NA < NB |]
==> \<exists>NA. \<exists>evs \<in> recur.
Says B A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent B, Nonce NA\<rbrace>,
END\<rbrace> \<in> set evs"
apply (intro exI bexI)
apply (rule_tac [2]
recur.Nil
[THEN recur.RA1 [of _ NA],
THEN recur.RA2 [of _ NB],
THEN recur.RA3 [OF _ _ respond.One
[THEN respond.Cons [of _ _ K _ K']]],
THEN recur.RA4], possibility)
apply (auto simp add: used_Cons)
done
(*Case three: Alice, Bob, Charlie and the server. Rather slow (5 seconds).*)
lemma "[| Key K \<notin> used []; Key K' \<notin> used [];
Key K'' \<notin> used []; K \<noteq> K'; K' \<noteq> K''; K \<noteq> K'';
Nonce NA \<notin> used []; Nonce NB \<notin> used []; Nonce NC \<notin> used [];
NA < NB; NB < NC |]
==> \<exists>K. \<exists>NA. \<exists>evs \<in> recur.
Says B A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent B, Nonce NA\<rbrace>,
END\<rbrace> \<in> set evs"
apply (intro exI bexI)
apply (rule_tac [2]
recur.Nil [THEN recur.RA1,
THEN recur.RA2, THEN recur.RA2,
THEN recur.RA3
[OF _ _ respond.One
[THEN respond.Cons, THEN respond.Cons]],
THEN recur.RA4, THEN recur.RA4])
apply basic_possibility
apply (tactic "DEPTH_SOLVE (swap_res_tac \<^context> [refl, conjI, disjCI] 1)")
done
lemma respond_imp_not_used: "(PA,RB,KAB) \<in> respond evs ==> Key KAB \<notin> used evs"
by (erule respond.induct, simp_all)
lemma Key_in_parts_respond [rule_format]:
"[| Key K \<in> parts {RB}; (PB,RB,K') \<in> respond evs |] ==> Key K \<notin> used evs"
apply (erule rev_mp, erule respond.induct)
apply (auto dest: Key_not_used respond_imp_not_used)
done
text\<open>Simple inductive reasoning about responses\<close>
lemma respond_imp_responses:
"(PA,RB,KAB) \<in> respond evs ==> RB \<in> responses evs"
apply (erule respond.induct)
apply (blast intro!: respond_imp_not_used responses.intros)+
done
(** For reasoning about the encrypted portion of messages **)
lemmas RA2_analz_spies = Says_imp_spies [THEN analz.Inj]
lemma RA4_analz_spies:
"Says C' B \<lbrace>Crypt K X, X', RA\<rbrace> \<in> set evs ==> RA \<in> analz (spies evs)"
by blast
(*RA2_analz... and RA4_analz... let us treat those cases using the same
argument as for the Fake case. This is possible for most, but not all,
proofs: Fake does not invent new nonces (as in RA2), and of course Fake
messages originate from the Spy. *)
lemmas RA2_parts_spies = RA2_analz_spies [THEN analz_into_parts]
lemmas RA4_parts_spies = RA4_analz_spies [THEN analz_into_parts]
(** Theorems of the form X \<notin> parts (spies evs) imply that NOBODY
sends messages containing X! **)
(** Spy never sees another agent's shared key! (unless it's bad at start) **)
lemma Spy_see_shrK [simp]:
"evs \<in> recur ==> (Key (shrK A) \<in> parts (spies evs)) = (A \<in> bad)"
apply (erule recur.induct, auto)
txt\<open>RA3. It's ugly to call auto twice, but it seems necessary.\<close>
apply (auto dest: Key_in_parts_respond simp add: parts_insert_spies)
done
lemma Spy_analz_shrK [simp]:
"evs \<in> recur ==> (Key (shrK A) \<in> analz (spies evs)) = (A \<in> bad)"
by auto
lemma Spy_see_shrK_D [dest!]:
"[|Key (shrK A) \<in> parts (knows Spy evs); evs \<in> recur|] ==> A \<in> bad"
by (blast dest: Spy_see_shrK)
(*** Proofs involving analz ***)
(** Session keys are not used to encrypt other session keys **)
(*Version for "responses" relation. Handles case RA3 in the theorem below.
Note that it holds for *any* set H (not just "spies evs")
satisfying the inductive hypothesis.*)
lemma resp_analz_image_freshK_lemma:
"[| RB \<in> responses evs;
\<forall>K KK. KK \<subseteq> - (range shrK) \<longrightarrow>
(Key K \<in> analz (Key`KK \<union> H)) =
(K \<in> KK | Key K \<in> analz H) |]
==> \<forall>K KK. KK \<subseteq> - (range shrK) \<longrightarrow>
(Key K \<in> analz (insert RB (Key`KK \<union> H))) =
(K \<in> KK | Key K \<in> analz (insert RB H))"
apply (erule responses.induct)
apply (simp_all del: image_insert
add: analz_image_freshK_simps, auto)
done
text\<open>Version for the protocol. Proof is easy, thanks to the lemma.\<close>
lemma raw_analz_image_freshK:
"evs \<in> recur ==>
\<forall>K KK. KK \<subseteq> - (range shrK) \<longrightarrow>
(Key K \<in> analz (Key`KK \<union> (spies evs))) =
(K \<in> KK | Key K \<in> analz (spies evs))"
apply (erule recur.induct)
apply (drule_tac [4] RA2_analz_spies,
drule_tac [5] respond_imp_responses,
drule_tac [6] RA4_analz_spies, analz_freshK, spy_analz)
txt\<open>RA3\<close>
apply (simp_all add: resp_analz_image_freshK_lemma)
done
(*Instance of the lemma with H replaced by (spies evs):
[| RB \<in> responses evs; evs \<in> recur; |]
==> KK \<subseteq> - (range shrK) \<longrightarrow>
Key K \<in> analz (insert RB (Key`KK \<union> spies evs)) =
(K \<in> KK | Key K \<in> analz (insert RB (spies evs)))
*)
lemmas resp_analz_image_freshK =
resp_analz_image_freshK_lemma [OF _ raw_analz_image_freshK]
lemma analz_insert_freshK:
"[| evs \<in> recur; KAB \<notin> range shrK |]
==> (Key K \<in> analz (insert (Key KAB) (spies evs))) =
(K = KAB | Key K \<in> analz (spies evs))"
by (simp del: image_insert
add: analz_image_freshK_simps raw_analz_image_freshK)
text\<open>Everything that's hashed is already in past traffic.\<close>
lemma Hash_imp_body:
"[| Hash \<lbrace>Key(shrK A), X\<rbrace> \<in> parts (spies evs);
evs \<in> recur; A \<notin> bad |] ==> X \<in> parts (spies evs)"
apply (erule rev_mp)
apply (erule recur.induct,
drule_tac [6] RA4_parts_spies,
drule_tac [5] respond_imp_responses,
drule_tac [4] RA2_parts_spies)
txt\<open>RA3 requires a further induction\<close>
apply (erule_tac [5] responses.induct, simp_all)
txt\<open>Fake\<close>
apply (blast intro: parts_insertI)
done
(** The Nonce NA uniquely identifies A's message.
This theorem applies to steps RA1 and RA2!
Unicity is not used in other proofs but is desirable in its own right.
**)
lemma unique_NA:
"[| Hash \<lbrace>Key(shrK A), Agent A, B, NA, P\<rbrace> \<in> parts (spies evs);
Hash \<lbrace>Key(shrK A), Agent A, B',NA, P'\<rbrace> \<in> parts (spies evs);
evs \<in> recur; A \<notin> bad |]
==> B=B' \<and> P=P'"
apply (erule rev_mp, erule rev_mp)
apply (erule recur.induct,
drule_tac [5] respond_imp_responses)
apply (force, simp_all)
txt\<open>Fake\<close>
apply blast
apply (erule_tac [3] responses.induct)
txt\<open>RA1,2: creation of new Nonce\<close>
apply simp_all
apply (blast dest!: Hash_imp_body)+
done
(*** Lemmas concerning the Server's response
(relations "respond" and "responses")
***)
lemma shrK_in_analz_respond [simp]:
"[| RB \<in> responses evs; evs \<in> recur |]
==> (Key (shrK B) \<in> analz (insert RB (spies evs))) = (B\<in>bad)"
apply (erule responses.induct)
apply (simp_all del: image_insert
add: analz_image_freshK_simps resp_analz_image_freshK, auto)
done
lemma resp_analz_insert_lemma:
"[| Key K \<in> analz (insert RB H);
\<forall>K KK. KK \<subseteq> - (range shrK) \<longrightarrow>
(Key K \<in> analz (Key`KK \<union> H)) =
(K \<in> KK | Key K \<in> analz H);
RB \<in> responses evs |]
==> (Key K \<in> parts{RB} | Key K \<in> analz H)"
apply (erule rev_mp, erule responses.induct)
apply (simp_all del: image_insert parts_image
add: analz_image_freshK_simps resp_analz_image_freshK_lemma)
txt\<open>Simplification using two distinct treatments of "image"\<close>
apply (simp add: parts_insert2, blast)
done
lemmas resp_analz_insert =
resp_analz_insert_lemma [OF _ raw_analz_image_freshK]
text\<open>The last key returned by respond indeed appears in a certificate\<close>
lemma respond_certificate:
"(Hash[Key(shrK A)] \<lbrace>Agent A, B, NA, P\<rbrace>, RA, K) \<in> respond evs
==> Crypt (shrK A) \<lbrace>Key K, B, NA\<rbrace> \<in> parts {RA}"
apply (ind_cases "(Hash[Key (shrK A)] \<lbrace>Agent A, B, NA, P\<rbrace>, RA, K) \<in> respond evs")
apply simp_all
done
(*This unicity proof differs from all the others in the HOL/Auth directory.
The conclusion isn't quite unicity but duplicity, in that there are two
possibilities. Also, the presence of two different matching messages in
the inductive step complicates the case analysis. Unusually for such proofs,
the quantifiers appear to be necessary.*)
lemma unique_lemma [rule_format]:
"(PB,RB,KXY) \<in> respond evs ==>
\<forall>A B N. Crypt (shrK A) \<lbrace>Key K, Agent B, N\<rbrace> \<in> parts {RB} \<longrightarrow>
(\<forall>A' B' N'. Crypt (shrK A') \<lbrace>Key K, Agent B', N'\<rbrace> \<in> parts {RB} \<longrightarrow>
(A'=A \<and> B'=B) | (A'=B \<and> B'=A))"
apply (erule respond.induct)
apply (simp_all add: all_conj_distrib)
apply (blast dest: respond_certificate)
done
lemma unique_session_keys:
"[| Crypt (shrK A) \<lbrace>Key K, Agent B, N\<rbrace> \<in> parts {RB};
Crypt (shrK A') \<lbrace>Key K, Agent B', N'\<rbrace> \<in> parts {RB};
(PB,RB,KXY) \<in> respond evs |]
==> (A'=A \<and> B'=B) | (A'=B \<and> B'=A)"
by (rule unique_lemma, auto)
(** Crucial secrecy property: Spy does not see the keys sent in msg RA3
Does not in itself guarantee security: an attack could violate
the premises, e.g. by having A=Spy **)
lemma respond_Spy_not_see_session_key [rule_format]:
"[| (PB,RB,KAB) \<in> respond evs; evs \<in> recur |]
==> \<forall>A A' N. A \<notin> bad \<and> A' \<notin> bad \<longrightarrow>
Crypt (shrK A) \<lbrace>Key K, Agent A', N\<rbrace> \<in> parts{RB} \<longrightarrow>
Key K \<notin> analz (insert RB (spies evs))"
apply (erule respond.induct)
apply (frule_tac [2] respond_imp_responses)
apply (frule_tac [2] respond_imp_not_used)
apply (simp_all del: image_insert parts_image
add: analz_image_freshK_simps split_ifs shrK_in_analz_respond
resp_analz_image_freshK parts_insert2)
txt\<open>Base case of respond\<close>
apply blast
txt\<open>Inductive step of respond\<close>
apply (intro allI conjI impI, simp_all)
txt\<open>by unicity, either \<^term>\<open>B=Aa\<close> or \<^term>\<open>B=A'\<close>, a contradiction
if \<^term>\<open>B \<in> bad\<close>\<close>
apply (blast dest: unique_session_keys respond_certificate)
apply (blast dest!: respond_certificate)
apply (blast dest!: resp_analz_insert)
done
lemma Spy_not_see_session_key:
"[| Crypt (shrK A) \<lbrace>Key K, Agent A', N\<rbrace> \<in> parts (spies evs);
A \<notin> bad; A' \<notin> bad; evs \<in> recur |]
==> Key K \<notin> analz (spies evs)"
apply (erule rev_mp)
apply (erule recur.induct)
apply (drule_tac [4] RA2_analz_spies,
frule_tac [5] respond_imp_responses,
drule_tac [6] RA4_analz_spies,
simp_all add: split_ifs analz_insert_eq analz_insert_freshK)
txt\<open>Fake\<close>
apply spy_analz
txt\<open>RA2\<close>
apply blast
txt\<open>RA3\<close>
apply (simp add: parts_insert_spies)
apply (metis Key_in_parts_respond parts.Body parts.Fst resp_analz_insert
respond_Spy_not_see_session_key usedI)
txt\<open>RA4\<close>
apply blast
done
(**** Authenticity properties for Agents ****)
text\<open>The response never contains Hashes\<close>
lemma Hash_in_parts_respond:
"[| Hash \<lbrace>Key (shrK B), M\<rbrace> \<in> parts (insert RB H);
(PB,RB,K) \<in> respond evs |]
==> Hash \<lbrace>Key (shrK B), M\<rbrace> \<in> parts H"
apply (erule rev_mp)
apply (erule respond_imp_responses [THEN responses.induct], auto)
done
text\<open>Only RA1 or RA2 can have caused such a part of a message to appear.
This result is of no use to B, who cannot verify the Hash. Moreover,
it can say nothing about how recent A's message is. It might later be
used to prove B's presence to A at the run's conclusion.\<close>
lemma Hash_auth_sender [rule_format]:
"[| Hash \<lbrace>Key(shrK A), Agent A, Agent B, NA, P\<rbrace> \<in> parts(spies evs);
A \<notin> bad; evs \<in> recur |]
==> Says A B (Hash[Key(shrK A)] \<lbrace>Agent A, Agent B, NA, P\<rbrace>) \<in> set evs"
apply (unfold HPair_def)
apply (erule rev_mp)
apply (erule recur.induct,
drule_tac [6] RA4_parts_spies,
drule_tac [4] RA2_parts_spies,
simp_all)
txt\<open>Fake, RA3\<close>
apply (blast dest: Hash_in_parts_respond)+
done
(** These two results subsume (for all agents) the guarantees proved
separately for A and B in the Otway-Rees protocol.
**)
text\<open>Certificates can only originate with the Server.\<close>
lemma Cert_imp_Server_msg:
"[| Crypt (shrK A) Y \<in> parts (spies evs);
A \<notin> bad; evs \<in> recur |]
==> \<exists>C RC. Says Server C RC \<in> set evs \<and>
Crypt (shrK A) Y \<in> parts {RC}"
apply (erule rev_mp, erule recur.induct, simp_all)
txt\<open>Fake\<close>
apply blast
txt\<open>RA1\<close>
apply blast
txt\<open>RA2: it cannot be a new Nonce, contradiction.\<close>
apply blast
txt\<open>RA3. Pity that the proof is so brittle: this step requires the rewriting,
which however would break all other steps.\<close>
apply (simp add: parts_insert_spies, blast)
txt\<open>RA4\<close>
apply blast
done
end
|
function h=hermfun(t,j)
%HERMFUN Orthonormal Hermite functions. [with F. Rekibi]
%
%   H=HERMFUN(T,N) generates the first N+1 orthonormal Hermite functions
%   [H0,...HN] on a time axis specified by the column vector T.
%
% HERMFUN uses the expression of Simons et al. 2003.
%
% Note that H(:,1) is the zeroth-order Hermite function, which is equal
% to a Gaussian. H(:,2) is the first-order function, and so forth.
%
% See also HERMPOLY.
%
% 'hermfun --f' generates a sample figure; compare with the Hermite
% function figure at
%
% http://en.wikipedia.org/wiki/Hermite_polynomials#Definition
%
% Usage: h=hermfun(t,n);
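%
%   Example (illustrative; requires JLAB's HERMPOLY on the path):
%
%        t=(-6:0.01:6)';
%        h=hermfun(t,3);   %columns of h are H0...H3 evaluated on t
%        plot(t,h)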
% _________________________________________________________________
% This is part of JLAB --- type 'help jlab' for more information
% (C) 2004--2015 F. Rekibi and J. M. Lilly
% --- type 'help jlab_license' for details
% 05.08.07 JML fixed bug to include N+1 columns
% 'hermfun --f' generates a sample figure; compare with Figure 2
% of Simons, van der Hilst, and Zuber (2003), JGR 108 B5.
if strcmpi(t,'--f')
type makefigs_hermfun
makefigs_hermfun;
return
end
if size(t,1)==1
t=t';
end
H=hermpoly(t,j);
E=exp(-t.^2/2)*ones(1,j+1);
HE=H.*E;
h=zeros(length(t),j+1);  %preallocate N+1 columns (orders 0..N)
for k=1:j+1
h(:,k)=HE(:,k)*frac(1,pi^(1/4))*frac(1,sqrt(2^(k-1)*factorial(k-1)));
end
|
import Pkg
import Random
@inline function _precompile_execution_content(pkgs::AbstractVector{<:Symbol};
notest::AbstractVector{<:Symbol} = Symbol[],
test_stdlibs::Bool = false)
stdlib_names = Symbol.(collect(values(Pkg.Types.stdlibs())))
result = "\n"
result *= "import Pkg\n"
result *= "import Test\n"
for pkg in pkgs
result *= "import $(pkg)\n"
pkg_is_stdlib = pkg in stdlib_names
pkg_is_not_stdlib = !pkg_is_stdlib
pkg_is_in_notest = pkg in notest
pkg_is_not_in_notest = !pkg_is_in_notest
if pkg_is_not_in_notest && (test_stdlibs || pkg_is_not_stdlib)
module_name = "Test_$(pkg)_$(Random.randstring(16))"
result *= "module $(module_name)\n"
result *= "import $(pkg)\n"
result *= "if Base.pkgdir($(pkg)) !== nothing && isfile(joinpath(Base.pkgdir($(pkg)), \"test\", \"runtests.jl\"))\n"
result *= "include(joinpath(Base.pkgdir($(pkg)), \"test\", \"runtests.jl\"))\n"
result *= "end # end if\n"
result *= "end # end module $(module_name)\n"
result *= "import .$(module_name)\n"
end
end
return result
end
@inline function _precompile_execution(filename::AbstractString,
pkgs::AbstractVector{<:Symbol};
notest::AbstractVector{<:Symbol} = Symbol[],
test_stdlibs::Bool = false)
    precompile_execution_content = _precompile_execution_content(pkgs;
notest = notest,
test_stdlibs = test_stdlibs)
rm(filename; force = true, recursive = true)
mkpath(dirname(filename))
open(filename, "w") do io
print(io, precompile_execution_content)
end
return filename
end
@inline function _precompile_execution(pkgs::AbstractVector{<:Symbol};
notest::AbstractVector{<:Symbol} = Symbol[],
test_stdlibs::Bool = false)
my_temp_dir = mktempdir(; cleanup=true)
my_temp_name = joinpath(my_temp_dir, "precompile_execution_file.jl")
return _precompile_execution(my_temp_name,
pkgs;
notest = notest,
test_stdlibs = test_stdlibs)
end
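# Illustrative usage (hypothetical package list): this writes a temporary file
# that imports each package and, unless the package is excluded via `notest`,
# runs its test suite inside a freshly named module. The returned path can then
# be used as a precompile execution file, e.g. with PackageCompiler-style tools.
#
#     path = _precompile_execution([:JSON, :Random]; notest = [:JSON])
#     print(read(path, String))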
|
using IfElse
using Test
x = 2
@test IfElse.ifelse(x>0,1,-1) == Core.ifelse(x>0,1,-1)
|
# .hlpr_summary_cor ----
.hlpr_summary_cor <- function(object, stars = TRUE) {
frame <- .hlpr_get_matrix(object)
target_col <- names(object)[names(object) %in% c("r", "rho", "tau", "Median")][1]
if (is.na(target_col)) {
target_col <- names(object)[!names(object) %in% c("Parameter1", "Parameter2")][1]
}
out <- .hlpr_create_matrix(frame, object, column = target_col)
# Fill attributes
for (i in names(object)[!names(object) %in% c("Group", "Parameter1", "Parameter2", target_col)]) {
attri <- .hlpr_create_matrix(frame, object, column = i)
attr(out, i) <- attri
}
# Transfer attributes
attributes(out) <- c(attributes(out), attributes(object)[!names(attributes(object)) %in% c("names", "row.names", "class", names(attributes(out)))])
attr(out, "stars") <- stars
attr(out, "coefficient_name") <- target_col
return(out)
}
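# Illustrative call (hypothetical `obj`; e.g. a correlation data frame with
# "Parameter1"/"Parameter2" columns and an "r" column):
#   m <- .hlpr_summary_cor(obj, stars = FALSE)
#   attr(m, "coefficient_name")  # e.g. "r"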
|
-- PCA.hs
-- Compiles to a library.
-- This library contains the functions needed to perform principal
-- component analysis on vectorized data.
module PCA
( Hyperplane (..)
, mean
, saveHyperplane
, loadHyperplane
, linRegression
, distance
) where
import Numeric.LinearAlgebra.HMatrix
import Numeric.LinearAlgebra.Data
import Data.Packed.Matrix
import Data.Packed.Vector
data Hyperplane = Hyperplane Int (Vector Double) (Matrix Double) deriving (Show, Read)
-- First argument is the dimension of the ambient space.
-- Second argument is some vector in the hyperplane.
-- Rows of third argument form the basis of the parallel
-- linear subspace.
-- The Hyperplane is the coset of the second arg mod the third arg.
saveHyperplane :: FilePath -> Hyperplane -> IO ()
-- Saves a hyperplane in machine-readable plaintext format.
saveHyperplane f h = writeFile f (show h)
loadHyperplane :: FilePath -> IO Hyperplane
-- Loads a hyperplane that was saved by `saveHyperplane`.
loadHyperplane f = fmap read (readFile f)
mean :: Matrix Double -> Vector Double
-- Returns the average of the rows of a matrix.
mean x = scale n . sum . toRows $ x
where
n = 1 / (fromIntegral . rows $ x) :: Double
normalize :: Matrix Double -> Matrix Double
-- Subtracts the row average from each row of a matrix.
normalize x = x - (fromRows . replicate (rows x) . mean $ x)
linRegression :: Int -> Matrix Double -> Hyperplane
-- Finds the best-fit hyperplane of the rows of a matrix.
-- First argument is the number of singular values (and hence basis
-- directions) to keep.
-- Second argument is your data; rows are individual datapoints.
-- The first argument controls the dimension of the hyperplane.
-- If it is too large, the hyperplane can be as big as the whole
-- space; if it is too small (e.g. zero), the hyperplane degenerates
-- to a single point (the mean).
linRegression numsv inmat = Hyperplane n m rows
where
n = cols inmat
m = mean inmat
normalizedmat = normalize inmat
(_,rows) = trimSVDRight numsv normalizedmat
trimSVDRight :: Int -> Matrix Double -> (Vector Double, Matrix Double)
-- Returns partial diagonal and right-side factor from SVD of
-- a matrix.
trimSVDRight numsv inmat = (v, outmat)
where
n' = min numsv (dim v1 - 1)
(v1,m1) = rightSV inmat
v = fromList . take n' . toList $ v1
outmat = trans $ takeColumns (dim v) m1
distance :: Hyperplane -> Vector Double -> Double
-- Finds the distance from a vector to a hyperplane.
distance (Hyperplane n hv m) v
| cols m == 0 = norm_2 v'
| rows m == 0 = norm_2 v'
| otherwise = norm_2 $ distancevec
where
v' = v - hv
distancevec = let a = m #> v'
b = trans m #> a in
a `seq` b `seq` (b - v')
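-- Illustrative GHCi session (hypothetical data, not part of the library):
--
-- > let pts = (4><2) [0,0, 1,1, 2,2, 3,3] :: Matrix Double
-- > let hp = linRegression 1 pts          -- keep one singular direction
-- > distance hp (fromList [5,5])          -- ~0: the point lies on the fitted line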
|
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE QuantifiedConstraints #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
module Q.Interpolation where
import qualified Q.SortedVector as SV
import Numeric.GSL.Interpolation
import qualified Numeric.LinearAlgebra as V (Vector, fromList)
import Foreign (Storable)
import Data.List
class (Ord k, Storable k, Storable v) => Interpolator a k v where
interpolate :: a -> [(k, v)] -> k -> v
class (Ord k, Storable k, Storable v) => InterpolatorV a k v where
interpolateV :: a -> SV.SortedVector k -> V.Vector v -> k -> v
instance (Ord k, Storable k, Storable v, InterpolatorV a k v) => Interpolator a k v where
interpolate a pts = interpolateV a xs' ys' where
(xs, ys) = (unzip . sortOn fst) pts
xs' = SV.fromSortedList xs
ys' = V.fromList ys
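-- A concrete InterpolatorV instance would typically delegate to GSL, along
-- these lines (a sketch only; it assumes a SortedVector-to-Vector accessor,
-- here called SV.toVector, whose real name may differ):
--
-- data LinearInterp = LinearInterp
-- instance InterpolatorV LinearInterp Double Double where
--   interpolateV _ xs ys = evaluateV Linear (SV.toVector xs) ys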
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from src.utils import utils
class BaseAgent(object):
def __init__(self, config):
self.config = config
# self.logger = logging.getLogger("Agent")
self._set_seed() # set seed as early as possible
self._load_datasets()
self._load_loaders()
self._choose_device()
self._create_model()
self._create_optimizer()
self.current_epoch = 0
self.current_iteration = 0
self.current_val_iteration = 0
# we need these to decide best loss
self.current_loss = 0
self.current_val_metric = 0
self.best_val_metric = 0
self.iter_with_no_improv = 0
def _set_seed(self):
torch.manual_seed(self.config.seed)
np.random.seed(self.config.seed)
def _choose_device(self):
self.is_cuda = torch.cuda.is_available()
# if self.is_cuda and not self.config.cuda:
# self.logger.info("WARNING: You have a CUDA device, so you should probably enable CUDA")
        self.cuda = self.is_cuda and self.config.cuda
self.manual_seed = self.config.seed
if self.cuda: torch.cuda.manual_seed(self.manual_seed)
if self.cuda:
if not isinstance(self.config.gpu_device, list):
self.config.gpu_device = [self.config.gpu_device]
# NOTE: we do not support multi-gpu run for now
gpu_device = self.config.gpu_device[0]
# self.logger.info("User specified 1 GPU: {}".format(gpu_device))
self.device = torch.device("cuda")
torch.cuda.set_device(gpu_device)
# self.logger.info("Program will run on *****GPU-CUDA***** ")
# print_cuda_statistics()
else:
self.device = torch.device("cpu")
torch.manual_seed(self.manual_seed)
# self.logger.info("Program will run on *****CPU*****\n")
def _load_datasets(self):
raise NotImplementedError
def _load_loaders(self):
self.train_loader, self.train_len = self._create_dataloader(
self.train_dataset,
self.config.optim.train_batch_size,
shuffle=True,
)
self.val_loader, self.val_len = self._create_test_dataloader(
self.val_dataset,
self.config.optim.test_batch_size,
)
self.test_loader, self.test_len = self._create_test_dataloader(
self.test_dataset,
self.config.optim.test_batch_size,
)
def _create_dataloader(self, dataset, batch_size, shuffle=True):
dataset_size = len(dataset)
loader = DataLoader(dataset, batch_size=batch_size,
shuffle=shuffle, pin_memory=True,
num_workers=self.config.data_loader_workers)
return loader, dataset_size
def _create_test_dataloader(self, dataset, batch_size):
return self._create_dataloader(dataset, batch_size, shuffle=False)
def _create_model(self):
raise NotImplementedError
def _create_optimizer(self):
raise NotImplementedError
def run_validation(self):
self.validate()
def run(self):
"""
        The main entry point: runs training, backing up state on interrupt.
:return:
"""
try:
self.train()
except KeyboardInterrupt as e:
# self.logger.info("Interrupt detected. Saving data...")
self.backup()
raise e
def train(self):
"""
Main training loop
:return:
"""
for epoch in range(self.current_epoch, self.config.num_epochs):
self.current_epoch = epoch
self.train_one_epoch()
if (self.config.validate and
epoch % self.config.optim.validate_freq == 0):
self.validate() # validate every now and then
self.test()
self.save_checkpoint()
            # check if we should stop early because performance is not improving
if self.iter_with_no_improv > self.config.optim.patience:
# self.logger.info("Exceeded patience. Stop training...")
break
def train_one_epoch(self):
"""
One epoch of training
:return:
"""
raise NotImplementedError
def validate(self):
"""
One cycle of model validation
:return:
"""
raise NotImplementedError
def test(self):
"""
One cycle of model testing
:return:
"""
raise NotImplementedError
def backup(self):
"""
Backs up the model upon interrupt
"""
# self.logger.info("Backing up current version of model...")
self.save_checkpoint(filename='backup.pth.tar')
def finalise(self):
"""
Do appropriate saving after model is finished training
"""
# self.logger.info("Saving final versions of model...")
self.save_checkpoint(filename='final.pth.tar')
def save_metrics(self):
raise NotImplementedError
def save_checkpoint(self, filename="checkpoint.pth.tar"):
out_dict = self.save_metrics()
# if we aren't validating, then every time we save is the
# best new epoch!
is_best = ((self.current_val_metric == self.best_val_metric) or
not self.config.validate)
utils.save_checkpoint(out_dict, is_best, filename=filename,
folder=self.config.checkpoint_dir)
self.copy_checkpoint()
def copy_checkpoint(self, filename="checkpoint.pth.tar"):
if self.current_epoch % self.config.copy_checkpoint_freq == 0:
utils.copy_checkpoint(
filename=filename, folder=self.config.checkpoint_dir,
copyname='checkpoint_epoch{}.pth.tar'.format(self.current_epoch),
)
def load_checkpoint(self, filename):
raise NotImplementedError
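# Illustrative subclass contract (hypothetical names): a concrete agent
# implements the abstract hooks above and is then driven via run()/finalise().
#
#   class MyAgent(BaseAgent):
#       def _load_datasets(self): ...   # set self.{train,val,test}_dataset
#       def _create_model(self): ...
#       def _create_optimizer(self): ...
#       def train_one_epoch(self): ...
#       def validate(self): ...
#       def test(self): ...
#       def save_metrics(self): ...     # return the checkpoint dict
#
#   agent = MyAgent(config)
#   agent.run()
#   agent.finalise()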
|
import numpy as np
from glue.core import Data
from jdaviz import Application
from ..moment_maps import MomentMap
def test_moment_calculation(spectral_cube_wcs):
app = Application()
dc = app.data_collection
dc.append(Data(x=np.ones((3, 4, 5)), label='test', coords=spectral_cube_wcs))
mm = MomentMap(app=app)
mm.selected_data = 'test'
mm.n_moment = 0
mm.vue_calculate_moment(None)
print(dc[1].get_object())
assert mm.moment_available
assert dc[1].label == 'Moment 0: test'
assert dc[1].get_object().shape == (4, 5)
|
This post provides a bit of an overview of my plans for my as-yet unnamed RPG. One of the biggest lessons I learned with my previous game (which I talked a bit about here) is that players want to interact. The in-game chat was pretty much an afterthought in my previous game (in fact, it was the very last feature I added before going to "alpha"). Most of the work I did post-release went into community features -- an alliance system, one-on-one and group private chats, and so on.
So I wanted to make sure my new game supported a vibrant community right from the beginning. Also, I was a little tired of the limitations of working with a phone, so I wanted something that ran on a PC. But more than that, I wanted something that was cross-platform (I personally use a Mac laptop and a Linux workstation, so it needed to run on those platforms as well as Windows, of course). Because I'm a masochist, I decided to make the game browser-based.
So I made it fully 3D, with three.js as the rendering backend. In fact, it's not that hard to get WebGL up and running -- the hardest part, at least for me, was content. The initial "rewrite" was still talking to the same backend server, so it still supported all those wonderful multiplayer features, but I had an MD2 model (from Quake) as my main character, and some procedurally generated tree I found somewhere as decoration.
The most interesting thing about the rewrite is the terrain. I found this cool article on Gamasutra and implemented it as a GLSL shader in three.js.
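The wiring on the three.js side is only a few lines -- something like this (an illustrative sketch, not my actual code; the uniform and element names here are made up):

    var terrainMaterial = new THREE.ShaderMaterial({
        uniforms: { heightMap: { value: heightTexture } },
        vertexShader: document.getElementById('terrainVert').textContent,
        fragmentShader: document.getElementById('terrainFrag').textContent
    });
    var terrain = new THREE.Mesh(
        new THREE.PlaneGeometry(512, 512, 256, 256),
        terrainMaterial);
    scene.add(terrain);

The interesting work all happens in the GLSL itself.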
Then I taught myself how to use blender to create some basic 3D objects and animations. All the content in the current gameplay video was made by me in blender. Yay! |
Top 10 Comma Checker Tools Online (an Education & Books top list on rankly.com):
These tools check your text and identify errors in comma usage, ensuring that every comma is used correctly.
The best service for checking the comma usage in your text; it highlights any mistakes it identifies.
Ginger Software: ensure that your punctuation is correct by using the Ginger punctuation checker tool.
This service ensures that all commas in the text are used in the appropriate places.
It is a free online tool that checks for all punctuation mistakes, commas included.
Write error-free articles and blog posts by having the content checked by this tool for comma usage and grammar errors.
An easy-to-use comma checker: just place your text in the editor and have it checked.
A comma-checking service performed by human reviewers, who pinpoint all the punctuation errors.
A popular choice for bloggers and writers who want correct punctuation.
The sure way to correctly punctuate all your sentences, with every comma placed in the correct position. |
State Before: p : ℕ
R : Type ?u.121832
hp : Fact (Nat.Prime p)
inst✝ : CommRing R
n : ℕ
⊢ ↑constantCoeff (wittMul p n) = 0
State After: p : ℕ
R : Type ?u.121832
hp : Fact (Nat.Prime p)
inst✝ : CommRing R
n : ℕ
⊢ ↑constantCoeff (X 0 * X 1) = 0
Tactic: apply constantCoeff_wittStructureInt p _ _ n
State Before: p : ℕ
R : Type ?u.121832
hp : Fact (Nat.Prime p)
inst✝ : CommRing R
n : ℕ
⊢ ↑constantCoeff (X 0 * X 1) = 0
State After: no goals
Tactic: simp only [MulZeroClass.mul_zero, RingHom.map_mul, constantCoeff_X]
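-- The trace above corresponds to a tactic proof shaped like the following
-- (a sketch; the theorem name and binder spelling are assumptions):
--
-- theorem constantCoeff_wittMul (p : ℕ) [Fact (Nat.Prime p)] (n : ℕ) :
--     constantCoeff (wittMul p n) = 0 := by
--   apply constantCoeff_wittStructureInt p _ _ n
--   simp only [MulZeroClass.mul_zero, RingHom.map_mul, constantCoeff_X]
|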
Require Import DFSSpec.
Require Import Coq.FSets.FSetInterface.
Require Import Coq.Structures.OrderedTypeEx.
Require Import Graph.
Require Import Forest.
Require Import Path.
Require Import Coq.Init.Nat.
Require Import Helper.
Require Import Coq.Arith.PeanoNat.
Require Import Omega.
Require Import Coq.FSets.FSetProperties.
Require Import DerivedProofs.
Require Import SCCDef.
Require Import Coq.Classes.RelationClasses.
Module SCCAlg(O: UsualOrderedType)(S: FSetInterface.Sfun O)(G: Graph O S)(F: Forest O S)(D: DFSBase)
(D' : DFSCustomOrder).
Module P := Helper.Partition O S.
Module P2 := FSetProperties.WProperties_fun O S.
Module O2 := OrderedTypeFacts O.
Module SN := Helper.SetNeq O S.
Module SC := SCCDef.SCCDef O S G.
Module Pa := SC.Pa.
Import SC.
(** Correctness of SCC algorithm **)
(*Lemma 22.13 in CLRS*)
Lemma scc_path_one_dir: forall g C C' u v u' v',
scc C g ->
scc C' g ->
S.equal C C' = false ->
S.In u C ->
S.In v C ->
S.In u' C' ->
S.In v' C' ->
Pa.path g u u' ->
~Pa.path g v' v.
Proof.
intros. intro. rewrite Pa.path_path_list_rev in H6.
rewrite Pa.path_path_list_rev in H7. destruct_all.
assert (A:= H0).
unfold scc in H0. destruct H0. unfold strongly_connected in H0. destruct_all.
destruct (O.eq_dec u' v'). unfold O.eq in e. subst.
assert (Pa.path_list_rev g u v (x ++ v' :: x0) = true). apply Pa.path_app. split; assumption.
assert (S.In v' C). eapply scc_path_within. apply H. apply H2. apply H3. apply H11. solve_in.
eapply neq_scc_disjoint in H1. apply H1. split. apply H12. apply H4. apply H. apply A.
assert (Pa.path g u' v'). apply H10; try(assumption). rewrite Pa.path_path_list_rev in H11. destruct H11.
assert (Pa.path_list_rev g u v (x ++ v' :: x1 ++ u' :: x0) = true). apply Pa.path_app. split. apply H7.
apply Pa.path_app. split. apply H11. apply H6. assert (S.In v' C). eapply scc_path_within. apply H.
apply H2. apply H3. apply H12. solve_in. eapply neq_scc_disjoint in H1. apply H1. split.
apply H13. apply H5. apply H. apply A.
Qed.
(** Results about times of 1st DFS Pass **)
Module D1 := (D O S G F).
Module Der1 := (DerivedProofs.DerivedProofs O S G F D1).
Module M := (Helper.MinMax O).
Import M.
Definition min_elt_set (c: S.t) (f: O.t -> nat) : option O.t:=
min_elt_list (S.elements c) f.
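(*For instance, instantiating f with (D1.d_time None g), as d_time_scc does below,
  picks the vertex of c with the earliest discovery time; since discovery times
  are unique, this minimum is unique as well.*)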
Lemma min_elt_set_none_iff_empty: forall s f,
min_elt_set s f = None <-> S.is_empty s = true.
Proof.
intros. unfold min_elt_set. rewrite min_elt_list_none_iff_empty. rewrite <- P2.elements_Empty.
apply P2.FM.is_empty_iff.
Qed.
Lemma min_elt_set_finds_min: forall f x s,
(forall x y, S.In x s -> S.In y s -> f x = f y -> x = y) ->
min_elt_set s f = Some x ->
forall y, S.In y s -> y <> x -> f x < f y.
Proof.
intros. unfold min_elt_set in H0. eapply min_elt_list_finds_min in H0. apply H0. intros.
apply H. apply S.elements_2. rewrite <- In_InA_equiv. apply H3.
apply S.elements_2. rewrite <- In_InA_equiv. apply H4. apply H5.
apply S.elements_1 in H1. rewrite <- In_InA_equiv in H1. apply H1. apply H2.
Qed.
Lemma min_elt_set_in_set: forall f x s,
min_elt_set s f = Some x ->
S.In x s.
Proof.
intros. unfold min_elt_set in H. apply min_elt_list_in_list in H.
rewrite In_InA_equiv in H.
apply S.elements_2 in H. apply H.
Qed.
Lemma find_min_scc_exists: forall f g c,
scc c g ->
exists x, min_elt_set c f = Some x.
Proof.
intros. destruct (min_elt_set c f) eqn : ?. exists t. reflexivity.
rewrite min_elt_set_none_iff_empty in Heqo. unfold scc in H. destruct H.
unfold strongly_connected in H. destruct_all. rewrite H in Heqo. inversion Heqo.
Qed.
(*Definition of the discovery time of an SCC - I define it as the vertex that is discovered first (rather than
the time itself)*)
Definition d_time_scc g c (H: scc c g) :=
min_elt_set c (D1.d_time None g).
Definition max_elt_set (c: S.t) (f: O.t -> nat) : option O.t:=
max_elt_list (S.elements c) f.
Lemma max_elt_set_none_iff_empty: forall s f,
max_elt_set s f = None <-> S.is_empty s = true.
Proof.
intros. unfold max_elt_set. rewrite max_elt_list_none_iff_empty. rewrite <- P2.elements_Empty.
apply P2.FM.is_empty_iff.
Qed.
Lemma max_elt_set_finds_max: forall f x s,
(forall x y, S.In x s -> S.In y s -> f x = f y -> x = y) ->
max_elt_set s f = Some x ->
forall y, S.In y s -> y <> x -> f y < f x.
Proof.
intros. unfold max_elt_set in H0. eapply max_elt_list_finds_max in H0. apply H0. intros.
apply H. apply S.elements_2. rewrite <- In_InA_equiv. apply H3.
apply S.elements_2. rewrite <- In_InA_equiv. apply H4. apply H5.
apply S.elements_1 in H1. rewrite <- In_InA_equiv in H1. apply H1. apply H2.
Qed.
Lemma max_elt_set_in_set: forall f x s,
max_elt_set s f = Some x ->
S.In x s.
Proof.
intros. unfold max_elt_set in H. apply max_elt_list_in_list in H.
rewrite In_InA_equiv in H.
apply S.elements_2 in H. apply H.
Qed.
Lemma find_max_scc_exists: forall f g c,
scc c g ->
exists x, max_elt_set c f = Some x.
Proof.
intros. destruct (max_elt_set c f) eqn : ?. exists t. reflexivity.
rewrite max_elt_set_none_iff_empty in Heqo. unfold scc in H. destruct H.
unfold strongly_connected in H. destruct_all. rewrite H in Heqo. inversion Heqo.
Qed.
(*Definition of finish time of SCC*)
Definition f_time_scc g c (H: scc c g) :=
max_elt_set c (D1.f_time None g).
(*This is a consequence of either my poor planning or a less than optimal use of modules*)
Lemma path_module_equiv: forall g x y,
Pa.path g x y <-> D1.P.path g x y.
Proof.
intros. split; intros; induction H.
- constructor. apply H.
- eapply D1.P.p_continue. apply IHpath. apply H0.
- constructor. apply H.
- eapply Pa.p_continue. apply IHpath. apply H0.
Qed.
(*A major lemma in establishing the correctness of the SCC algorithm: if we have two distinct SCCs C and C' and
there is an edge from C to C', then f(C) > f(C') (this implies that the SCC with the largest finish time is a source
node in G^SCC).*)
(*Lemma 22.14 in CLRS*)
Lemma scc_finish_time: forall g c c' u v (Hc: scc c g) (Hc': scc c' g) x y,
S.equal c c' = false ->
G.contains_edge g u v = true ->
S.In u c ->
S.In v c' ->
f_time_scc g c Hc = Some x ->
f_time_scc g c' Hc' = Some y ->
D1.f_time None g x > D1.f_time None g y.
Proof.
intros. assert (exists x, d_time_scc g c Hc = Some x). unfold d_time_scc. eapply find_min_scc_exists.
apply Hc. assert (exists x, d_time_scc g c' Hc' = Some x). unfold d_time_scc. eapply find_min_scc_exists.
apply Hc'. destruct_all. assert (((D1.d_time None g x1) > (D1.d_time None g x0)) \/
((D1.d_time None g x1) < (D1.d_time None g x0)) \/ ((D1.d_time None g x1) = (D1.d_time None g x0))) by omega.
destruct H7. (*D(c) > D(c')*)
(*Proof: at time d[x0] all vertices in c' are white, so they are all descendants of x0; hence x0 finishes after
all of them, so x0 = y*)
assert (S.In x0 c'). { unfold d_time_scc in H6. apply min_elt_set_in_set in H6. apply H6. }
assert (A:= Hc). assert (B:= Hc').
unfold scc in Hc. unfold scc in Hc'. destruct Hc. destruct_all.
unfold strongly_connected in s0. unfold strongly_connected in s. destruct_all.
assert (G.contains_vertex g x0 = true). apply e0. apply H8.
pose proof (D1.discovery_exists None g x0 H9). destruct H10 as [s].
assert (forall v, S.In v c' -> v <> x0 -> D1.white None g s v = true). { intros.
rewrite D1.white_def. rewrite Nat.ltb_lt. unfold d_time_scc in H6.
eapply min_elt_set_finds_min in H6. rewrite <- H10 in H6. apply H6. intros.
eapply D1.d_times_unique. apply e0. apply H13. apply e0. apply H14. apply H15.
apply H11. apply H12. }
assert (forall v, S.In v c' -> v <> x0 -> exists l, D1.P.path_list_ind g x0 v (fun x => D1.white None g s x) l). {
intros. assert (C:=H12). apply (p x0) in H12. rewrite path_module_equiv in H12.
rewrite D1.P.path_path_list_rev in H12.
destruct H12 as [l]. assert (exists l, D1.P.path_list_rev g x0 v0 l = true). exists l. apply H12.
apply Der1.unique_paths in H14. destruct_all. exists x2. rewrite D1.P.path_list_ind_rev. split.
apply H14. split. intros. eapply scc_path_within in H14. apply H11. apply H14. auto. intro. subst. contradiction.
assumption. assumption. assumption. assumption. apply H11. assumption. assumption. auto. assumption. auto. }
assert (forall v, S.In v c' -> v <> x0 -> F.desc (D1.dfs_forest None g) x0 v). intros. apply D1.white_path_theorem.
apply e0. apply H8. intros.
assert (s = s0). eapply D1.state_time_unique. omega. subst. apply H12; try(assumption).
assert (x0 = y). { destruct (O.eq_dec x0 y). apply e3. assert (D1.f_time None g x0 < D1.f_time None g y).
unfold f_time_scc in H4. eapply max_elt_set_finds_max in H4. apply H4. intros.
eapply D1.f_times_unique. apply e0. apply H14. apply e0. apply H15. rewrite H16. reflexivity. apply H8.
auto. assert (F.desc (D1.dfs_forest None g) x0 y). apply H13. eapply max_elt_set_in_set. unfold f_time_scc in H4.
apply H4. auto. rewrite D1.descendant_iff_interval in H15. omega. apply e0. apply H8. apply e0.
eapply max_elt_set_in_set. unfold f_time_scc in H4. apply H4. }
subst. (*Now we know that the start and finish vertices are the same; we need to show that all vertices in c are
white when y finishes*)
pose proof (D1.finish_exists None g y H9). destruct H14 as [s'].
assert (forall x, S.In x c -> D1.white None g s' x = true). { intros.
assert (D1.white None g s x0 = true). rewrite D1.white_def. rewrite H10. destruct (O.eq_dec x0 x1).
unfold O.eq in e3. subst. rewrite Nat.ltb_lt. omega. unfold d_time_scc in H5.
eapply min_elt_set_finds_min in H5. assert ( D1.d_time None g x1 < D1.d_time None g x0). apply H5.
rewrite Nat.ltb_lt. omega. intros. eapply D1.d_times_unique. apply e2. assumption. apply e2. assumption.
rewrite H18. reflexivity. assumption. apply n1.
pose proof (Der1.color_total g None s' x0). destruct H17. apply H17. destruct H17.
- rewrite D1.gray_def in H17. simplify. rewrite H14 in H18. rewrite H14 in H19.
rewrite D1.white_def in H16. rewrite H10 in H16.
assert (G.contains_vertex g x0 = true). apply e2. apply H15.
assert (y <> x0). intro. subst. eapply neq_scc_disjoint in H. apply H. split. apply H15. assumption.
apply A. assumption.
pose proof (D1.parentheses_theorem None g y x0 H9 H17 H20). rewrite Nat.ltb_lt in H16.
rewrite Nat.ltb_lt in H18. rewrite Nat.leb_le in H19. omega.
- rewrite D1.white_def in H16. rewrite D1.black_def in H17. rewrite H10 in H16. rewrite H14 in H17.
rewrite Nat.ltb_lt in H16. rewrite Nat.leb_le in H17. assert (y <> x0). intro. subst.
eapply neq_scc_disjoint in H. apply H. split. apply H15. assumption. apply A. assumption.
assert ((D1.f_time None g x0 = D1.f_time None g y) \/ (D1.f_time None g x0 < D1.f_time None g y)) by omega.
destruct H19. assert (y = x0). eapply D1.f_times_unique. apply H9. apply e2. apply H15.
rewrite <- H19. reflexivity. subst. contradiction. clear H17.
assert (F.desc (D1.dfs_forest None g) y x0). eapply D1.descendant_iff_interval. apply H9.
apply e2. apply H15. pose proof (Der1.discover_before_finish g None y x0).
assert ( D1.d_time None g y < D1.f_time None g y). apply H17; try(assumption); try(auto).
pose proof (Der1.discover_before_finish g None x0 y).
assert (D1.d_time None g x0 < D1.f_time None g x0). apply H21; try(assumption); try(auto). omega.
eapply D1.white_path_theorem in H17. destruct H17 as [l]. rewrite D1.P.path_list_ind_rev in H17. destruct_all.
destruct (O.eq_dec x0 u). unfold O.eq in e3. subst.
destruct (O.eq_dec v y). unfold O.eq in e3. subst.
assert (D1.P.path_list_rev g y y (u :: l) = true). simpl. simplify.
assert (S.In u c'). eapply scc_path_within. apply B. apply H2. apply H2. apply H22. solve_in.
exfalso. eapply neq_scc_disjoint in H. apply H. split. apply H15. apply H23. apply A. apply B.
assert (Pa.path g v y). apply p. assumption. assumption. auto. rewrite path_module_equiv in H22.
rewrite D1.P.path_path_list_rev in H22. destruct H22 as [l'].
assert (D1.P.path_list_rev g y y (l' ++ v :: u :: l) = true). apply D1.P.path_app. split. apply H22.
simpl. simplify. assert (S.In u c'). eapply scc_path_within. apply B. apply H8. apply H8. apply H23.
apply in_or_app. right. simpl. right. left. reflexivity. exfalso.
eapply neq_scc_disjoint in H. apply H. split. apply H15. apply H24. apply A. apply B.
assert (Pa.path g x0 u). apply p0; try(assumption); try(auto). rewrite path_module_equiv in H22.
rewrite D1.P.path_path_list_rev in H22. destruct H22 as [l''].
assert (D1.P.path_list_rev g y v (u :: l'' ++ x0 :: l) = true). simpl. simplify. apply D1.P.path_app.
simplify. assert (S.In u c'). eapply scc_path_within. apply B. apply H8. apply H2. apply H23. solve_in.
exfalso. eapply neq_scc_disjoint in H. apply H. split. apply H1. apply H24. apply A. apply B.
apply H9. apply H10. } assert (D1.white None g s' x = true). apply H15.
unfold f_time_scc in H3. eapply max_elt_set_in_set in H3. apply H3. rewrite D1.white_def in H16.
rewrite H14 in H16. rewrite Nat.ltb_lt in H16.
pose proof (Der1.discover_before_finish g None x y).
assert (D1.d_time None g x < D1.f_time None g x). apply H17. apply e2. unfold f_time_scc in H3.
eapply max_elt_set_in_set in H3. apply H3. apply H9. intro. subst.
eapply neq_scc_disjoint in H. apply H. split. unfold f_time_scc in H3.
eapply max_elt_set_in_set in H3. apply H3. apply H8. apply A. apply B. omega.
destruct H7. assert (A:= Hc). assert (B:= Hc'). unfold scc in Hc. unfold scc in Hc'.
destruct_all. unfold strongly_connected in s0. unfold strongly_connected in s. destruct_all.
unfold f_time_scc in *. unfold d_time_scc in *.
(*Proof: there is a white path to every vertex in c', so all of c' must finish before x1 does*)
pose proof (D1.discovery_exists None g x1). destruct H8 as [s]. apply e2.
eapply min_elt_set_in_set. apply H5.
assert (forall x, S.In x c -> x1 <> x -> D1.white None g s x = true). {
intros. rewrite D1.white_def. rewrite H8. rewrite Nat.ltb_lt. eapply min_elt_set_finds_min. intros.
eapply D1.d_times_unique. apply e2. apply H11. apply e2. apply H12. rewrite H13. reflexivity. apply H5.
apply H9. auto. }
assert (forall x, S.In x c' -> D1.white None g s x = true). { intros. rewrite D1.white_def. rewrite H8.
destruct (O.eq_dec x2 x0). unfold O.eq in e3. subst. rewrite Nat.ltb_lt. apply H7. rewrite Nat.ltb_lt.
assert (D1.d_time None g x0 < D1.d_time None g x2). eapply min_elt_set_finds_min. intros.
eapply D1.d_times_unique. apply e0. apply H11. apply e0. apply H12. rewrite H13. reflexivity.
apply H6. apply H10. auto. omega. }
assert (forall x, S.In x c' -> exists l, D1.P.path_list_ind g x1 x (fun y => D1.white None g s y) l). {
intros. destruct (O.eq_dec x1 u). unfold O.eq in e3. subst. destruct (O.eq_dec x2 v). unfold O.eq in e3.
subst. exists nil. constructor. apply H0. apply H10. apply H2.
assert (Pa.path g v x2). apply p. apply H2. apply H11. auto. rewrite path_module_equiv in H12.
rewrite D1.P.path_path_list_rev in H12. destruct H12 as [l]. exists (l ++ v :: nil).
rewrite D1.P.path_list_ind_rev. split. apply D1.P.path_app. simplify. split. intros.
apply in_app_or in H13. destruct H13. apply H10. eapply scc_path_within in H12. apply H12.
apply B. apply H2. apply H11. apply H13. simpl in H13. destruct H13. subst. apply H10. apply H2.
destruct H13. apply H10. apply H11. assert (I: S.In x1 c). eapply min_elt_set_in_set. apply H5.
assert (Pa.path g x1 u). apply p0. apply I. apply H1. auto. rewrite path_module_equiv in H12.
rewrite D1.P.path_path_list_rev in H12. eapply Der1.unique_paths in H12. destruct H12 as [l].
destruct_all. destruct (O.eq_dec v x2). unfold O.eq in e3. subst. exists (u :: l).
rewrite D1.P.path_list_ind_rev. split. simpl. simplify. split. intros.
simpl in H16. destruct H16. subst. apply H9. assumption. auto.
apply H9. eapply scc_path_within. apply A. apply I. apply H1. apply H12. apply H16. intro. subst.
contradiction. apply H10. apply H11.
assert (Pa.path g v x2). apply p; try(assumption); try(auto). rewrite path_module_equiv in H16.
rewrite D1.P.path_path_list_rev in H16. destruct H16 as [l'].
exists (l' ++ v :: u :: l). rewrite D1.P.path_list_ind_rev. split.
apply D1.P.path_app. split. apply H16. simpl. simplify. split. intros.
apply in_app_or in H17. destruct H17. apply H10. eapply scc_path_within. apply B.
apply H2. apply H11. apply H16. apply H17. simpl in H17. destruct H17. subst.
apply H10. apply H2. destruct H17. subst. apply H9. apply H1. auto.
apply H9. eapply scc_path_within. apply A. apply I. apply H1. apply H12. apply H17.
intro. subst. contradiction. apply H10. apply H11. auto. }
assert (forall x, S.In x c' -> F.desc (D1.dfs_forest None g) x1 x). intros.
eapply D1.white_path_theorem. apply e2. eapply min_elt_set_in_set. apply H5.
intros. assert (s0 = s). eapply D1.state_time_unique. omega. subst.
apply H11. apply H12. assert (F.desc (D1.dfs_forest None g) x1 y). apply H12.
eapply max_elt_set_in_set. apply H4. rewrite D1.descendant_iff_interval in H13.
destruct (O.eq_dec x x1). unfold O.eq in e3. subst. omega.
assert (D1.f_time None g x1 < D1.f_time None g x). eapply max_elt_set_finds_max in H3.
apply H3. intros. eapply D1.f_times_unique. apply e2. apply H14. apply e2. apply H15.
rewrite H16. reflexivity. eapply min_elt_set_in_set. apply H5. auto. omega.
apply e2. eapply min_elt_set_in_set. apply H5. eapply e0.
eapply max_elt_set_in_set. apply H4. assert (A:= Hc). assert (B:= Hc').
unfold scc in Hc. unfold scc in Hc'. destruct_all. unfold strongly_connected in s0.
unfold strongly_connected in s. unfold f_time_scc in *. unfold d_time_scc in *.
destruct_all. assert (x1 = x0). eapply D1.d_times_unique. apply H12.
eapply min_elt_set_in_set. apply H5. apply H9. eapply min_elt_set_in_set. apply H6.
rewrite H7. reflexivity. subst. eapply neq_scc_disjoint in H. exfalso. apply H.
split. eapply min_elt_set_in_set. apply H5. eapply min_elt_set_in_set. apply H6.
apply A. apply B.
Qed.
(*Corollary 22.15 in CLRS*)
Lemma scc_finish_time_transpose: forall g c c' u v (Hc: scc c g) (Hc': scc c' g) x y,
S.equal c c' = false ->
G.contains_edge (G.get_transpose g) u v = true ->
S.In u c ->
S.In v c' ->
f_time_scc g c Hc = Some x ->
f_time_scc g c' Hc' = Some y ->
D1.f_time None g x < D1.f_time None g y.
Proof.
intros. assert (D1.f_time None g x < D1.f_time None g y <-> D1.f_time None g y > D1.f_time None g x) by omega.
rewrite H5. clear H5. eapply scc_finish_time. assert (S.equal c' c = false). destruct (S.equal c' c) eqn : ?.
apply S.equal_2 in Heqb. rewrite Heqb in H. assert (~S.Equal c c). intro.
apply SCCAlg.P2.FM.equal_iff in H5. rewrite H5 in H. inversion H.
pose proof (P2.equal_refl c). contradiction. reflexivity. apply H5.
rewrite G.transpose_edges. apply H0. apply H2. apply H1. apply H4. apply H3.
Qed.
(** Defining the second DFS pass **)
Module D2 := (D' O S G F).
(*The second pass uses DFSCustomOrder, not DFSBase (since the order depends on the graph, and Coq's modules
do not allow this). The vertices are ordered in reverse order of their finish times from the first pass. We
prove that this is a valid GraphOrdering.*)
Program Instance reverseF (g: G.graph) : D2.G'.GraphOrdering (G.get_transpose g)
(fun v1 v2 => (D1.f_time None g v2) <? (D1.f_time None g v1)) := {
}.
Next Obligation.
rewrite Nat.ltb_lt in *. omega.
Defined.
Next Obligation.
intro. subst. rewrite Nat.ltb_lt in *. omega.
Defined.
Next Obligation.
repeat(rewrite Nat.ltb_lt). assert ((D1.f_time None g y < D1.f_time None g x) \/ (D1.f_time None g y > D1.f_time None g x)
\/ (D1.f_time None g y = D1.f_time None g x)) by omega. destruct H1; try(simplify). right. right.
eapply D1.f_times_unique. apply G.transpose_vertices. apply H.
apply G.transpose_vertices. apply H0. symmetry. apply H2.
Defined.
Section SecondPass.
Variable g' : G.graph.
Definition gt := G.get_transpose g'.
Definition lt := (fun v1 v2 => D1.f_time None g' v2 <? D1.f_time None g' v1).
(*When the root of a tree in the forest is discovered, it has the largest finish time of all remaining vertices*)
Lemma root_largest_finish_time: forall v (s: D2.state gt lt (reverseF g') None),
D2.time_of_state gt lt (reverseF g') None s = D2.d_time gt lt (reverseF g') None v ->
F.is_root (D2.dfs_forest gt lt (reverseF g') None) v = true ->
(forall (u : G.vertex), G.contains_vertex g' u = true -> D2.white gt lt (reverseF g') None s u = true ->
D1.f_time None g' v > D1.f_time None g' u).
Proof.
intros. apply G.transpose_vertices in H1. replace (G.get_transpose g') with gt in H1.
pose proof (D2.root_smallest gt ((fun v1 v2 : O.t =>
D1.f_time None g' v2 <? D1.f_time None g' v1)) (reverseF g') v s H H0 u H1 H2). simpl in H3.
rewrite Nat.ltb_lt in H3. apply H3. reflexivity.
Qed.
(*A few helper lemmas*)
Lemma get_tree_in_graph: forall g lt H o v t,
InA S.Equal t (F.get_trees (D2.dfs_forest g lt H o)) ->
S.In v t ->
G.contains_vertex g v = true.
Proof.
intros. eapply F.get_trees_root in H0. destruct H0. destruct_all. destruct (O.eq_dec v x).
unfold O.eq in e. subst. eapply D2.same_vertices. apply F.is_root_5. apply H0.
rewrite H3 in H1. apply F.desc_in_forest in H1. eapply D2.same_vertices. apply H1. auto.
Qed.
Lemma get_trees_partition_graph : forall g lt H o,
P.partition G.contains_vertex g (F.get_trees (D2.dfs_forest g lt H o)).
Proof.
intros. unfold P.partition. pose proof (F.get_trees_partition (D2.dfs_forest g lt0 H o)).
unfold F.P.partition in H0. destruct_all. split. intros. apply H0.
apply D2.same_vertices. apply H2. apply H1.
Qed.
(** Further results about discovery and finish times as they relate to trees **)
(*TODO: see about extending this/proving for the more general case *)
(*Given two DFS trees t1 and t2 with roots r1 and r2, r1 is discovered before r2 iff it
finishes before r2*)
Lemma root_times: forall g lt H o t1 t2 r1 r2,
InA S.Equal t1 (F.get_trees (D2.dfs_forest g lt H o)) ->
InA S.Equal t2 (F.get_trees (D2.dfs_forest g lt H o)) ->
S.In r1 t1 ->
S.In r2 t2 ->
F.is_root (D2.dfs_forest g lt H o) r1 = true ->
F.is_root (D2.dfs_forest g lt H o) r2 = true ->
D2.f_time g lt H o r1 < D2.f_time g lt H o r2 <-> D2.d_time g lt H o r1 < D2.d_time g lt H o r2.
Proof.
intros. assert (G.contains_vertex g r1 = true). eapply get_tree_in_graph. apply H0. assumption.
assert (G.contains_vertex g r2 = true). eapply get_tree_in_graph. apply H1. assumption.
destruct (O.eq_dec r1 r2). unfold O.eq in e. subst. omega.
assert (r1 <> r2) by auto. clear n. pose proof (D2.parentheses_theorem g lt0 H o r1 r2 H6 H7 H8).
destruct H9.
- assert (F.desc (D2.dfs_forest g lt0 H o) r1 r2). eapply D2.descendant_iff_interval; try(assumption); try(omega).
eapply F.root_no_desc in H5. exfalso. apply H5. apply H10. eapply D2.same_vertices. assumption.
- destruct H9.
+ assert (F.desc (D2.dfs_forest g lt0 H o) r2 r1). eapply D2.descendant_iff_interval; try(assumption); try(omega).
eapply F.root_no_desc in H4. exfalso. apply H4. apply H10. eapply D2.same_vertices. assumption.
+ omega.
Qed.
(*Given 2 DFS trees t1 and t2 and roots r1 and r2, if r1 finishes before r2, then r1 finishes before r2 starts*)
Lemma root_start_end: forall g lt H o t1 t2 r1 r2,
InA S.Equal t1 (F.get_trees (D2.dfs_forest g lt H o)) ->
InA S.Equal t2 (F.get_trees (D2.dfs_forest g lt H o)) ->
S.In r1 t1 ->
S.In r2 t2 ->
F.is_root (D2.dfs_forest g lt H o) r1 = true ->
F.is_root (D2.dfs_forest g lt H o) r2 = true ->
D2.f_time g lt H o r1 < D2.f_time g lt H o r2 ->
D2.f_time g lt H o r1 < D2.d_time g lt H o r2.
Proof.
intros. assert (G.contains_vertex g r1 = true). eapply get_tree_in_graph. apply H0. assumption.
assert (G.contains_vertex g r2 = true). eapply get_tree_in_graph. apply H1. assumption.
destruct (O.eq_dec r1 r2). unfold O.eq in e. subst. omega.
assert (r1 <> r2) by auto. clear n. pose proof (D2.parentheses_theorem g lt0 H o r1 r2 H7 H8 H9).
destruct H10.
- assert (F.desc (D2.dfs_forest g lt0 H o) r1 r2). eapply D2.descendant_iff_interval; try(assumption); try(omega).
eapply F.root_no_desc in H5. exfalso. apply H5. apply H11. eapply D2.same_vertices. assumption.
- destruct H10.
+ assert (F.desc (D2.dfs_forest g lt0 H o) r2 r1). eapply D2.descendant_iff_interval; try(assumption); try(omega).
eapply F.root_no_desc in H4. exfalso. apply H4. apply H11. eapply D2.same_vertices. assumption.
+ omega.
Qed.
(*Let t1 and t2 be 2 DFS trees with roots r1 and r2. Let u be in t1 and v be in t2. Then u finishes
before v is discovered*)
Lemma tree_times: forall g lt H o t1 t2 u v r1 r2,
InA S.Equal t1 (F.get_trees (D2.dfs_forest g lt H o)) ->
InA S.Equal t2 (F.get_trees (D2.dfs_forest g lt H o)) ->
S.In r1 t1 ->
S.In r2 t2 ->
F.is_root (D2.dfs_forest g lt H o) r1 = true ->
F.is_root (D2.dfs_forest g lt H o) r2 = true ->
D2.f_time g lt H o r1 < D2.f_time g lt H o r2 ->
S.In u t1 ->
S.In v t2 ->
D2.f_time g lt H o u < D2.d_time g lt H o v.
Proof.
intros. assert (G.contains_vertex g u = true).
eapply get_tree_in_graph. apply H0. apply H7.
assert (G.contains_vertex g v = true). eapply get_tree_in_graph. apply H1. apply H8.
assert (A :~S.In v t1 /\ ~S.In u t2). { split; intro;
pose proof (get_trees_partition_graph g lt0 H o); unfold P.partition in H12;
destruct_all; specialize (H13 t1 t2); destruct (S.equal t1 t2) eqn : ?.
- apply S.equal_2 in Heqb; rewrite <- Heqb in H3. assert (r1 = r2). { eapply F.tree_root_unique.
apply H0. apply H4. apply H5. apply H2. apply H3. } subst. omega.
- assert (P.disjoint t1 t2). apply H13. reflexivity. assumption. assumption. unfold P.disjoint in H14.
apply (H14 v). split; assumption.
- apply S.equal_2 in Heqb; rewrite <- Heqb in H3. assert (r1 = r2). { eapply F.tree_root_unique.
apply H0. apply H4. apply H5. apply H2. apply H3. } subst. omega.
- assert (P.disjoint t1 t2). apply H13. reflexivity. assumption. assumption. unfold P.disjoint in H14.
apply (H14 u). split; assumption. }
assert (u <> v). { intro. subst. destruct_all. contradiction. }
pose proof (D2.parentheses_theorem g lt0 H o u v H9 H10 H11). destruct H12.
- assert (F.desc (D2.dfs_forest g lt0 H o) u v). eapply D2.descendant_iff_interval. apply H9.
apply H10. omega. assert (F.desc (D2.dfs_forest g lt0 H o) r1 v). { assert (R:=H0).
apply F.get_trees_root in H0. destruct_all. assert (x = r1). eapply F.tree_root_unique.
apply R. all: try(assumption). subst. destruct (O.eq_dec u r1). unfold O.eq in e. subst. assumption.
eapply F.is_descendant_trans. apply (H19 u). auto. assumption. assumption. }
assert (S.In v t1). { assert (R:=H0).
apply F.get_trees_root in H0. destruct_all. assert (x = r1). eapply F.tree_root_unique.
apply R. assumption. assumption. assumption. assumption. subst. destruct (O.eq_dec v r1). unfold O.eq in e.
subst. assumption. apply H20. auto. assumption. } destruct_all; contradiction.
- destruct H12.
+ assert (F.desc (D2.dfs_forest g lt0 H o) v u). eapply D2.descendant_iff_interval. apply H10.
apply H9. omega. assert (F.desc (D2.dfs_forest g lt0 H o) r2 u). { assert (R:=H1).
apply F.get_trees_root in H1. destruct_all. assert (x = r2). eapply F.tree_root_unique.
apply R. all: try(assumption). subst. destruct (O.eq_dec v r2). unfold O.eq in e. subst. assumption.
eapply F.is_descendant_trans. apply (H19 v). auto. assumption. assumption. }
assert (S.In u t2). { assert (R:=H1).
apply F.get_trees_root in H1. destruct_all. assert (x = r2). eapply F.tree_root_unique.
apply R. assumption. assumption. assumption. assumption. subst. destruct (O.eq_dec u r2). unfold O.eq in e.
subst. assumption. apply H20. auto. assumption. } destruct_all; contradiction.
+ destruct H12.
* omega.
* assert (R1 := H0). assert (R2 := H1). eapply F.get_trees_root in H0. eapply F.get_trees_root in H1.
destruct_all. assert (x0 = r1). eapply F.tree_root_unique. apply R1. all: try(assumption). subst.
assert (x = r2). eapply F.tree_root_unique. apply R2. all: try(assumption). subst.
destruct (O.eq_dec u r1). unfold O.eq in e. subst. destruct (O.eq_dec v r2). unfold O.eq in e. subst.
omega. assert (F.desc (D2.dfs_forest g lt0 H o) r2 v). eapply H18. auto. assumption.
eapply D2.descendant_iff_interval in H21. assert (D2.d_time g lt0 H o r1 < D2.d_time g lt0 H o r2).
eapply root_times. apply R1. apply R2. all: try(assumption). omega. eapply get_tree_in_graph.
apply R2. assumption.
assert (F.desc (D2.dfs_forest g lt0 H o) r1 u). eapply H20. auto. assumption.
eapply D2.descendant_iff_interval in H21. assert (D2.d_time g lt0 H o r1 < D2.d_time g lt0 H o r2).
eapply root_times. apply R1. apply R2. all: try(assumption). destruct (O.eq_dec v r2). unfold O.eq in e.
subst. omega. assert (F.desc (D2.dfs_forest g lt0 H o) r2 v). eapply H18. auto. assumption.
eapply D2.descendant_iff_interval in H23. eapply root_start_end in H6. omega. apply R1. apply R2.
all: try(assumption). eapply get_tree_in_graph. apply R2. assumption.
eapply get_tree_in_graph. apply R1. assumption.
Qed.
(*Every path has a vertex along it that was discovered first*)
Lemma path_has_first_discovered: forall g u v l lt H,
Pa.path_list_rev g u v l = true ->
NoDup l -> ~In u l -> ~In v l ->
u <> v ->
exists x, (x = u \/ x = v \/ In x l) /\ (forall y, (y = u \/ y = v \/ In y l) ->y <> x ->
D2.d_time g lt H None x < D2.d_time g lt H None y).
Proof.
intros.
remember (min_elt_path u v (D2.d_time g lt0 H None) l) as y.
exists y. split. symmetry in Heqy. eapply min_elt_path_in in Heqy. simplify.
eapply min_elt_path_finds_min. intros.
pose proof (Pa.path_implies_in_graph g u v l H0).
eapply D2.d_times_unique. destruct H5. subst. simplify. destruct H5; subst; simplify.
destruct H6. subst. simplify. destruct H6; subst; simplify. apply H7.
all: symmetry in Heqy; assumption.
Qed.
(*This is a key result towards proving the correctness of Kosaraju's Algorithm: There are no paths
from an earlier DFS tree to a later tree*)
(*The proof turns out to be quite complicated.
Essentially, assume there is a path and let x be the element in the path that is first discovered.
If x is in t2 (the later tree), this contradicts the fact that u, the starting vertex, was in an
earlier tree. If x is in an earlier tree, then v (the end vertex) becomes a descendant of x by
the white path theorem, so v is in an earlier tree, which contradicts the fact that any two
DFS trees are disjoint.*)
Lemma no_path_to_later_tree: forall g lt H t1 t2 r1 r2 u v,
InA S.Equal t1 (F.get_trees (D2.dfs_forest g lt H None)) ->
InA S.Equal t2 (F.get_trees (D2.dfs_forest g lt H None)) ->
S.In r1 t1 ->
S.In r2 t2 ->
F.is_root (D2.dfs_forest g lt H None) r1 = true ->
F.is_root (D2.dfs_forest g lt H None) r2 = true ->
D2.f_time g lt H None r1 < D2.f_time g lt H None r2 ->
S.In u t1 ->
S.In v t2 ->
~Pa.path g u v.
Proof.
intros. intro. assert (D2.f_time g lt0 H None u < D2.d_time g lt0 H None v). eapply tree_times.
apply H0. apply H1. apply H2. apply H3. all: try assumption.
assert (G.contains_vertex g v = true). eapply get_tree_in_graph. apply H1. assumption.
assert (G.contains_vertex g u = true). eapply get_tree_in_graph. apply H0. assumption.
assert (A :~S.In v t1 /\ ~S.In u t2). { split; intro;
pose proof (get_trees_partition_graph g lt0 H None); unfold P.partition in H14;
destruct_all; specialize (H15 t1 t2); destruct (S.equal t1 t2) eqn : ?.
- apply S.equal_2 in Heqb; rewrite <- Heqb in H3. assert (r1 = r2). { eapply F.tree_root_unique.
apply H0. all: try assumption. } subst. omega.
- assert (P.disjoint t1 t2). apply H15. reflexivity. assumption. assumption. unfold P.disjoint in H16.
apply (H16 v). split; assumption.
- apply S.equal_2 in Heqb; rewrite <- Heqb in H3. assert (r1 = r2). { eapply F.tree_root_unique.
apply H0. all: try assumption. } subst. omega.
- assert (P.disjoint t1 t2). apply H15. reflexivity. assumption. assumption. unfold P.disjoint in H16.
apply (H16 u). split; assumption. } destruct A as [N1 N2].
assert (N: u <> v). intro. subst. contradiction.
rewrite Pa.path_path_list_rev in H9. destruct H9 as [l]. eapply Pa.path_no_dups in H9.
destruct H9 as [ld]. destruct_all. assert (A:= H9). apply (path_has_first_discovered _ _ _ _ _ H) in A.
destruct A as [fst]. destruct_all. assert (R1 := H0). assert (R2 := H1). eapply F.get_trees_root in H0.
destruct H0 as [x]. destruct H0 as [HR1 HI1]. destruct HI1 as [HI1 HD1]. assert (x = r1). eapply F.tree_root_unique.
apply R1. all: try assumption. subst. eapply F.get_trees_root in H1. destruct H1 as [x]. destruct H0 as [HR2 HI2].
destruct HI2 as [HI2 HD2]. assert (x = r2). eapply F.tree_root_unique. apply R2. all: try assumption. subst.
clear HR1. clear HR2. clear HI1. clear HI2.
destruct H17. subst.
- assert (F.desc (D2.dfs_forest g lt0 H None) u v). { eapply D2.white_path_theorem. apply H12. intros.
exists ld. rewrite D2.P.path_list_ind_rev. split. apply H9. split.
+ intros. rewrite D2.white_def. rewrite H0. rewrite Nat.ltb_lt. apply H18. simplify. intro. subst. contradiction.
+ rewrite D2.white_def. rewrite H0. rewrite Nat.ltb_lt. apply H18. simplify. intro. subst. contradiction. }
assert (F.desc (D2.dfs_forest g lt0 H None) r1 v). destruct (O.eq_dec r1 u). unfold O.eq in e. subst.
assumption. eapply F.is_descendant_trans. apply (HD1 u). auto. assumption. assumption.
rewrite <- HD1 in H1. contradiction. intro. subst. contradiction.
- destruct H0.
+ subst. specialize (H18 u). assert (D2.d_time g lt0 H None v < D2.d_time g lt0 H None u).
apply H18. left. reflexivity. apply N.
assert (D2.d_time g lt0 H None u < D2.f_time g lt0 H None u).
pose proof (D2.parentheses_theorem g lt0 H None u v H12 H11 N). omega. omega.
+ destruct (P2.In_dec fst t2).
* specialize (H18 u). assert (D2.d_time g lt0 H None fst < D2.d_time g lt0 H None u).
apply H18. left. reflexivity. intro. subst. contradiction.
assert (D2.f_time g lt0 H None u < D2.d_time g lt0 H None fst). eapply tree_times.
apply R1. apply R2. apply H2. apply H3. all: try assumption.
assert (D2.d_time g lt0 H None u < D2.f_time g lt0 H None u).
pose proof (D2.parentheses_theorem g lt0 H None u v H12 H11 N). omega. omega.
* assert (G.contains_vertex g fst = true). eapply Pa.path_implies_in_graph.
apply H9. apply H0. pose proof (get_trees_partition_graph g lt0 H None).
unfold P.partition in H17. destruct H17. specialize (H17 _ H1). destruct H17 as [t3].
destruct H17 as [R3 H17]. assert (A:= R3). eapply F.get_trees_root in A. destruct A as [r3].
destruct H20 as [HR3 HI3]. destruct HI3 as [HI3 HD3].
assert (D2.f_time g lt0 H None r3 > D2.f_time g lt0 H None r2 \/
D2.f_time g lt0 H None r3 = D2.f_time g lt0 H None r2 \/
D2.f_time g lt0 H None r3 < D2.f_time g lt0 H None r2) by omega.
destruct H20.
-- assert (D2.f_time g lt0 H None r2 < D2.d_time g lt0 H None r3). eapply root_start_end.
apply R2. apply R3. all: try assumption.
destruct (O.eq_dec v r2). unfold O.eq in e. subst. destruct (O.eq_dec fst r3). unfold O.eq in e.
subst. specialize (H18 r2). assert (D2.d_time g lt0 H None r3 < D2.d_time g lt0 H None r2).
apply H18. simplify. intro. subst. contradiction.
assert (D2.d_time g lt0 H None r2 < D2.f_time g lt0 H None r2). assert (r2 <> u) by auto.
pose proof (D2.parentheses_theorem g lt0 H None r2 u H11 H12 H23). omega. omega.
assert (F.desc (D2.dfs_forest g lt0 H None) r3 fst). apply HD3. auto. assumption.
apply D2.descendant_iff_interval in H22. specialize (H18 r2).
assert (D2.d_time g lt0 H None fst < D2.d_time g lt0 H None r2). apply H18. simplify.
intro. subst. contradiction.
assert (D2.d_time g lt0 H None r2 < D2.f_time g lt0 H None r2). assert (r2 <> u) by auto.
pose proof (D2.parentheses_theorem g lt0 H None r2 u H11 H12 H24). omega. omega.
eapply get_tree_in_graph. apply R3. assumption. assumption.
assert (F.desc (D2.dfs_forest g lt0 H None) r2 v). apply HD2. auto. assumption.
eapply D2.descendant_iff_interval in H22. destruct (O.eq_dec fst r3). unfold O.eq in e. subst.
specialize (H18 v). assert (D2.d_time g lt0 H None r3 < D2.d_time g lt0 H None v). apply H18.
simplify. intro. subst. contradiction. omega.
assert (F.desc (D2.dfs_forest g lt0 H None) r3 fst). apply HD3. auto. assumption.
apply D2.descendant_iff_interval in H23. specialize (H18 v).
assert (D2.d_time g lt0 H None fst < D2.d_time g lt0 H None v). apply H18. simplify.
intro. subst. contradiction. omega. eapply get_tree_in_graph. apply R3. assumption.
assumption. eapply get_tree_in_graph. apply R2. assumption. assumption.
-- destruct H20.
++ assert (r3 = r2). eapply D2.f_times_unique. eapply get_tree_in_graph. apply R3. assumption.
eapply get_tree_in_graph. apply R2. assumption. apply H20. subst.
destruct (S.equal t2 t3) eqn : ?. apply S.equal_2 in Heqb. rewrite Heqb in n. contradiction.
apply H19 in Heqb. unfold P.disjoint in Heqb. apply (Heqb r2). split; assumption.
assumption. assumption.
++ assert (A:= H0). eapply in_split_app_fst in A. destruct A as [l1]. destruct H21 as [l2].
destruct H21; subst. apply Pa.path_app in H9. destruct H9 as [HP1 HP2].
assert (F.desc (D2.dfs_forest g lt0 H None) fst v). eapply D2.white_path_theorem.
assumption. intros. exists l1. rewrite D2.P.path_list_ind_rev. split.
assumption. split. intros. rewrite D2.white_def. rewrite H9. rewrite Nat.ltb_lt.
apply H18. simplify. intro. subst. apply (H22 fst). apply H21. reflexivity.
rewrite D2.white_def. rewrite H9. rewrite Nat.ltb_lt. apply H18. simplify.
intro. subst. contradiction. destruct (O.eq_dec fst r3). unfold O.eq in e. subst.
rewrite <- HD3 in H9. destruct (S.equal t2 t3) eqn : ?. apply S.equal_2 in Heqb.
rewrite Heqb in n. contradiction. apply H19 in Heqb. unfold P.disjoint in Heqb.
apply (Heqb v). split; assumption. assumption. assumption. intro. subst. contradiction.
assert (F.desc (D2.dfs_forest g lt0 H None) r3 v). eapply F.is_descendant_trans. apply (HD3 fst).
auto. assumption. assumption. rewrite <- HD3 in H21. destruct (S.equal t2 t3) eqn : ?.
apply S.equal_2 in Heqb. rewrite Heqb in n. contradiction. apply H19 in Heqb.
unfold P.disjoint in Heqb. apply (Heqb v). split; assumption. assumption. assumption.
intro. subst. apply F.desc_neq in H21. contradiction. apply O.eq_dec.
Qed.
(*If vertex u is in DFS tree t and in scc C, then C is a subset of t*)
Lemma scc_subset: forall t u c g lt Ho,
InA S.Equal t (F.get_trees (D2.dfs_forest g lt Ho None)) ->
S.In u t ->
S.In u c ->
scc c g ->
(forall y, S.In y c -> S.In y t).
Proof.
intros. pose proof (get_trees_partition_graph g lt0 Ho None). pose proof (scc_partition_2 _ _ _ _ _ H2 H4 H H1 H0).
destruct H5. apply H5. apply H3. destruct H5 as [a]. destruct H5 as [b]. destruct H5 as [t'].
destruct_all. assert (P1 := H). assert (P2 := H6). apply F.get_trees_root in H. destruct H as [r1].
apply F.get_trees_root in H6. destruct H6 as [r2]. destruct_all.
assert (D2.f_time g lt0 Ho None r1 < D2.f_time g lt0 Ho None r2 \/
D2.f_time g lt0 Ho None r1 = D2.f_time g lt0 Ho None r2 \/
D2.f_time g lt0 Ho None r1 > D2.f_time g lt0 Ho None r2) by omega.
assert (S := H2). unfold scc in H2. destruct H2. unfold strongly_connected in H2. destruct_all.
assert (a <> b). { intro. subst. unfold P.partition in H4. destruct_all. apply H20 in H5.
unfold P.disjoint in H5. apply (H5 b). split; assumption. all: try assumption. }
destruct H16.
- assert (Pa.path g a b). apply H19. all: try assumption. exfalso. eapply no_path_to_later_tree.
apply P1. apply P2. apply H14. apply H12. assumption. assumption. assumption. apply H9.
apply H10. apply H21.
- destruct H16.
+ assert (r1 = r2). eapply D2.f_times_unique. eapply get_tree_in_graph. apply P1. assumption.
eapply get_tree_in_graph. apply P2. assumption. apply H16. subst.
unfold P.partition in H4. destruct H4. apply H21 in H5. unfold P.disjoint in H5.
specialize (H5 r2). exfalso. apply H5. split; assumption. all: try assumption.
+ exfalso. eapply no_path_to_later_tree. apply P2. apply P1. apply H12. apply H14.
assumption. assumption. omega. apply H10. apply H9. constructor. apply H11.
Qed.
Lemma desc_path: forall u v l,
u <> v ->
F.desc_list (D2.dfs_forest gt lt (reverseF g') None) u v l = true ->
Pa.path_list_rev gt u v l = true.
Proof.
intros. generalize dependent v. induction l; intros; simpl in *.
- eapply D2.same_edges. apply H0.
- simplify. eapply D2.same_edges. apply H1. apply IHl. intros. eapply F.desc_neq.
rewrite <- F.desc_list_iff_desc. exists l. apply H2. auto. apply H2.
Qed.
(*Every tree in the resulting DFS forest is strongly connected*)
Lemma all_trees_strongly_connected: forall t,
InA S.Equal t (F.get_trees (D2.dfs_forest gt lt (reverseF g') None)) ->
strongly_connected t gt.
Proof.
intros. destruct (strongly_connected_dec t gt). apply s.
assert (A:= H). apply F.get_trees_root in H. destruct_all.
pose proof (vertex_in_scc gt x). assert (G.contains_vertex gt x = true).
unfold gt in A. unfold lt in A. eapply get_tree_in_graph in A. apply A.
apply H0. specialize (H2 H3). destruct H2 as [C]. destruct_all.
assert (forall x0 : S.elt,
S.In x0 t ->
x0 <> x ->
exists l : list G.vertex, Pa.path_list_rev gt x x0 l = true /\ (forall y : G.vertex, In y l -> S.In y t)). {
intros. apply H1 in H5. assert (x <> x0) by auto. rewrite <- F.desc_list_iff_desc in H5.
destruct H5 as [l']. assert (B:= H5). eapply (desc_path _ _ _ H7) in H5. exists l'. simplify.
destruct (O.eq_dec y x). unfold O.eq in e. subst. assumption. apply H1. auto.
eapply F.desc_list_all_desc. apply B. apply H8. auto. }
assert ((forall x : S.elt, S.In x t -> G.contains_vertex gt x = true)). { intros.
eapply get_tree_in_graph. apply A. apply H6. }
pose proof (scc_vertex x t C gt H5 H6 n H2 H4 H0). clear H5.
destruct H7 as [v]. destruct H5 as [C']. destruct_all.
pose proof (find_max_scc_exists (D1.f_time None g') gt C H2).
pose proof (find_max_scc_exists (D1.f_time None g') gt C' H8).
destruct H12 as [fC]. destruct H13 as [fC'].
assert (forall y, S.In y C -> S.In y t). { eapply scc_subset. apply A. apply H0. apply H4.
apply H2. }
assert (fC = x). { destruct (O.eq_dec fC x). apply e.
assert (F.desc (D2.dfs_forest gt lt (reverseF g') None) x fC). apply H1. auto.
apply H14. eapply max_elt_set_in_set. apply H12.
eapply D2.descendant_iff_interval in H15. assert (B:= H12).
eapply max_elt_set_finds_max in H12. assert (D1.f_time None g' x < D1.f_time None g' fC) by apply H12.
clear H16. pose proof (D2.discovery_exists gt lt (reverseF g') None x H3).
destruct H16 as [sx]. pose proof (root_largest_finish_time x sx H16 H fC).
assert (D1.f_time None g' x > D1.f_time None g' fC). apply H17. rewrite G.transpose_vertices.
apply H6. apply H14. eapply max_elt_set_in_set. apply B. rewrite D2.white_def. rewrite H16.
rewrite Nat.ltb_lt. omega. omega. intros. eapply D1.f_times_unique. rewrite G.transpose_vertices.
unfold scc in H2. destruct H2. unfold strongly_connected in s. apply s. apply H16.
rewrite G.transpose_vertices.
unfold scc in H2. destruct H2. unfold strongly_connected in H2. apply H2. apply H17. apply H18.
assumption. auto. assumption. unfold scc in H2. destruct H2. unfold strongly_connected in H2. apply H2.
eapply max_elt_set_in_set in H12. apply H12. } subst.
assert (F.desc (D2.dfs_forest gt lt (reverseF g') None) x fC'). apply H1. intro. subst.
eapply neq_scc_disjoint in H9. apply H9. split. apply max_elt_set_in_set in H12. apply H12.
apply max_elt_set_in_set in H13. apply H13. apply H2. apply H8. eapply scc_subset.
apply A. apply H5. apply H7. apply H8. eapply max_elt_set_in_set. apply H13.
eapply D2.descendant_iff_interval in H15. assert (scc C g'). rewrite scc_transpose.
assumption. assert (scc C' g'). rewrite scc_transpose. assumption. pose proof (scc_finish_time_transpose
g' C C' x0 v H16 H17 x fC' H9 H11 H10 H7). unfold f_time_scc in H18. specialize (H18 H12 H13).
pose proof (D2.discovery_exists gt lt (reverseF g') None x H3). destruct H19 as [sx].
assert (D1.f_time None g' x > D1.f_time None g' fC'). eapply root_largest_finish_time.
apply H19. assumption. rewrite G.transpose_vertices. apply H6. eapply scc_subset.
apply A. apply H5. apply H7. assumption. apply max_elt_set_in_set in H13. assumption.
rewrite D2.white_def. rewrite H19. rewrite Nat.ltb_lt. omega. omega.
assumption. apply H6. eapply scc_subset.
apply A. apply H5. apply H7. assumption. apply max_elt_set_in_set in H13. assumption.
Qed.
(*Finally, the proof that the algorithm is correct: Every tree in the DFS forest of the second pass
of DFS is an SCC*)
Theorem scc_algorithm_correct: forall t,
InA S.Equal t (F.get_trees (D2.dfs_forest gt lt (reverseF g') None)) ->
scc t g'.
Proof.
intros. rewrite scc_transpose. assert (A:= H). apply all_trees_strongly_connected in H.
destruct (scc_dec t gt). apply s. pose proof (non_scc_has_another t gt H n).
destruct H0 as [v]. destruct H0. assert (R:= A). apply F.get_trees_root in A. destruct A as [r1].
destruct_all. pose proof (get_trees_partition_graph gt lt (reverseF g') None). unfold P.partition in H5.
destruct H5. assert (G.contains_vertex gt v = true). { unfold strongly_connected in H1. apply H1.
apply S.add_1. reflexivity. } specialize (H5 _ H7). destruct H5 as [t2]. destruct H5.
assert (R2 := H5). apply F.get_trees_root in H5. destruct H5 as [r2]. destruct_all.
unfold strongly_connected in H1. destruct_all.
assert (r1 <> v). intro. subst. pose proof (get_trees_partition_graph gt lt (reverseF g') None).
unfold P.partition in H13. destruct H13. destruct (S.equal t t2) eqn : ?. apply S.equal_2 in Heqb.
rewrite Heqb in H0. contradiction. apply H14 in Heqb. unfold P.disjoint in Heqb.
apply (Heqb v). split; assumption. assumption. assumption.
assert (Pa.path gt r1 v). apply H12. apply S.add_2. assumption. apply S.add_1. reflexivity. apply H13.
assert (Pa.path gt v r1). apply H12. apply S.add_1. reflexivity. apply S.add_2. assumption. auto.
assert (D2.f_time gt lt (reverseF g') None r1 < D2.f_time gt lt (reverseF g') None r2 \/
D2.f_time gt lt (reverseF g') None r1 = D2.f_time gt lt (reverseF g') None r2 \/
D2.f_time gt lt (reverseF g') None r1 > D2.f_time gt lt (reverseF g') None r2) by omega.
destruct H16.
- exfalso. eapply no_path_to_later_tree. apply R. apply R2. apply H3. apply H9. assumption.
assumption. assumption. apply H3. apply H8. apply H14.
- destruct H16.
+ assert (r1 = r2). eapply D2.f_times_unique. eapply get_tree_in_graph. apply R.
assumption. eapply get_tree_in_graph. apply R2. assumption. apply H16. subst.
pose proof (get_trees_partition_graph gt lt (reverseF g') None).
unfold P.partition in H17. destruct H17. destruct (S.equal t t2) eqn : ?. apply S.equal_2 in Heqb.
rewrite Heqb in H0. contradiction. apply H18 in Heqb. unfold P.disjoint in Heqb.
exfalso. apply (Heqb r2). split; assumption. all: try assumption.
+ exfalso. eapply no_path_to_later_tree. apply R2. apply R. apply H9. apply H3. assumption.
assumption. assumption. apply H8. apply H3. apply H15.
Qed.
End SecondPass.
End SCCAlg.
|
Require
MathClasses.orders.naturals MathClasses.implementations.peano_naturals.
Require Import
Coq.setoid_ring.Ring MathClasses.interfaces.abstract_algebra MathClasses.interfaces.naturals MathClasses.interfaces.orders MathClasses.interfaces.additional_operations.
Section contents.
Context `{Naturals N}.
Add Ring N : (rings.stdlib_semiring_theory N).
(* NatDistance instances are all equivalent, because their behavior is fully
determined by the specification. *)
Lemma nat_distance_unique_respectful {a b : NatDistance N} :
((=) ==> (=) ==> (=))%signature (nat_distance (nd:=a)) (nat_distance (nd:= b)).
Proof.
intros x1 y1 E x2 y2 F.
unfold nat_distance, nat_distance_sig.
destruct a as [[z1 A]|[z1 A]], b as [[z2 B]|[z2 B]]; simpl.
apply (left_cancellation (+) x1). now rewrite A, E, B.
destruct (naturals.zero_sum z1 z2).
apply (left_cancellation (+) x1).
rewrite associativity, A, F, B, E. ring.
transitivity 0; intuition.
destruct (naturals.zero_sum z1 z2).
rewrite commutativity.
apply (left_cancellation (+) y1).
rewrite associativity, B, <-F, A, E. ring.
transitivity 0; intuition.
apply (left_cancellation (+) x2).
now rewrite A, E, F, B.
Qed.
Lemma nat_distance_unique {a b: NatDistance N} {x y : N} : nat_distance (nd:=a) x y = nat_distance (nd:=b) x y.
Proof. now apply nat_distance_unique_respectful. Qed.
Context `{!NatDistance N}.
Global Instance nat_distance_proper : Proper ((=) ==> (=) ==> (=)) nat_distance.
Proof. apply nat_distance_unique_respectful. Qed.
End contents.
(* An existing instance of [CutMinus] allows us to create an instance of [NatDistance] *)
Program Instance natdistance_cut_minus `{Naturals N} `{Apart N} `{!TrivialApart N} `{!FullPseudoSemiRingOrder Nle Nlt}
  `{!CutMinusSpec N cm} `{∀ x y, Decision (x ≤ y)} : NatDistance N :=
  λ x y, if decide_rel (≤) x y then inl (y ∸ x) else inr (x ∸ y).
Next Obligation. rewrite commutativity. now apply cut_minus_le. Qed.
Next Obligation. rewrite commutativity. now apply cut_minus_le, orders.le_flip. Qed.
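(* Illustrative consequence (hypothetical evaluation, not from this file):
   instantiated at [nat] with the usual order and cut-minus, [nat_distance 3 7]
   yields [4], via the branch [inl (7 ∸ 3)]. *)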
(* Using the preceding instance we can make an instance for arbitrary models of the naturals
by translation into [nat] on which we already have a [CutMinus] instance. *)
Global Program Instance natdistance_default `{Naturals N} : NatDistance N | 10 := λ x y,
  match nat_distance_sig (naturals_to_semiring N nat x) (naturals_to_semiring N nat y) with
  | inl (n↾E) => inl (naturals_to_semiring nat N n)
  | inr (n↾E) => inr (naturals_to_semiring nat N n)
  end.
Next Obligation.
rewrite <-(naturals.to_semiring_involutive N nat y), <-E.
now rewrite rings.preserves_plus, (naturals.to_semiring_involutive _ _).
Qed.
Next Obligation.
rewrite <-(naturals.to_semiring_involutive N nat x), <-E.
now rewrite rings.preserves_plus, (naturals.to_semiring_involutive _ _).
Qed.
|
The distance between a set $T$ and the closure of a set $S$ is the same as the distance between $T$ and $S$. |
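A minimal proof sketch, assuming the usual infimum set distance $\operatorname{dist}(A,B) = \inf\{d(a,b) : a \in A,\ b \in B\}$ (notation assumed, not from the original):
$$\operatorname{dist}(T, \overline{S}) \;=\; \inf_{t \in T,\, s \in \overline{S}} d(t,s) \;=\; \inf_{t \in T,\, s \in S} d(t,s) \;=\; \operatorname{dist}(T, S),$$
since every point of $\overline{S}$ is a limit of points of $S$, so enlarging $S$ to its closure cannot lower the infimum.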
#' Upload (replicate) a local database to a remote database server,
#' e.g., Cloudant, Iriscouch
#'
#' @export
#' @template return
#' @param from Couch to replicate from. An object of class \code{Cushion}.
#' Required.
#' @param to Remote couch to replicate to. An object of class \code{Cushion}.
#' Required.
#' @param dbname (character) Database name. Required.
#' @param createdb If \code{TRUE}, the function creates the db on the remote
#' server before uploading. The db has to exist before uploading, so either
#' you do it separately or this fxn can do it for you. Default: \code{FALSE}
#' @param as (character) One of list (default) or json
#' @param ... Curl args passed on to \code{\link[httr]{POST}}
#' @examples \dontrun{
#' ## create a connection
#' (x <- Cushion$new())
#'
#' # Create a database locally
#' db_list(x)
#' if ("hello_earth" %in% db_list(x)) {
#' invisible(db_delete(x, dbname="hello_earth"))
#' }
#' db_create(x, 'hello_earth')
#'
#' ## replicate to a remote server
#' z <- Cushion$new(host = "ropensci.cloudant.com", transport = 'https',
#' port = NULL, user = 'ropensci', pwd = Sys.getenv('CLOUDANT_PWD'))
#'
#' ## do the replication
#' db_replicate(x, z, dbname = "hello_earth", createdb = TRUE)
#'
#' ## check changes on the remote
#' db_list(z)
#' changes(z, dbname = "hello_earth")
#'
#' ## make some changes on the remote
#' doc_create(z, dbname = "hello_earth",
#' '{"language":"python","library":"requests"}', 'stuff')
#' changes(z, dbname = "hello_earth")
#'
#' ## create another document, and try to get it
#' doc_create(z, dbname = "hello_earth", doc = '{"language":"R"}',
#' docid="R_rules")
#' doc_get(z, dbname = "hello_earth", docid='R_rules')
#'
#' ## cleanup - delete the database
#' db_delete(z, 'hello_earth')
#' }
db_replicate <- function(from, to, dbname, createdb = FALSE, as = 'list', ...) {
check_cushion(to)
check_cushion(from)
if (createdb) db_create(to, dbname)
fromurl <- file.path(from$make_url(), '_replicate')
tourl <- file.path(to$make_url(), dbname)
args <- list(source = unbox(dbname), target = unbox(cloudant_url(to, dbname)))
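  # Illustrative shape of the JSON body sent to `_replicate` (the exact
  # target URL is whatever cloudant_url() builds, assumed to carry
  # host/credentials): {"source":"<dbname>","target":"<remote-db-url>"}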
message("Uploading ...")
sofa_POST(fromurl, as, content_type_json(), body = args,
encode = "json", from$get_headers(), ...)
}
|
function [x] = denormdata(xn,xmean,xstd)
%DENORMDATA De-normalize normalized data
%
% Description
%    X = DENORMDATA(XN,XMEAN,XSTD) de-normalizes XN using
% precomputed XMEAN and XSTD.
%
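%    Example (illustrative; assumes NORMDATA's interface
%    [XN,XMEAN,XSTD] = NORMDATA(X)):
%
%      [xn,xmean,xstd] = normdata(x);
%      x2 = denormdata(xn,xmean,xstd);  % x2 recovers x
%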
% See also NORMDATA
%
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin<3
error('Too few arguments')
end
x=bsxfun(@plus,bsxfun(@times,xn,xstd),xmean);
|
On August 3, an extratropical cyclone developed into a tropical depression off the west coast of Florida. Initially a weak disturbance, it moved generally westward, slowly gaining in intensity. Early on August 4, the depression attained tropical storm intensity. Ships in the vicinity of the storm reported a much stronger tropical cyclone than initially suggested. After reaching hurricane strength on August 5 south of the Mississippi River Delta, the storm strengthened further into a modern-day Category 2 hurricane, with maximum sustained winds of 100 mph and a minimum barometric pressure of 972 mbar (hPa; 28.71 inHg) at 0600 UTC on August 7. The hurricane moved ashore near Sabine Pass, Texas later that day at peak strength. Once inland, the storm executed a sharp curve to the north and quickly weakened, degenerating into a tropical storm on August 8 before dissipating over Arkansas on August 10.
|
module Toolkit.Data.DList.Elem
import Toolkit.Data.DList
import public Toolkit.Decidable.Equality.Indexed
%default total
||| Proof that some element is found in a `DList`.
|||
||| @iTy The type of the element's index.
||| @elemTy The type of the list element.
||| @x An element in the list.
||| @xs The list itself.
||| @prf Proof that the element's index is in the list in the same position as the element itself.
public export
data Elem : (iTy : Type)
-> (elemTy : iTy -> Type)
-> forall i, is
. (x : elemTy i)
-> (xs : DList iTy elemTy is)
-> Type
where
||| Proof that the element is at the front of the list.
H : (Equals ity elemTy x y) -> Elem ity elemTy x (y :: xs)
||| Proof that the element is found later in the list.
T : (later : Elem iTy elemTy x xs)
    -> Elem iTy elemTy x (x' :: xs)
listEmpty : Elem type e thing Nil -> Void
listEmpty (H x) impossible
listEmpty (T later) impossible
notInLater : (contraE : Equals type e x y -> Void)
-> (contraR : Elem type e x xs -> Void)
-> (prf : Elem type e x (y::xs))
-> Void
notInLater contraE contraR (H z) = contraE z
notInLater contraE contraR (T later) = contraR later
export
isElem : {type : Type}
-> {e : type -> Type}
-> DecEq type
=> DecEqIdx type e
=> {a : type}
-> {as : List type}
-> (thing : e a)
-> (things : DList type e as)
-> Dec (Elem type e thing things)
isElem thing [] = No listEmpty
isElem thing (elem :: rest) with (Index.decEq thing elem)
isElem thing (thing :: rest) | (Yes (Same Refl Refl)) = Yes (H (Same Refl Refl))
isElem thing (elem :: rest) | (No contra) with (isElem thing rest)
isElem thing (elem :: rest) | (No contra) | (Yes prf) = Yes (T prf)
isElem thing (elem :: rest) | (No contra) | (No f) = No (notInLater contra f)
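{- Illustrative use (hypothetical values, not part of this module):
     xs : DList Type Prelude.id [Nat, Bool]
     xs = [0, True]
   then `isElem 0 xs` decides `Elem Type Prelude.id 0 xs`. -}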
-- [ EOF ]
|
'''
Use an LSTM neural network to generate random text from a provided text file.
The model is a char-level language model that tries to predict the next
character given all previous characters.
'''
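# Example invocation (illustrative; script name and corpus path assumed):
#   python text_generation.py --file_path data/corpus.txt --epochs 10 -v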
import os
import sys
import argparse
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras import optimizers
def read_file(file_name):
'''Read the text file in memory.'''
with open(file_name, 'r') as f:
text = f.read()
return text
def process_text(text, args):
'''Process text.'''
sentences = []
next_chars = []
    # Iterate over text to get sentences and next_chars based on the size of
    # the chunk of text that makes up a sentence
    for i in range(0, len(text) - args.max_length, args.step):
sentences.append(text[i:i + args.max_length])
next_chars.append(text[i + args.max_length])
# Get vocabulary of characters
chars = sorted(list(set(text)))
    char_idxs = {char: i for i, char in enumerate(chars)}
# Prepare nd-arrays of X and Y
X = np.zeros((len(sentences), args.max_length, len(chars)))
y = np.zeros((len(sentences), len(chars)))
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_idxs[char]] = 1
y[i, char_idxs[next_chars[i]]] = 1
return X, y, chars, char_idxs
def sample(preds, temperature):
'''
Reweight probability distribution based on temperature. Higher temperature
translates to more randomness.
'''
preds = np.asarray(preds, dtype='float64')
preds = np.log(preds) / temperature
# Normalize to get probs
probs = np.exp(preds) / np.sum(np.exp(preds))
# Get random index
out = np.random.multinomial(1, probs, 1)
return np.argmax(out)
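# Illustrative behavior of `sample` (assumed values, not from the original):
#   sample(np.array([0.1, 0.2, 0.7]), temperature=0.5)  # almost always 2
#   sample(np.array([0.1, 0.2, 0.7]), temperature=2.0)  # noticeably more random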
def build_model(args, num_classes):
'''Build char-level language model.'''
# Build and compile the model
model = Sequential()
model.add(LSTM(128, input_shape=(args.max_length, num_classes)))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer=optimizers.RMSprop(lr=1e-1),
loss='categorical_crossentropy')
return model
def train(model, X_train, y_train, text, char_idxs, chars, args):
print('Started training ...')
for epoch in range(args.epochs):
print(f'{30 * "-":>50s} epoch : {epoch + 1} {30 * "-"}')
model.fit(X_train,
y_train,
batch_size=args.batch_size,
epochs=1,
verbose=args.verbose)
# Generate random text
start_idx = np.random.randint(0, len(text) - args.max_length - 1)
generated_text = text[start_idx:start_idx + args.max_length]
# Print out out generated text
sys.stdout.write(generated_text)
for _ in range(400):
# Prepare the data to feed it to the model to get predictions
sampled = np.zeros((1, args.max_length, len(chars)))
for t, char in enumerate(generated_text):
sampled[0, t, char_idxs[char]] = 1
# Get predictions
preds = model.predict(sampled, verbose=0)[0]
next_char_idx = sample(preds, args.temperature)
next_char = chars[next_char_idx]
generated_text = generated_text[1:]
generated_text += next_char
# Print out new chars
sys.stdout.write(next_char)
print()
print('Done training.')
return model
def main():
parser = argparse.ArgumentParser(
description='Training char-level language model using a sample text')
parser.add_argument('--file_path', type=str, metavar='',
                        help='Path of text file that will be used in training')
parser.add_argument('--max_length', type=int, default=50, metavar='',
help='Length of sentence used for each sample')
parser.add_argument('--step', type=int, default=3, metavar='',
                        help='Sliding step used in constructing sentences from '
                             'raw text')
parser.add_argument('--temperature', type=int, default=1, metavar='',
                        help='Temperature determines the level of randomness '
                             'in generating new text')
parser.add_argument('--batch_size', type=int, default=128, metavar='',
help='Batch size to be used in training')
parser.add_argument('--epochs', type=int, default=20, metavar='',
help='Number of full training cycles')
parser.add_argument('-v', '--verbose', default=0, action='count',
help='Verbosity mode')
args = parser.parse_args()
# Read and process text file
text = read_file(args.file_path)
X_train, y_train, chars, char_idxs = process_text(text, args)
# Build and train the model
model = build_model(args, len(chars))
model = train(model, X_train, y_train, text, char_idxs, chars, args)
    # Save the model on disk (create the target directory if needed)
    os.makedirs('models', exist_ok=True)
    model.save('models/text-generation.h5')
if __name__ == '__main__':
main()
|
Born and raised in Louisiana, trading options since 2015, Bachelor of Science in Finance, clear, concise, and straight trades.
Keep It Simple, Stupid. Trading is hard enough without over-complicating things. Have a Plan, Trade Your Plan, Review Your Plan.
Here is your chance to own in the Prairie Creek Condominium development! 2108 S. Second Street in Morton is being offered at $109,900 and is a sparkling, move-in ready 2 bedroom, 1 1/2 bath condo!
10847 N. Glenfield Drive, Dunlap – Trail View Estates!
SOLD!! Pending in 2 days! Golf course view for this charming 3 bedroom, 1 bath ranch located just off Detweiller Drive.
#include <stdio.h>
#include <gsl/gsl_matrix.h>
#include "BandEnergy.h"
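
/* Illustrative build command (file names and GSL link flags assumed):
 *   gcc test_band_energy.c BandEnergy.c -lgsl -lgslcblas -lm
 */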
int main(int argc, char *argv[]) {
HTightBinding *Hrs = ExtractHTightBinding("test_data/Fe_soc/Fe_soc_hr.dat");
gsl_matrix *R = gsl_matrix_calloc(3, 3); // R -> all zeros
// Overall scale for R doesn't matter.
gsl_matrix_set(R, 0, 1, 1.0);
gsl_matrix_set(R, 0, 2, 1.0);
gsl_matrix_set(R, 1, 0, 1.0);
gsl_matrix_set(R, 1, 2, 1.0);
gsl_matrix_set(R, 2, 0, 1.0);
gsl_matrix_set(R, 2, 1, 1.0);
double num_electrons = 8.0;
int na = 8;
int nb = 8;
int nc = 8;
bool use_cache = true;
double E_Fermi = 0.0;
double energy = BandEnergy(&E_Fermi, Hrs, R, num_electrons, na, nb, nc, use_cache);
printf("energy = %f\n", energy);
printf("E_Fermi = %f\n", E_Fermi);
  gsl_matrix_free(R);
  return 0;
}
|
function L = dist2poly ( p, edgexy, lim )
%*****************************************************************************80
%
% Find the minimum distance from the points in P to the polygon defined by
% the edges in EDGEXY. LIM is an optional argument that defines an upper
% bound on the distance for each point.
% Uses (something like?) a double sweep-line approach to reduce the number
% of edges that are required to be tested in order to determine the closest
% edge for each point. On average only size(EDGEXY)/4 comparisons need to
% be made for each point.
%
% Author:
%
% Darren Engwirda
%
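%  Example (illustrative; EDGEXY rows are assumed to be [x1 y1 x2 y2]):
%
%    p      = [0.5 0.5; 2.0 2.0];            % query points, one per row
%    edgexy = [0 0 1 0; 1 0 1 1; 1 1 0 0];   % triangle edges
%    L      = dist2poly ( p, edgexy );       % min distance per point
%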
if nargin<3
lim = [];
end
np = size(p,1);
ne = size(edgexy,1);
if isempty(lim)
lim = inf*ones(np,1);
end
% Choose the direction with the biggest range as the "y-coordinate" for the
% test. This should ensure that the sorting is done along the best
% direction for long and skinny problems wrt either the x or y axes.
dxy = max(p)-min(p);
if dxy(1)>dxy(2)
% Flip co-ords if x range is bigger
p = p(:,[2,1]);
edgexy = edgexy(:,[2,1,4,3]);
end
% Ensure edgexy(:,[1,2]) contains the lower y value
swap = edgexy(:,4)<edgexy(:,2);
edgexy(swap,:) = edgexy(swap,[3,4,1,2]);
% Sort edges
[i,i] = sort(edgexy(:,2)); % Sort edges by lower y value
edgexy_lower = edgexy(i,:);
[i,i] = sort(edgexy(:,4)); % Sort edges by upper y value
edgexy_upper = edgexy(i,:);
% Mean edge y value
ymean = 0.5*( sum(sum(edgexy(:,[2,4]))) )/ne;
% Alloc output
L = zeros(np,1);
% Loop through points
tol = 1000.0*eps*max(dxy);
for k = 1:np
x = p(k,1);
y = p(k,2);
d = lim(k);
if y<ymean
% Loop through edges bottom up
for j = 1:ne
y2 = edgexy_lower(j,4);
if y2>=(y-d)
y1 = edgexy_lower(j,2);
if y1<=(y+d)
x1 = edgexy_lower(j,1);
x2 = edgexy_lower(j,3);
if x1<x2
xmin = x1;
xmax = x2;
else
xmin = x2;
xmax = x1;
end
if xmin<=(x+d) && xmax>=(x-d)
% Calculate the distance along the normal projection from [x,y] to the jth edge
x2mx1 = x2-x1;
y2my1 = y2-y1;
r = ((x-x1)*x2mx1+(y-y1)*y2my1)/(x2mx1^2+y2my1^2);
if r>1.0 % Limit to wall endpoints
r = 1.0;
elseif r<0.0
r = 0.0;
end
dj = (x1+r*x2mx1-x)^2+(y1+r*y2my1-y)^2;
if (dj<d^2) && (dj>tol)
d = sqrt(dj);
end
end
else
break
end
end
end
else
% Loop through edges top down
for j = ne:-1:1
y1 = edgexy_upper(j,2);
if y1<=(y+d)
y2 = edgexy_upper(j,4);
if y2>=(y-d)
x1 = edgexy_upper(j,1);
x2 = edgexy_upper(j,3);
if x1<x2
xmin = x1;
xmax = x2;
else
xmin = x2;
xmax = x1;
end
if xmin<=(x+d) && xmax>=(x-d)
% Calculate the distance along the normal projection from [x,y] to the jth edge
x2mx1 = x2-x1;
y2my1 = y2-y1;
r = ((x-x1)*x2mx1+(y-y1)*y2my1)/(x2mx1^2+y2my1^2);
if r>1.0 % Limit to wall endpoints
r = 1.0;
elseif r<0.0
r = 0.0;
end
dj = (x1+r*x2mx1-x)^2+(y1+r*y2my1-y)^2;
if (dj<d^2) && (dj>tol)
d = sqrt(dj);
end
end
else
break
end
end
end
end
L(k) = d;
end
end % dist2poly()
|
(** Compilation+inference agrees with functional CWP (always). *)
Set Implicit Arguments.
Require Import Coq.Program.Basics.
Require Import Coq.QArith.QArith.
Require Import Coq.micromega.Lqa.
Require Import Coq.micromega.Lia.
Require Import ExtLib.Data.Monads.StateMonad.
Local Open Scope program_scope.
Require Import compile.
Require Import cpGCL.
Require Import cwp.
Require Import infer.
Require Import order.
Require Import Q.
Require Import tree.
(** wpf and unnormalized inference after compilation coincide when c
is well-formed. *)
Theorem wpf_infer_f c f n :
wf_cpGCL c ->
wpf c f ==f infer_f f β evalCompile c n.
Proof.
revert n f.
induction c; intros m f Hwf st;
unfold evalCompile, evalState, compose; simpl; inversion Hwf; subst.
- reflexivity.
- rewrite Qdiv_0_num; reflexivity.
- reflexivity.
- unfold compose.
unfold compose, f_Qeq, evalCompile, evalState in *.
destruct (runState (compile c1) m) eqn:Hc1.
destruct (runState (compile c2) n) eqn:Hc2.
specialize (IHc1 m (wpf c2 f) H1 st).
rewrite IHc1. simpl.
rewrite Hc1. simpl.
unfold kcomp. simpl.
rewrite <- infer_f_bind. unfold compose. simpl.
+ apply infer_f_proper; intro x.
specialize (IHc2 n f H2 x).
rewrite IHc2, Hc2; reflexivity.
+ intros l x Hbound. apply not_in_not_bound_and_not_free. split.
* apply free_in_not_free_in; intro HC.
eapply compile_free_in_0 in Hc2; eauto. subst.
eapply compile_bound_labels in Hc1; eauto; lia.
* apply bound_in_not_bound_in; intro HC.
eapply compile_bound_labels in Hc2; eauto.
eapply compile_bound_labels in Hc1; eauto.
lia.
- unfold compose, evalCompile, evalState, f_Qeq in *.
destruct (runState (compile c1) m) eqn:Hc1.
destruct (runState (compile c2) n) eqn:Hc2.
simpl; destruct (e st).
+ rewrite IHc1, Hc1; auto; reflexivity.
+ rewrite IHc2, Hc2; auto; reflexivity.
- unfold compose, evalCompile, evalState, f_Qeq in *.
destruct (runState (compile c1) m) eqn:Hc1.
destruct (runState (compile c2) n) eqn:Hc2.
rewrite IHc1, Hc1, IHc2, Hc2; auto; reflexivity.
- destruct (runState (compile c) (S m)) eqn:Hc.
pose proof IHc as IHc'.
specialize (IHc (S m) f H0 st).
unfold compose, evalCompile, evalState, f_Qeq in *.
rewrite Hc in IHc. simpl in *.
set (g := fun st' => if e st' then 0 else f st').
set (h := fun st' => if e st' then 1 else 0).
set (k := fun st' => if e st' then Fail St (S m) else Leaf st').
destruct (e st).
+ cut (wpf c g st == infer_f f (tree_bind (t st) k)).
cut (wpf c h st == infer_fail (S m) (tree_bind (t st) k)).
{ intros H1 H2; rewrite H1, H2; reflexivity. }
* unfold h. unfold k.
rewrite infer_fail_tree_bind_infer_f.
specialize (IHc' (S m) (fun st => if e st then 1 else 0) H0 st).
rewrite Hc in IHc'. simpl in IHc'.
apply IHc'.
apply label_in_not_in; intro HC. apply label_in_bound_or_free in HC.
destruct HC as [HC|HC].
++ eapply compile_bound_labels in Hc; eauto; lia.
++ eapply compile_free_in_0 in Hc; eauto; lia.
++ eapply compile_wf_tree; eauto.
* specialize (IHc' (S m) g H0 st).
rewrite IHc'. rewrite Hc. simpl.
unfold g. unfold k.
rewrite infer_f_bind_fail.
++ reflexivity.
++ apply bound_in_not_bound_in; intro HC.
eapply compile_bound_labels in Hc; eauto; lia.
+ reflexivity.
- destruct (e st); reflexivity.
Qed.
(** wlpf and unnormalized liberal inference after compilation coincide
when c is well-formed. *)
Theorem wlpf_infer_f_lib c f n :
wf_cpGCL c ->
wlpf c f ==f infer_f_lib f β evalCompile c n.
Proof.
revert n f.
induction c; intros m f Hwf st;
unfold evalCompile, evalState, compose; simpl; inversion Hwf; subst.
- reflexivity.
- rewrite Nat.eqb_refl; reflexivity.
- reflexivity.
- unfold compose.
unfold compose, f_Qeq, evalCompile, evalState in *.
destruct (runState (compile c1) m) eqn:Hc1.
destruct (runState (compile c2) n) eqn:Hc2.
specialize (IHc1 m (wlpf c2 f) H1 st).
rewrite IHc1. simpl.
rewrite Hc1. simpl.
unfold kcomp. simpl.
rewrite <- infer_f_lib_bind. unfold compose. simpl.
+ apply infer_f_lib_proper; intro x.
specialize (IHc2 n f H2 x).
rewrite IHc2, Hc2; reflexivity.
+ intros l x Hbound. apply not_in_not_bound_and_not_free. split.
* apply free_in_not_free_in; intro HC.
eapply compile_free_in_0 in Hc2; eauto. subst.
eapply compile_bound_labels in Hc1; eauto; lia.
* apply bound_in_not_bound_in; intro HC.
eapply compile_bound_labels in Hc2; eauto.
eapply compile_bound_labels in Hc1; eauto.
lia.
- unfold compose, evalCompile, evalState, f_Qeq in *.
destruct (runState (compile c1) m) eqn:Hc1.
destruct (runState (compile c2) n) eqn:Hc2.
simpl; destruct (e st).
+ rewrite IHc1, Hc1; auto; reflexivity.
+ rewrite IHc2, Hc2; auto; reflexivity.
- unfold compose, evalCompile, evalState, f_Qeq in *.
destruct (runState (compile c1) m) eqn:Hc1.
destruct (runState (compile c2) n) eqn:Hc2.
rewrite IHc1, Hc1, IHc2, Hc2; auto; reflexivity.
- destruct (runState (compile c) (S m)) eqn:Hc.
pose proof IHc as IHc'.
specialize (IHc (S m) f H0 st).
unfold compose, evalCompile, evalState, f_Qeq in *.
rewrite Hc in IHc. simpl in *.
set (g := fun st' => if e st' then 0 else f st').
set (h := fun st' => if e st' then 1 else 0).
set (k := fun st' => if e st' then Fail St (S m) else Leaf st').
destruct (e st).
+ cut (wlpf c g st == infer_f_lib f (tree_bind (t st) k)).
cut (wpf c h st == infer_fail (S m) (tree_bind (t st) k)).
{ intros H1 H2. rewrite H1; simpl.
destruct (Qeq_dec (infer_fail (S m) (tree_bind (t st) k)) 1).
{ rewrite Qeq_eq_bool; auto; reflexivity. }
rewrite Qeq_bool_false; auto.
rewrite H1, H2. reflexivity. }
* unfold h. unfold k.
rewrite infer_fail_tree_bind_infer_f.
specialize (IHc' (S m) (fun st => if e st then 1 else 0) H0 st).
rewrite Hc in IHc'. simpl in IHc'.
generalize (@wpf_infer_f c (indicator e) (S m) H0 st).
unfold compose, evalCompile, evalState; rewrite Hc; auto.
apply label_in_not_in; intro HC. apply label_in_bound_or_free in HC.
destruct HC as [HC|HC].
++ eapply compile_bound_labels in Hc; eauto; lia.
++ eapply compile_free_in_0 in Hc; eauto; lia.
++ eapply compile_wf_tree; eauto.
* specialize (IHc' (S m) g H0 st).
rewrite IHc'. rewrite Hc. simpl.
unfold g. unfold k.
rewrite infer_f_lib_bind_fail.
++ reflexivity.
++ apply bound_in_not_bound_in; intro HC.
eapply compile_bound_labels in Hc; eauto; lia.
+ reflexivity.
- destruct (e st); reflexivity.
Qed.
(** cwpf and normalized inference after compilation coincide when c is
well-formed. *)
Theorem cwpf_infer c f n :
wf_cpGCL c ->
cwpf c f ==f infer f β evalCompile c n.
Proof.
intros Hwf x.
unfold cwpf, infer, compose; simpl.
apply Qeq_Qdiv.
- apply wpf_infer_f; auto.
- apply wlpf_infer_f_lib; auto.
Qed.
|
How do you know you're not just making karma worse??
The answer to the question in the OP is given in the first two verses of the Dharmapada.
But dependent origination, when reduced to its essential components, simply means: where there is affliction, there is a cause for action; where there is action there is a cause for suffering; and where there is suffering, there is a condition for further affliction. Without affliction, there is no cause for action; without action, there is no result, suffering.
A Buddha's deeds are not based on affliction (desire, hatred, and ignorance); they are based on wisdom. Hence, they do not result in suffering.
A buddha's mind stream is conditioned and relative; it is however free of affliction and endowed with omniscience.
Dzogchen mainly describes how samsara begins (with an aim to reverse it), but the "mechanics" of samsara are the same: affliction → action → suffering → affliction, ad infinitum, unless one breaks the chain at affliction.
An action always has a result. An affliction does not need to have a result.
What is the definition (in the context of Dharma) of an affliction?
What is the definition (in the context for Dharma) of an action?
Per the mechanism illustrated in the sequence above, WHY is it that affliction does not need to have a result?
A painful mental state, i.e., desire, hatred, and ignorance, or anything that is conducive to a painful mental state, i.e., impure conditioned phenomena.
Volition is action; it produces verbal and physical acts.
Afflictions do not necessarily produce volitions. When we are aware of our afflictive state, we can disengage our minds from actions related to afflictive objects. This is why we practice śamatha, actually, so that we are aware of our mind's afflictive state. Being aware of our mind's afflictive state is called mindfulness and attention. Being unaware of our mind's afflictive state is called being mindless and inattentive.
What we are today comes from our thoughts of yesterday, and our present thoughts build the life of tomorrow: our life is the creation of our mind.
If one speaks or acts with an impure mind, suffering follows as the wheel of the cart follows the beast that draws the cart.
If a person speaks or acts with a pure mind, joy follows as her own shadow.
I think this is where faith enters the picture, is it not? "How can I be sure I'm acting from pure motives?" The answer is: you can only do your best. That's where trust of your true nature and in the dharma comes into play. We can't understand all the depths and complexities in our current state, hence, the need for the teaching in the first place! That is why it is compared to a raft. And we need to have faith in it.
What are the signposts along the road that I should be paying attention to?
Good intention is so important, but perhaps not enough, and sometimes even triggering. Even if someone wishes very much to help, the help can be seen as an attack. Just like a wolf painfully entangled in a trap will bite when a hand reaches out to help him out of that desperate situation. He would bite, as we would, at each other, and perhaps even at enlightened help.
Trapped in duality, I think your advice is a great recommendation to be less a slave of the dividing mind.
As long as dualism is present, there is continuation of karma.
That's why the relation to the teacher is so important. Because you need that faith until you see for yourself.
I am fully accepting that I don't see into the functionalities of cause and condition.
The more I practise the more firm beliefs transform into mere possibilities.
We talk about karma in principle, and I am fixed (determined) on understanding it in its correct view. But not just to understand and talk about it: I want this done already. I'm fed up with being ignorant. Once I entered this practice, it became very clear to me how incredibly multifaceted every relative situation is, even in ONE family, for each individual, and just how many conditions contribute to stimulating habituated patterns that manifest like "many a fiction story for every episode of one's life." Yet seeing them line up, tracking the trends, hindsight helps in defining and illustrating very clearly what is a priority and what isn't... I fully trust the self-perfecting process, precisely because the connection proves I prioritized my connection to the teachings before, in this being inhabited by a conventional me, and who knows how many other me's before. It sometimes feels like the anchor was to be born where my teacher was, in the years he was there (not many Tibetans in Naples), but I had a specific situation which was quite awful for many years. It was working off debts one by one to get to a position where the practice could really begin, one insanity after the other, and many of those afflictions did exhaust themselves and no longer bind my current conditions... but some DO! Yet somehow I'm not that attached to their story anymore; I see them for their worth and only feel a kind of gratitude that my conditions provided me with the correct medicine to feel the transparency of things... as if I am becoming more relative in my understanding, in which even more precision is called for!
Ogyen, you haven't responded to a question on the first page: which vehicle does your post refer to?
I read so much pondering in the long posts you write. I see your longing for logic and precision, which you think to attain through determination. And I think (and that is the Dzogchen view) that this is not how it is going to work. Let go and rest in what is there right now. No remembering, no expectation.
Make the seeing of the true nature of your mind your one and only goal.
Trying to verify your karma is just another play of the ego's mind. Of course it may be fun going down that road, but stay in awareness of its inherent emptiness.
This is where the true journey starts.
Karma is a difficult one to figure out.
For example: My brother died earlier this year and left a heap of debts that my family and I have to pay for.
This led me to wondering: how can I know if this is due to old debts which were accrued by me towards him, that I now have to pay for, OR are these new debts being accrued by him towards me?
Only a Buddha can know.
Sorry to hear about your brother, Grigoris.
Your post got me thinking about collective karma though. What role does it play, if any, in a situation like this? Is that even how collective karma works? How much of our suffering is due to our own karmic debts? At what point can we say that suffering is not just the result of individual karmic debts? Is it when an entire country is devastated by some natural disaster or when an entire group of people is persecuted and killed? Does collective karma operate on a smaller scale - like between a group of co-workers or a family unit?
I understand that there is a danger with this line of thinking, especially for someone like me, where collective karma can become a means of externalising all of my suffering and its causes. But can this notion be totally ignored when looking at suffering in its entirety?
I'm probably not articulating this well due to incompetence. Apologies for the noob post.
At what point can we say that suffering is not just the result of individual karmic debts?
This is a very interesting question, which goes beyond my poor limitations. Instead of being able to provide an answer, only other questions pop up: if all karmic debts were purely individual, would there not then be a subtle belief in an "individual, a person, a one unto itself"? Is not everything dependent, in ways even so subtle that our intellect cannot grasp them?
I do not reject that when we act unwholesomely or wholesomely, our own harvest will be coloured by it. But would that be all, without any influence on everything around us? Then how could it be called wholesome and unwholesome? Personal only?
I do not reject that we must awaken ourselves; none can do it for us (even if we are temporarily dependent on 'some navigation medicines' to help). But if it is a solely individual 'karmic harvest', we must be energies on our own. Perhaps with a stony separating wall around body, speech and mind.
ps My warm support to Greece, for the whole family.
Hi, apologies... life ate up a lot of time lately. Just seeing this. Karma applies to all vehicles. It is not even philosophy-specific. It is a principle which encompasses all vehicles and all beings that have stumbled into Samsara.
It's nicely thought out, thank you for taking the time to write. I may go on and on in my posts... I guarantee I'm not a terribly lengthy thinker off the boards; I just have general existential angst I'm working out overall, as I find myself in that awkward phase between birth and death.
I had a wonderful retreat recently which opened my eyes a great deal, and I have a strong confidence in the method of Dharma. I study all aspects of buddhadharma but due to karmic conditions and previous aspirations, I really resonate most with the Dzogchen vehicle.
Verifying karma is a fundamental part of understanding the true nature of your actual condition; I don't think it's an ego game. To see the true nature of your mind there are a lot of layers to get through... I mean, we are caked in conditioning that prevents us from really integrating our own true nature, so much of this is a very useful exercise.
The question mostly pertained to finding markers and knowing that you are going the right way (for you). We think a lot of things. That doesn't mean we see or understand what the conditions are which are producing that thinking/feeling and acting. I have now learned how to verify that and apply it.
All of a sudden... karmic conditions are maturing rather quickly, manifesting in the form of obstacles and changes, which I can sometimes mitigate somewhat through the effect of secondary causes generated by the protection of practice. The rest I deal with as I can.
I've also had that question pop up in some situations, and I've come to think that maybe it's not even a personal thing between you and your brother... more like a series of conditions you both had accumulated at some point that have now matured this way: you with a kind of debt-related load to carry, and he leaving your family in this specific scenario... and now you're where this karma will run its course. The trick is to just exhaust that karma while trying to have better clarity about the nature of these relative (but very annoyingly felt) consequences, and to not generate new karma in body, speech and mind. Of course it's much easier said than done... but I have taken to welcoming exhausting karma as needed, and I've started to become sensitive to how every action and thought produces its own kind of karma.
Many good hugs and vibes to you.
How do you know you're not just making things worse thinking you're making things better (good intentions and all that jazz)???
The solution is always to practice what you've been given until your mind becomes clear, meaning unhelpful states of mind have subsided & you can act based on wisdom. If you're unsure, practice more.
If practicing doesn't seem like enough, you have two options... one is to accumulate more karma and wait for it to burn itself out through your experiences, and the other is to practice anyway. Mechanically they are identical, but functionally they are opposed. Blindly creating karma through mundane activity is the risky one. Practice necessarily protects you from that, always, every single time, because it's not connected to your ordinary mind, but to your wisdom mind, which is the very mind of the Buddha.
More to the point, in which direction is your fear (i.e., of worsening karma) pushing you?
Is it constructive or not?
How much time do you spend with this fear by itself, vs. letting it spur you to action? The state of your mind, which is capable of indulging the fear in unproductive/harmful ways, is what creates the karma producing the fear you mean to escape.
If the fear is there and you can't get rid of it at this time (because you can't change the conditions causing it to arise), employ practice. Let go of the mind that judges & tries to ascertain what will happen, because this is based on the Eight Worldly Attitudes; seize the mind that is committed to making your life meaningful through practice.
It already exists within you, ready to be activated through conscious contemplation. Remember that the practice you've done in the past & that you choose to do currently is helping you right now, as opposed to in the future. |
#' Produce a model of irradiance for an entire study area or selected control points
#'
#' @param dem A [sp::SpatialGridDataFrame] or [raster::RasterLayer] object giving the digital
#' elevation model of the study area.
#' @param output_points A set of output points; either a [sp::SpatialPoints] object or a raster mask.
#' @param times A vector of [POSIXct] objects giving the dates and times at which to compute irradiance. These should be given in local (to the DEM) time
#' @param timezone An integer giving the offset in hours from UTC that applies to the DEM; see the manual for r.sun for details
#' @param use_existing logical; if TRUE then existing slope, aspect, longitude, and horizon
#' rasters will be used, if FALSE they will be recomputed
#' @param dem_name character; this parameter (and the similar name parameters) tells GRASS what
#' names to use for the rasters needed for the analysis; they must exist in the GRASS
#' location/mapset if `use_existing == TRUE`.
#' @param slope_name character; name of the slope raster in GRASS.
#' @param aspect_name character; name of the aspect raster in GRASS.
#' @param lon_name character; name of the longitude raster in GRASS.
#' @param horizon_pars List of arguments for GRASS 7.4's r.horizon; see the GRASS GIS help files
#' if you wish to change the defaults.
#' @param newSession Boolean, if TRUE a new grass session will be started even if one already
#' exists
#' @param gisBase character; the location of the GRASS installation
#' (see WatershedTools::GrassSession)
#' @details Requires an existing installation of GRASS7, as well as the rgrass7 package. It is
#' first necessary to run [rgrass7::initGRASS]. Recommended settings for initGRASS include
#' `home = tempdir()` and `SG = SpatialGrid(dem_gridded)`. To produce the gridded DEM
#' from a raster, use:
#' `dem_gridded <- SpatialPoints(dem)`
#' `gridded(dem_gridded) <- TRUE`
#'
#' Note that running the light model can be extremely slow for large study areas and/or high-resolution DEMs and/or long time series.
#'
#' It is strongly recommended to use the `output_points` argument to set a list of locations at which to compute the output, otherwise memory usage can be extreme. For rasters, output will be returned for all non-NA cells.
#' @return A matrix, one row per output point, one column per date/time for computation. Column names are the integer values of the date/time, rownames are the coordinates of the point
#' @export
irradiance <- function(dem, output_points, times, timezone, use_existing = FALSE, dem_name = "dem",
slope_name = "slope", aspect_name = "aspect", lon_name = "longitude",
horizon_pars = list(step=30, bufferzone=200, maxdistance=5000, horBaseName = "horizonAngle"),
newSession = FALSE, gisBase)
{
if(!requireNamespace('rgrass7'))
stop("The rgrass7 package must be installed to use this function")
if(!requireNamespace('WatershedTools'))
stop("The WatershedTools package must be installed to use this function")
if(is(output_points, "sf"))
output_points = as(output_points, "Spatial")
if(newSession || nchar(Sys.getenv("GISRC")) == 0) {
gs <- WatershedTools::GrassSession(dem, gisBase, dem_name, override = TRUE)
}
if(newSession || !use_existing) {
setup_irradiance(dem_name, slope_name, aspect_name, lon_name, horizon_pars)
}
## preallocate a large matrix to store results
irrad_mat <- matrix(NA, ncol=length(times), nrow=length(output_points))
colnames(irrad_mat) <- as.integer(times)
	rownames(irrad_mat) <- apply(sp::coordinates(output_points), 1, paste, collapse=',')
## for each time/date combo, run the light model
for(i in 1:length(times))
{
dt <- times[i]
dy <- lubridate::yday(dt)
tm <- lubridate::hour(dt) + lubridate::minute(dt)/60
err <- rgrass7::execGRASS("r.sun", flags=c("overwrite", "quiet"), elevation=dem_name,
horizon_basename = horizon_pars$horBaseName, horizon_step = horizon_pars$step,
day=dy, time=tm, glob_rad="irradiance_out", aspect=aspect_name, slope=slope_name,
long=lon_name, civil_time=timezone)
# bring the raster back into R
irr <- raster::raster(rgrass7::readRAST("irradiance_out"))
		irrad_mat[,i] <- raster::extract(irr, output_points)
}
irrad_mat
}
#' Create rasters needed for irradiance analysis
#' @param dem_name character; this parameter (and similar name parameters) tell GRASS what names to
#' use for the rasters needed for the analysis; they must exist in the GRASS location/mapset if
#' `use_existing == TRUE`.
#' @param horizon_pars List of arguments for GRASS 7.4's r.horizon; see the GRASS GIS help files if you wish to change the defaults.
#' @keywords internal
setup_irradiance <- function(dem_name, slope_name, aspect_name, lon_name, horizon_pars)
{
## build slope and aspect maps
err <- rgrass7::execGRASS("r.slope.aspect", flags=c("overwrite"), elevation=dem_name,
aspect=aspect_name, slope=slope_name)
	## produce a longitude raster
err <- rgrass7::execGRASS("r.latlong", flags=c('overwrite', 'l'), input=dem_name,
output=lon_name)
## pre-compute horizon angles, slow but saves significant time
err <- rgrass7::execGRASS("r.horizon", flags=c("overwrite"), elevation=dem_name,
step=horizon_pars$step, bufferzone=horizon_pars$bufferzone,
output = horizon_pars$horBaseName, maxdistance=horizon_pars$maxdistance)
}
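## A minimal usage sketch (not from the original package): how a call to
## `irradiance()` might look. The file name, coordinates, and `gisBase`
## location are hypothetical placeholders; adjust them for your system.
# library(raster)
# dem <- raster("dem.tif")                                        # hypothetical DEM file
# pts <- sp::SpatialPoints(cbind(c(450000, 451000), c(5100000, 5101000)),
# 	proj4string = raster::crs(dem))                               # two points of interest
# times <- seq(as.POSIXct("2020-06-21 06:00"), as.POSIXct("2020-06-21 18:00"),
# 	by = "3 hours")                                               # local (DEM) time
# irr <- irradiance(dem, pts, times, timezone = 1,
# 	gisBase = "/usr/lib/grass78")                                 # hypothetical GRASS path
# dim(irr)  # one row per point, one column per time step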
|
Formal statement is: lemma open_image_snd: assumes "open S" shows "open (snd ` S)" Informal statement is: If $S$ is an open subset of a product space $A \times B$, then its projection onto the second factor is open.
#pragma once
#include <stdio.h>
#include <iostream>
#include <boost/test/unit_test.hpp>
#include "easylogging++.h"
|
THESE ARE AWESOME BALLS!! Maple Bacon Donuts from Joeβs Donuts in Pelham, AL.
YUMMY!!!!! I cannot disclose how many I ate. |
Surprisingly, many people are caught unaware of how to survive a zombie attack, despite the release of many educational films dealing with the subject. As such, we at Davis Wiki have assembled a survival guide so you know how to deal with zombie attacks. For more general information, you may also want to check out The Zombie Survival Guide (http://www.amazon.com/ZombieSurvivalGuideCompleteProtection/dp/1400049628).
Initial Response
When you first hear about the attack (say, over the emergency radio), take a good look around you. Look and listen. This, of course, assumes that you don't find out by already being under attack! If you are under attack, or even if you aren't, head for some shelter immediately. It should either be a place that you can secure and hide in, or a vehicle that can help you make a hasty retreat.
It is unlikely that your initial location will be safe, at least in the long term. You'll need to move somewhere you can get food and water, and meet other non-infected humans to watch your back. If you're sure you have time, take any weapons, water, food, flashlights, fuel, and other tools you have on hand with you, in that order. Remember, the goal is survival, not preservation of all of your stuff. Each item will keep you alive a little longer, but none will save you if you dally behind and someone chomps on your brains.
For emergency updates on the outbreak, tune to 101.5 FM or 90.3 FM, or scan the AM band.
Transit
Transportation to a place of safety is key. Of course, in Davis we have a lot of bicycles. Bicycles do have some advantages, such as more flexibility to go through narrow areas and no need for fuel. However, bicycles don't afford nearly as much protection (for the occupants) as a closed-shell vehicle, so you won't be able to drive over the zombies. But by arming yourself with a sharp lance (http://www.youtube.com/watch?v=DnvmNtCccpI) you have also developed a powerful system of dezombification. Remember to watch your bike's weak spot: zombies want chaaaaains. Trailers, extracycles, saddle bags, baskets and hula-hoop/gun racks can help you carry all the zombie-killing and survival gear you need to reach safety. It's not as fast as a car, but it's almost certainly better than running! If one of your friends does have a car, carpooling is a great way to get there safely and efficiently.
For those of you who missed the opportunity to leave town safely, you'll probably want to head to the UC Davis campus, as it has taller, more defensible positions (Sproul Hall, Mrak Hall).
Unless you live on the outskirts of Davis, you may have trouble escaping Davis on your own. So you'll probably want to get to a safe place before proceeding on out of town. You'll also want to stock up on provisions, especially if the attack is widespread.
North Davis, Northern Central Davis
You'll need to get weapons, and you need to get them soon. Big 5 Sporting Goods is the only place that sells guns in the City of Davis, so you'll want to get there soon. They have a lot of windows, so it's not easily defensible; it will be a smash-and-grab mission. If you have time, visit Safeway as well, though beware of meeting zombies here, as it's a much more popular place. Afterward, get on the Highway 113 freeway and go! Go north towards Woodland, as you'll want to get as far away from the Davis population center as possible. If you have reason to think that Woodland may also have infected individuals, exit at either County Road 29 or 27 and turn left (west). When you hit Highway 505, you have a choice. Depending on what you know about the nature of the infection (AM radio is your friend), you may choose to go south towards the Bay Area, or north to the middle of nowhere. If you have no information, go north towards Redding, as the area is much less populated and likely has fewer zombies. Another, alternate idea is to run to the hills by going immediately west, towards Lake Berryessa.
West Davis
Given your relative isolation, you may just want to get the hell out of Dodge. Isolated areas are your friends: easy to see zombies coming, fewer zombies to worry about. If you're near Highway 113, take that. If you're closer to Lake, head north on Lake or west on Russell. If you're going out on Russell, turn north at Road 98. Do not turn south! You do not want to know what happens at the Monkey Farm when an infectious zombie disease strikes. Thinking about it, that's probably where the infection came from in the first place. :( In any case, the north is more isolated with fewer trees than the south or west, and isolated flat plains are your friend. If you have reason to think that Woodland may also have infected individuals, turn left (west) at either County Road 29 or 27. When you hit Highway 505, you have a choice. Depending on what you know about the nature of the infection (AM radio is your friend), you may choose to go south towards the Bay Area, or north to the middle of nowhere. If you have no information, go north towards Redding, as the area is much less populated and likely has fewer zombies.
East Davis
The People's Republic of East Davis is virtually indefensible during a zombie onslaught: crappy road access, and not a lot of services. Best advice is to head to campus. If you can't make it across the tracks to downtown, and can't leave Davis safely, the Nugget supermarket is the only truly defensible supermarket in town! It only has one opening, and no real windows near ground level except for the door itself. When braced with the proper equipment, you could actually stand a chance in here for a while. However, since zombies exhibit superhuman strength, it is not a good idea to stay here for long, as they may break through any barriers you erect. You can also head by the Police Station to pick up firearms and police trained in the use of said firearms.
If you're on the north side, your exit point is up Pole Line Road, a.k.a. County Road 102. You may choose to continue north to Woodland; if Woodland is compromised, pass through the small section of city quickly. You can continue on 102 up towards Knights Landing and Chico. You may also want to turn left on Road 29 or 27, and follow the directions for North Davis above if you feel Woodland would be more dangerous. Southern East Davis: head to Mace and I-80. While eastbound to Sacramento seems attractive, this is a very bad idea. If the disease is kept within Davis, it is likely that the causeway will be destroyed to protect the major population center of Sacramento. Alternatively, if Sacramento is the source of the scourge, then blocking the causeway and rail lines may be a good idea. Granted, if Sacramento is already infected, you're as good as dead. If westbound 80 looks clear, take it; otherwise, head out on Mace into the farm fields. Isolation is your friend, as you can see the zombies coming and there's fewer of them. Don't forget to listen to AM radio, too. Turn right on Midway Road, and head west. Unless you know for sure that Dixon and Vacaville are clear, you'll want to turn south on Highway 113 (a two-lane road). It's the only way across all of the sloughs of the Delta. You'll probably want to turn west upon reaching Highway 12, as it will take you to Travis Air Force Base and Fairfield. Eastward leads to Rio Vista, and from there either Stockton or the far East Bay.
South Davis
There's not much in terms of services or weapons for you, so you'll mostly want to get out of town. For the most part, follow the escape directions for southern East Davis above. However, you have the additional option of taking Drummond south, onto the dirt roads of the farm fields. After that, you can turn left until you hit Mace, turn right (south), and then continue with the directions above.
Downtown Davis, Southern Central Davis
Not many resources for you; just get out using one of the other options. And remember, you can run stop signs in the event of a zombie attack! Be careful of zombies above the underpass: they'll cling to your vehicle!
You can camp out in the Council Chambers, if you must, but beware of Zombie Sue Greenwald trying to munch on Lamar Heystek's head.
Campus
Campus has the most interesting options. And if you're a freshman, it's where you're most likely to be, as you don't have a car. You knew the lack of a car would be the death of you, you just knew it! Not to fear (um, not too much fear)! Campus has some of the best weapons and utilities to help you fight a full-scale zombie invasion. If you have access to a car, you may just want to jet. Northbound 113 is likely the best choice, though westbound 80 is not bad either. Read on to the next section if you can't just leave, or want/need to stock up before you go.
Surviving on Campus
Firearms
There are only two places you can get firearms on campus, which is pretty good considering that's about the same for the whole City of Davis. The first is the UC Davis Police Station, and just up the road, Hoagland Hall. Hoagland houses the ROTC armory in its basement (you'll need to take the freight elevator on the west side down to access it). Warning: it's a dark, unused basement, and seems kind of spooky under normal circumstances. However, if you make it all the way to the other side of the hall, a small room houses the ROTC stash. Getting inside the stash may be problematic without the help of an authorized user, but perhaps some improvised explosive devices could be of use?
Improvised Explosive Devices
The Chemistry Building is the answer. It has a whole section of tanks of gases sitting outside the building in a shed on the south side. Hmm, oxygen and methane... that should work! The nearby Craft Center has some welding classes, which means oxygen and acetylene on tap!
Additionally, use some of the bad vodka from frat parties to make Molotov cocktails.
Fuel
Unless you get a CNG university vehicle, that methane from the Chem Building isn't going to do you any good. However, on La Rue south of the Dairy, Fleet Services has just about any kind of fuel you want. The nearby Unitrans shop even has hydrogen gas. Oh look, another improvised explosive.
Vehicles
Besides fuel, Fleet Services even has vehicles! The vans are probably the best choice, and trucks are also good. However, unless you have time to tie one set of keys to the appropriate vehicle, you may want to check out the Unitrans yard. There's nothing like running over a zombie with a big red bus for catharsis. And did you know that all of the buses are push-to-start? Once you're inside the vehicle, there's no key necessary to start. Now, I know you're thinking: doesn't this pose a security risk to Unitrans operations? Well, the thing about stealing a big red bus is that you've just stolen a big red bus that has Unitrans painted on it. And besides, they're mostly dead by now. There may also be a bus or two parked next to Toomey Field, so you can grab that as well. Unitrans also has the upside of access to emergency channels for police and fire in their radios. An honorable mention goes to the Western Farm Equipment Center, which has slow vehicles like tractors, but the ability to plow the infected down.
There's a particularly dangerous, but particularly good option. If you know how to fly, the University Airport is a great way to go. Once you're in the air, you're basically free. The problem is, it's likely close to ground zero for the infection, which is quite likely the California National Primate Research Center monkey farm.
Camping/Hiding; Water and Food
You might just need a safe place to hide. And due to the number of windows on most campus buildings, this is a really significant problem for zombie defense. There are a few good places on campus, though.
1. Lower Freeborn. Its entry points are well guarded, and it connects to the MU underground. Plus, KDVS will help you tell the world about the terrible, terrible disaster that has befallen Davis. And play some bitchin' tunes. You know, forget what I said earlier about AM radio; keep the dial on KDVS, 90.3 FM. You can also try out the Aggie's Orgazmatron if you're bored. The big downside of this location is lack of food, which you can only get with dangerous raids on the Coffee House or the Aggie Student Store.
2. The Police Station, with its bulletproof glass and weapons, is also a fine choice. There are also some fire engines nearby for escape, but again, no food.
3. Tercero Dining Commons. Tons of food, and it's all on the second floor! Block the main entrance, and use the employee entrances on the back side for exit and entry. If water and gas hold out, which they likely will given the proximity of the water tower, you can survive for days in here. Unfortunately, most of the portable food is downstairs in Trudy's. It's got decent proximity to Fleet Services and Unitrans for escape, but no real weapons other than chairs. Still, it's probably the best place to hang out on campus until help arrives.
Dealing with the infected
Don't be stupid. If you think someone could be infected, restrain them immediately. Be prepared to kill them at the first signs of hunger for brains. Don't forget, they'll have superhuman strength if infected, so do not try out your martial arts and/or self-defense skills. If the incubation period passes, you'll likely be safe, but remember: it's better to be safe than sorry.
If you are unsure how to deal with zombie-infected humans and/or zombies (possibly due to long exposure to the Whole Earth Festival, Philosophy, and/or other influences), you could talk to consultants about the course of action you should take (this is recommended unless you have a zombie-proof shelter or an ejection seat guaranteed to transport you to a zombie-free zone). Be sure to bring as much coffee and as many food supplies as possible if you decide to talk to said consultants, as the length of the consultation is said to depend on however long it takes for the bunnies and squirrels near the Death Star to fall in love and have offspring with each other.
Due to the unpredictability of bunny-squirrel love, it is a good idea to prepare yourself thoroughly beforehand through research (http://www.amazon.com/UndeadPhilosophyChickenSoullessPopular/dp/0812696018) and ethical tactics planning (see "How to Deal with Zombies Ethically" below).
In case of zombie attack on the wiki
Run to Philbo (Users/PhilipNeustrom) or the wokky (Users/JabberWokky), and revert like mad (see Users/WilliamLewis/Revert stats).
How to Deal with Zombies Ethically
For those unsure of the course of action they should employ upon zombies and zombie-infected humans, this might be a good place to ask questions ("Do infected humans necessarily become zombies?" or "Is it ethical to kill a zombie when it/he/she poses harm to me/another human being?"...), consult the experts, pose opinions, and commiserate with other humans who are equally confused. Realizing that the zombies are already dead, or undead, should help any squeamish WEFies get over their nonviolent objections.
National Response
The Centers for Disease Control have finally wised up and issued recommendations for a Zombie Apocalypse emergency plan (http://emergency.cdc.gov/socialmedia/zombies_blog.asp). Thank goodness!
As of March 2013, it looks like UC Davis has followed suit:
Zombie Defense Purchases
Ace Hardware has been known to provide a very complete inventory of supplies for a possible zombie apocalypse (http://www.westlakehardware.com/specialties/zombies). Unfortunately, the known Ace Hardware for this is in Lenexa, Kansas, and not here in Davis. You could possibly order by mail from them, but it would defeat the Davis ethic of buying local. Just another Davis dilemma for dealing with civil defense.
2008-06-16 14:02:58: I am very happy to see this information up. Being prepared for a potential zombie attack is crucial for any community. Let's just hope it's not the fast zombies. (Users/DagonJones)
2008-06-17 15:47:13: A much needed page indeed. I lost an uncle to the Yuba City outbreak back in '85. Had the internet been around on the level it is today, we could have gotten word out faster and prevented a lot more infection. We probably wouldn't have had to put poor uncle Dan down the way we did. Thanks for posting. Go Davis Wiki! (Users/jefftolentino)
2009-01-27 11:54:39: A zombie outbreak was recently reported. Warnings have been placed on road signs, as shown in this photograph:
http://news.cnet.com/830113772_31014922952.html?tagrtcol;pop (Users/IDoNotExist)
2009-02-04 18:59:35: This page adequately addresses a zombie attack due to infection or disease. If the cause was paranormal in nature, however, I'd add that one must be really wary of going anywhere near East Davis, given the prominence of the Davis Cemetery graveyard. (Users/KevinChin)
And after swine flu takes its toll on Davis, there will be a lot more raw material with which to build zombie armies.
2009-04-20 21:00:59: I am reassured by this page, since I will be living in Davis this July. I am glad that there are people out there figuring out how to survive a zombie attack. Thank you, Davis Wiki! (Users/RyanMikulovsky)
2010-11-01 10:54:03: There are some crazy people who are already armed for a zombie attack down in South Davis. I heard that if you're cool enough, they'll let you be part of their survival group. (Users/RickJames)
2013-06-12 03:11:42: Zombies aren't real, people need to get over it. I h8 how my friends pull me into scenarios asking what if zombies were coming to get us and how we would survive. Then I say it's not real, zombies aren't real, and they say "I know." And then they keep on believing. Foolish. (Users/teamdarwin)
|
## This is a very trivial demo of
## the RUnit test case execution system:
## ---------------------------------
## functions to be tested (usually defined in a different
## file from where the test cases are located):
## centigrade to Fahrenheit
c2f <- function(c) return(9/5 * c + 32)
## Fahrenheit to centigrade
f2c <- function(f) return(5/9 * f - 32) ## oops, a bug (parentheses missing; should be 5/9 * (f - 32)) -- left in deliberately so test.f2c below demonstrates a failure
## test functions:
## ---------------------
.setUp <- function() { ## called before each test case, see also .tearDown()
print(".setUp")
}
test.c2f <- function() {
checkEquals(c2f(0), 32)
checkEquals(c2f(10), 50)
## check that an error is created for a bogus argument
checkException(c2f("xx"))
}
test.f2c <- function() {
checkEquals(f2c(32), 0)
checkEquals(f2c(50), 10)
## check that an error is created for a bogus argument
checkException(f2c("xx"))
}
test.errordemo <- function() {
stop("this is just to show what an error looks like as opposed to a failure")
}
## How to run the tests (do not uncomment in this file,
## but execute the commands at the R prompt):
## All you have to do is to adapt the directory locations.
## ------------------------------------------------
## define the test suite:
#testsuite.cf <- defineTestSuite("cfConversion", dirs="directoryOfThisFile")
## run test suite:
#testResult <- runTestSuite(testsuite.cf)
## print text protocol to console:
#printTextProtocol(testResult)
## print HTML version to a file:
#printHTMLProtocol(testResult, fileName="someFileName.html")
## In this case we also have a shortcut
#runTestFile("directoryOfThisFile/runitcfConversion.r")
|
# Load the packages
using GraphNeuralNetworks, DiffEqFlux, DifferentialEquations
using Flux: onehotbatch, onecold
using Flux.Losses: logitcrossentropy
using Statistics: mean
using MLDatasets: Cora
using CUDA
# CUDA.allowscalar(false) # Some scalar indexing is still done by DiffEqFlux
# device = cpu # `gpu` not working yet
device = CUDA.functional() ? gpu : cpu
# LOAD DATA
data = Cora.dataset()
g = GNNGraph(data.adjacency_list) |> device
X = data.node_features |> device
y = onehotbatch(data.node_labels, 1:data.num_classes) |> device
train_ids = data.train_indices |> device
val_ids = data.val_indices |> device
test_ids = data.test_indices |> device
ytrain = y[:, train_ids]
# Model and Data Configuration
nin = size(X, 1)
nhidden = 16
nout = data.num_classes
epochs = 40
# Define the Neural GDE
diffeqsol_to_array(x) = reshape(device(x), size(x)[1:2])
# GCNConv(nhidden => nhidden, graph=g),
node_chain = GNNChain(GCNConv(nhidden => nhidden, relu),
GCNConv(nhidden => nhidden, relu)) |> device
node = NeuralODE(WithGraph(node_chain, g),
(0.f0, 1.f0), Tsit5(), save_everystep = false,
reltol = 1e-3, abstol = 1e-3, save_start = false) |> device
model = GNNChain(GCNConv(nin => nhidden, relu),
Dropout(0.5),
node,
diffeqsol_to_array,
Dense(nhidden, nout)) |> device
# Loss
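# NOTE: the loss below is computed over *all* nodes. For the standard Cora
# semi-supervised protocol one would mask to the training split, e.g.
#   loss(x, y) = logitcrossentropy(model(g, x)[:, train_ids], y[:, train_ids])
# `ytrain` above is prepared for exactly that kind of masking.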
loss(x, y) = logitcrossentropy(model(g, x), y)
accuracy(x, y) = mean(onecold(model(g, x)) .== onecold(y))
# # Training
# ## Model Parameters
ps = Flux.params(model);
# ## Optimizer
opt = ADAM(0.01)
# ## Training Loop
for epoch in 1:epochs
gs = gradient(() -> loss(X, y), ps)
Flux.Optimise.update!(opt, ps, gs)
@show(accuracy(X, y))
end
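# A minimal evaluation sketch (not part of the original script): report
# accuracy on the held-out test split after training. Assumes indexing the
# model output by `test_ids` is permitted on the chosen device.
test_acc = mean(onecold(model(g, X)[:, test_ids]) .== onecold(y[:, test_ids]))
@show test_acc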
|
The operating system is CentOS, a community-driven Linux distribution and Red Hat Enterprise Linux clone. The Fastra II received a positive public impression. <unk> called it the "world's most powerful desktop-sized supercomputer", describing it as a computer with "so much power in such a small space". <unk> News Net called it "the Most Powerful Desktop Supercomputer".
|
{-# OPTIONS --without-K #-}
module function.isomorphism.utils where
open import sum
open import equality.core
open import equality.calculus
open import function.core
open import function.overloading
open import function.isomorphism.core
open import function.isomorphism.coherent
open import function.extensionality.proof
open import sets.unit
open import sets.empty
open import sets.fin.core
open import hott.level.core
Σ-split-iso : ∀ {i j}{X : Set i}{Y : X → Set j}
            → {a a' : X}{b : Y a}{b' : Y a'}
            → (Σ (a ≡ a') λ q → subst Y q b ≡ b')
            ≅ ((a , b) ≡ (a' , b'))
Σ-split-iso {Y = Y}{a}{a'}{b}{b'} = iso unapΣ apΣ H K
  where
    H : ∀ {a a'}{b : Y a}{b' : Y a'}
      → (p : Σ (a ≡ a') λ q → subst Y q b ≡ b')
      → apΣ (unapΣ {a = a}{a' = a'}{b = b}{b' = b'} p) ≡ p
    H (refl , refl) = refl

    K : (p : (a , b) ≡ (a' , b')) → unapΣ (apΣ p) ≡ p
    K = J (λ u v p → unapΣ (apΣ p) ≡ p)
          (λ {(a , b) → refl })
          (a , b) (a' , b')

×-split-iso : ∀ {i j}{X : Set i}{Y : Set j}
            → {a a' : X}{b b' : Y}
            → ((a ≡ a') × (b ≡ b'))
            ≅ ((a , b) ≡ (a' , b'))
×-split-iso {X = X}{Y} = record
  { to = λ { (p , q) → ap₂ _,_ p q }
  ; from = λ { p → (ap proj₁ p , ap proj₂ p) }
  ; iso₁ = λ { (p , q) → H p q }
  ; iso₂ = K }
  where
    H : {a a' : X}{b b' : Y}(p : a ≡ a')(q : b ≡ b')
      → (ap proj₁ (ap₂ _,_ p q), ap proj₂ (ap₂ _,_ p q)) ≡ (p , q)
    H refl refl = refl

    K : {a a' : X}{b b' : Y}(p : (a , b) ≡ (a' , b'))
      → ap₂ _,_ (ap proj₁ p) (ap proj₂ p) ≡ p
    K refl = refl

×-ap-iso : ∀ {i i' j j'}{X : Set i}{X' : Set i'}
             {Y : Set j}{Y' : Set j'}
         → (isom : X ≅ X')
         → (isom' : Y ≅ Y')
         → (X × Y) ≅ (X' × Y')
×-ap-iso isom isom' = record
  { to = λ { (x , y) → (apply isom x , apply isom' y) }
  ; from = λ { (x' , y') → (invert isom x' , invert isom' y') }
  ; iso₁ = λ { (x , y) → pair≡ (_≅_.iso₁ isom x) (_≅_.iso₁ isom' y) }
  ; iso₂ = λ { (x' , y') → pair≡ (_≅_.iso₂ isom x') (_≅_.iso₂ isom' y') } }

Σ-ap-iso₂ : ∀ {i j j'}{X : Set i}
          → {Y : X → Set j}{Y' : X → Set j'}
          → ((x : X) → Y x ≅ Y' x)
          → Σ X Y ≅ Σ X Y'
Σ-ap-iso₂ {X = X}{Y}{Y'} isom = record
  { to = λ { (x , y) → (x , apply (isom x) y) }
  ; from = λ { (x , y') → (x , invert (isom x) y') }
  ; iso₁ = λ { (x , y) → unapΣ (refl , _≅_.iso₁ (isom x) y) }
  ; iso₂ = λ { (x , y') → unapΣ (refl , _≅_.iso₂ (isom x) y') } }

Σ-ap-iso₁ : ∀ {i i' j}{X : Set i}{X' : Set i'}{Y : X' → Set j}
          → (isom : X ≅ X')
          → Σ X (Y ∘ apply isom) ≅ Σ X' Y
Σ-ap-iso₁ {X = X}{X'}{Y} isom = record
  { to = λ { (x , y) → (f x , y) }
  ; from = λ { (x , y) → (g x , subst Y (sym (K x)) y) }
  ; iso₁ = λ { (x , y) → unapΣ (H x ,
        subst-naturality Y f (H x) _
      · (subst-hom Y (sym (K (f x))) (ap f (H x)) y
      · ap (λ p → subst Y p y) (lem x) ) ) }
  ; iso₂ = λ { (x , y) → unapΣ (K x ,
        subst-hom Y (sym (K x)) (K x) y
      · ap (λ p → subst Y p y) (right-inverse (K x)) ) } }
  where
    isom-c = ≅⇒≅' isom
    γ = proj₂ isom-c

    open _≅_ (proj₁ isom-c)
      renaming ( to to f ; from to g
               ; iso₁ to H ; iso₂ to K )

    lem : (x : X) → sym (K (f x)) · ap f (H x) ≡ refl
    lem x = ap (λ z → sym (K (f x)) · z) (γ x)
          · right-inverse (K (f x))

Σ-ap-iso : ∀ {i i' j j'}{X : Set i}{X' : Set i'}
             {Y : X → Set j}{Y' : X' → Set j'}
         → (isom : X ≅ X')
         → ((x : X) → Y x ≅ Y' (apply isom x))
         → Σ X Y ≅ Σ X' Y'
Σ-ap-iso {X = X}{X'}{Y}{Y'} isom isom' =
  trans≅ (Σ-ap-iso₂ isom') (Σ-ap-iso₁ isom)

Σ-ap-iso' : ∀ {i i' j j'}{X : Set i}{X' : Set i'}
              {Y : X → Set j}{Y' : X' → Set j'}
          → (isom : X ≅ X')
          → ((x : X') → Y (invert isom x) ≅ Y' x)
          → Σ X Y ≅ Σ X' Y'
Σ-ap-iso' {X = X}{X'}{Y}{Y'} isom isom'
  = sym≅ (Σ-ap-iso (sym≅ isom) (λ x → sym≅ (isom' x)))

Π-ap-iso : ∀ {i i' j j'}{X : Set i}{X' : Set i'}
             {Y : X → Set j}{Y' : X' → Set j'}
         → (isom : X ≅ X')
         → ((x' : X') → Y (invert isom x') ≅ Y' x')
         → ((x : X) → Y x)
         ≅ ((x' : X') → Y' x')
Π-ap-iso {X = X}{X'}{Y}{Y'} isom isom' =
  trans≅ (Π-iso (≅⇒≅' isom)) (Π-iso' isom')
  where
    Π-iso : (isom : X ≅' X')
          → ((x : X) → Y x)
          ≅ ((x' : X') → Y (invert (proj₁ isom) x'))
    Π-iso (iso f g H K , γ) = record
      { to = λ h x' → h (g x')
      ; from = λ h' x → subst Y (H x) (h' (f x))
      ; iso₁ = λ h → funext λ x → ap' h (H x)
      ; iso₂ = λ h' → funext λ x' →
            ap (λ p → subst Y p _) (sym (γ' x'))
          · sym (subst-naturality Y g (K x') _)
          · ap' h' (K x') }
      where γ' = co-coherence (iso f g H K) γ

    Π-iso' : ∀ {i j j'}{X : Set i}
               {Y : X → Set j}{Y' : X → Set j'}
           → ((x : X) → Y x ≅ Y' x)
           → ((x : X) → Y x)
           ≅ ((x : X) → Y' x)
    Π-iso' isom = record
      { to = λ h x → apply (isom x) (h x)
      ; from = λ h' x → invert (isom x) (h' x)
      ; iso₁ = λ h → funext λ x → _≅_.iso₁ (isom x) _
      ; iso₂ = λ h' → funext λ x → _≅_.iso₂ (isom x) _ }

ΠΣ-swap-iso : ∀ {i j k}{X : Set i}{Y : X → Set j}
            → {Z : (x : X) → Y x → Set k}
            → ((x : X) → Σ (Y x) λ y → Z x y)
            ≅ (Σ ((x : X) → Y x) λ f → ((x : X) → Z x (f x)))
ΠΣ-swap-iso = record
  { to = λ f → (proj₁ ∘' f , proj₂ ∘' f)
  ; from = λ { (f , g) x → (f x , g x) }
  ; iso₁ = λ _ → refl
  ; iso₂ = λ _ → refl }

curry-iso : ∀ {i j k}{X : Set i}{Y : X → Set j}
            (Z : (x : X) → Y x → Set k)
          → ((xy : Σ X Y) → Z (proj₁ xy) (proj₂ xy))
          ≅ ((x : X) → (y : Y x) → Z x y)
curry-iso _ = record
  { to = λ f x y → f (x , y)
  ; from = λ { f (x , y) → f x y }
  ; iso₁ = λ _ → refl
  ; iso₂ = λ _ → refl }

Π-comm-iso : ∀ {i j k}{X : Set i}{Y : Set j}{Z : X → Y → Set k}
           → ((x : X)(y : Y) → Z x y)
           ≅ ((y : Y)(x : X) → Z x y)
Π-comm-iso = record
  { to = λ f y x → f x y
  ; from = λ f x y → f y x
  ; iso₁ = λ _ → refl
  ; iso₂ = λ _ → refl }

Σ-comm-iso : ∀ {i j k}{X : Set i}{Y : Set j}{Z : X → Y → Set k}
           → (Σ X λ x → Σ Y λ y → Z x y)
           ≅ (Σ Y λ y → Σ X λ x → Z x y)
Σ-comm-iso = record
  { to = λ { (x , y , z) → (y , x , z) }
  ; from = λ { (y , x , z) → (x , y , z) }
  ; iso₁ = λ _ → refl
  ; iso₂ = λ _ → refl }

impl-iso : ∀ {i j}{X : Set i}{Y : X → Set j}
         → ((x : X) → Y x) ≅ ({x : X} → Y x)
impl-iso = record
  { to = λ f → f _
  ; from = λ f _ → f
  ; iso₁ = λ _ → refl
  ; iso₂ = λ _ → refl }

Σ-assoc-iso : ∀ {i j k}
                {X : Set i}{Y : X → Set j}
                {Z : (x : X) → Y x → Set k}
            → Σ (Σ X Y) (λ {(x , y) → Z x y})
            ≅ Σ X λ x → Σ (Y x) (Z x)
Σ-assoc-iso = record
  { to = λ {((x , y) , z) → (x , y , z) }
  ; from = λ {(x , y , z) → ((x , y) , z) }
  ; iso₁ = λ _ → refl
  ; iso₂ = λ _ → refl }

⊎-Σ-iso : ∀ {i}(X : Fin 2 → Set i)
        → (X zero ⊎ X (suc zero))
        ≅ Σ (Fin 2) X
⊎-Σ-iso X = record
  { to = λ { (inj₁ x) → zero , x
           ; (inj₂ x) → suc zero , x }
  ; from = λ { (zero , x) → inj₁ x
             ; (suc zero , x) → inj₂ x
             ; (suc (suc ()) , _) }
  ; iso₁ = λ { (inj₁ x) → refl
             ; (inj₂ x) → refl }
  ; iso₂ = λ { (zero , x) → refl
             ; (suc zero , x) → refl
             ; (suc (suc ()) , _) } }

⊎-ap-iso : ∀ {i j i' j'}
         → {X : Set i}{X' : Set i'}
         → {Y : Set j}{Y' : Set j'}
         → X ≅ X'
         → Y ≅ Y'
         → (X ⊎ Y) ≅ (X' ⊎ Y')
⊎-ap-iso (iso f g α β) (iso f' g' α' β') = record
  { to = λ { (inj₁ x) → inj₁ (f x) ; (inj₂ y) → inj₂ (f' y) }
  ; from = λ { (inj₁ x) → inj₁ (g x) ; (inj₂ y) → inj₂ (g' y) }
  ; iso₁ = λ { (inj₁ x) → ap inj₁ (α x) ; (inj₂ y) → ap inj₂ (α' y) }
  ; iso₂ = λ { (inj₁ x) → ap inj₁ (β x) ; (inj₂ y) → ap inj₂ (β' y) } }

⊎-assoc-iso : ∀ {i j k}
            → {X : Set i}{Y : Set j}{Z : Set k}
            → ((X ⊎ Y) ⊎ Z)
            ≅ (X ⊎ (Y ⊎ Z))
⊎-assoc-iso = record
  { to = λ { (inj₁ (inj₁ x)) → inj₁ x
           ; (inj₁ (inj₂ y)) → inj₂ (inj₁ y)
           ; (inj₂ z) → inj₂ (inj₂ z) }
  ; from = λ { (inj₁ x) → inj₁ (inj₁ x)
             ; (inj₂ (inj₁ y)) → inj₁ (inj₂ y)
             ; (inj₂ (inj₂ z)) → inj₂ z }
  ; iso₁ = λ { (inj₁ (inj₁ x)) → refl
             ; (inj₁ (inj₂ y)) → refl
             ; (inj₂ z) → refl }
  ; iso₂ = λ { (inj₁ x) → refl
             ; (inj₂ (inj₁ y)) → refl
             ; (inj₂ (inj₂ z)) → refl } }

⊎×-distr-iso : ∀ {i j k}
             → {X : Set i}{Y : Set j}{Z : Set k}
             → ((X ⊎ Y) × Z)
             ≅ ((X × Z) ⊎ (Y × Z))
⊎×-distr-iso = record
  { to = λ { (inj₁ x , z) → inj₁ (x , z)
           ; (inj₂ y , z) → inj₂ (y , z) }
  ; from = λ { (inj₁ (x , z)) → inj₁ x , z
             ; (inj₂ (y , z)) → inj₂ y , z }
  ; iso₁ = λ { (inj₁ x , z) → refl
             ; (inj₂ y , z) → refl }
  ; iso₂ = λ { (inj₁ (x , z)) → refl
             ; (inj₂ (y , z)) → refl } }

⊎-universal : ∀ {i j k}{X : Set i}{Y : Set j}
            → {Z : X ⊎ Y → Set k}
            → ((u : X ⊎ Y) → Z u)
            ≅ (((x : X) → Z (inj₁ x)) × ((y : Y) → Z (inj₂ y)))
⊎-universal = record
  { to = λ f → (f ∘' inj₁ , f ∘' inj₂)
  ; from = λ { (g₁ , g₂) (inj₁ x) → g₁ x
             ; (g₁ , g₂) (inj₂ y) → g₂ y }
  ; iso₁ = λ f → funext λ
      { (inj₁ x) → refl
      ; (inj₂ x) → refl }
  ; iso₂ = λ { (g₁ , g₂) → refl } }

×-left-unit : ∀ {i}{X : Set i} → (⊤ × X) ≅ X
×-left-unit = record
  { to = λ {(tt , x) → x }
  ; from = λ x → tt , x
  ; iso₁ = λ _ → refl
  ; iso₂ = λ _ → refl }

×-right-unit : ∀ {i}{X : Set i} → (X × ⊤) ≅ X
×-right-unit = record
  { to = λ {(x , tt) → x }
  ; from = λ x → x , tt
  ; iso₁ = λ _ → refl
  ; iso₂ = λ _ → refl }

contr-⊤-iso : ∀ {i}{X : Set i}
            → contr X → X ≅ ⊤
contr-⊤-iso hX = record
  { to = λ x → tt
  ; from = λ { tt → proj₁ hX }
  ; iso₁ = λ x → proj₂ hX x
  ; iso₂ = λ { tt → refl } }

empty-⊥-iso : ∀ {i}{X : Set i}
            → (X → ⊥) → X ≅ ⊥
empty-⊥-iso u = record
  { to = u
  ; from = ⊥-elim
  ; iso₁ = λ x → ⊥-elim (u x)
  ; iso₂ = λ () }

×-comm : ∀ {i j}{X : Set i}{Y : Set j}
       → (X × Y) ≅ (Y × X)
×-comm = record
  { to = λ {(x , y) → (y , x)}
  ; from = λ {(y , x) → (x , y)}
  ; iso₁ = λ _ → refl
  ; iso₂ = λ _ → refl }

Π-left-unit : ∀ {i}{X : Set i}
            → (⊤ → X) ≅ X
Π-left-unit = record
  { to = λ f → f tt
  ; from = λ x _ → x
  ; iso₁ = λ _ → refl
  ; iso₂ = λ f → refl }

-- rewriting lemmas for equations on equalities
sym≡-iso : ∀ {i}{X : Set i}(x y : X)
         → (x ≡ y)
         ≅ (y ≡ x)
sym≡-iso _ _ = iso sym sym double-inverse double-inverse

trans≡-iso : ∀ {i}{X : Set i}{x y z : X}
           → (x ≡ y)
           → (y ≡ z) ≅ (x ≡ z)
trans≡-iso p = record
  { to = λ q → p · q
  ; from = λ q → sym p · q
  ; iso₁ = λ q → sym (associativity (sym p) p q)
               · ap (λ z → z · q) (right-inverse p)
  ; iso₂ = λ q → sym (associativity p (sym p) q)
               · ap (λ z → z · q) (left-inverse p) }

trans≡-iso' : ∀ {i}{X : Set i}{x y z : X}
            → (y ≡ z)
            → (x ≡ y) ≅ (x ≡ z)
trans≡-iso' q = record
  { to = λ p → p · q
  ; from = λ p → p · sym q
  ; iso₁ = λ p → associativity p q (sym q)
               · ap (_·_ p) (left-inverse q)
               · left-unit p
  ; iso₂ = λ p → associativity p (sym q) q
               · (ap (_·_ p) (right-inverse q)
               · left-unit p) }

move-≡-iso : ∀ {i}{X : Set i}{x y z : X}
           → (p : x ≡ y)(q : y ≡ z)(r : x ≡ z)
           → (p · q ≡ r)
           ≅ (sym p · r ≡ q)
move-≡-iso refl = sym≡-iso

J-iso : ∀ {i j}{X : Set i}{x : X}
      → {P : (y : X) → x ≡ y → Set j}
      → P x refl
      ≅ ((y : X)(p : x ≡ y) → P y p)
J-iso {X = X}{x}{P} = record
  { to = J' P
  ; from = λ u → u x refl
  ; iso₁ = λ _ → refl
  ; iso₂ = λ u → funext λ y → funext λ p → β u y p }
  where
    β : (u : (y : X)(p : x ≡ y) → P y p)
      → (y : X)(p : x ≡ y)
      → J' P (u x refl) y p ≡ u y p
    β u .x refl = refl
|
#!/usr/bin/env python
# vim: ts=4 sw=4 et
__author__ = "Tamas Gal"
from datetime import datetime
from collections import deque, defaultdict
from functools import partial
from io import BytesIO  # DAQ payloads are raw bytes, so BytesIO (not StringIO)
import os
import shutil
import time
import threading
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.colors import LogNorm
from matplotlib import pylab
import pandas as pd
import numpy as np
from km3pipe import Pipeline, Module
from km3pipe.hardware import Detector
from km3pipe.io import CHPump
from km3pipe.io.daq import DAQPreamble, DAQSummaryslice, DAQEvent
from km3pipe.time import tai_timestamp
import km3pipe.style
km3pipe.style.use('km3pipe')
PLOTS_PATH = '/home/km3net/monitoring/www/plots'
N_DOMS = 18
N_DUS = 2
detector = Detector(det_id=14)
xfmt = md.DateFormatter('%Y-%m-%d %H:%M')
lock = threading.Lock()
class DOMHits(Module):
def configure(self):
self.run = True
self.max_events = 1000
self.hits = deque(maxlen=1000)
self.triggered_hits = deque(maxlen=1000)
        # keep a handle on the thread (Thread.start() returns None)
        self.thread = threading.Thread(target=self.plot)
        self.thread.start()
def process(self, blob):
tag = str(blob['CHPrefix'].tag)
if not tag == 'IO_EVT':
return blob
data = blob['CHData']
        data_io = BytesIO(data)
preamble = DAQPreamble(file_obj=data_io) # noqa
event = DAQEvent(file_obj=data_io)
with lock:
hits = np.zeros(N_DOMS * N_DUS)
for dom_id, _, _, _ in event.snapshot_hits:
du, floor, _ = detector.doms[dom_id]
hits[(du - 1) * N_DOMS + floor - 1] += 1
self.hits.append(hits)
triggered_hits = np.zeros(N_DOMS * N_DUS)
for dom_id, _, _, _, _ in event.triggered_hits:
du, floor, _ = detector.doms[dom_id]
triggered_hits[(du - 1) * N_DOMS + floor - 1] += 1
self.triggered_hits.append(triggered_hits)
return blob
def plot(self):
while self.run:
with lock:
self.create_plots()
time.sleep(10)
def create_plots(self):
if len(self.hits) > 0:
self.create_plot(self.hits, "Hits on DOMs", 'hits_on_doms')
if len(self.triggered_hits) > 0:
self.create_plot(
self.triggered_hits, "Triggered Hits on DOMs",
'triggered_hits_on_doms'
)
def create_plot(self, hits, title, filename):
fig, ax = plt.subplots(figsize=(16, 8))
ax.grid(True)
ax.set_axisbelow(True)
hit_matrix = np.array([np.array(x) for x in hits]).transpose()
im = ax.matshow(
hit_matrix,
interpolation='nearest',
filternorm=None,
cmap='plasma',
aspect='auto',
origin='lower',
zorder=3,
norm=LogNorm(vmin=1, vmax=np.amax(hit_matrix))
)
yticks = np.arange(N_DOMS * N_DUS)
ytick_labels = [
"DU{0:0.0f}-DOM{1:02d}".format(
np.ceil((y + 1) / N_DOMS), y % (N_DOMS) + 1
) for y in yticks
]
ax.set_yticks(yticks)
ax.set_yticklabels(ytick_labels)
ax.tick_params(labelbottom=False)
ax.tick_params(labeltop=False)
ax.set_xlabel("event (latest on the right)")
ax.set_title(
"{0} - via the last {1} Events\n{2}".format(
title, self.max_events, time.strftime("%c")
)
)
cb = fig.colorbar(im, pad=0.05)
cb.set_label("number of hits")
fig.tight_layout()
f = os.path.join(PLOTS_PATH, filename + '.png')
f_tmp = os.path.join(PLOTS_PATH, filename + '_tmp.png')
plt.savefig(f_tmp, dpi=120, bbox_inches="tight")
plt.close('all')
shutil.move(f_tmp, f)
    def finish(self):
        self.run = False
        if self.thread is not None:
            self.thread.join()  # Thread has no stop(); let the plot loop exit
class TriggerRate(Module):
def configure(self):
self.run = True
self.event_times = deque(maxlen=4000)
self.trigger_rates = deque(maxlen=4000)
        self.thread = threading.Thread(target=self.plot)
        self.thread.start()
self.store = pd.HDFStore('data/trigger_rates.h5')
self.restore_data()
def restore_data(self):
with lock:
data = zip(
self.store.trigger_rates.timestamp,
self.store.trigger_rates.rate
)
self.trigger_rates.extend(data)
print("{0} data points restored.".format(len(self.trigger_rates)))
def process(self, blob):
tag = str(blob['CHPrefix'].tag)
if not tag == 'IO_EVT':
return blob
data = blob['CHData']
        data_io = BytesIO(data)
preamble = DAQPreamble(file_obj=data_io) # noqa
event = DAQEvent(file_obj=data_io)
timestamp = event.header.time_stamp
with lock:
self.event_times.append(timestamp)
return blob
def plot(self):
while self.run:
print("Obtaining lock")
with lock:
self.create_plot()
print("Releasing lock")
time.sleep(10)
def create_plot(self):
print(self.__class__.__name__ + ": updating plot.")
timestamp = time.time()
now = datetime.fromtimestamp(timestamp)
        interval = 60  # look-back window in seconds
        n_events = sum(t > timestamp - interval for t in self.event_times)
        rate = n_events / interval  # events per second over the last minute
self.trigger_rates.append((now, rate))
self.store.append(
'trigger_rates', pd.DataFrame({
'timestamp': [now],
'rate': [rate]
})
)
print(
"Number of rates recorded: {0} (last: {1}".format(
len(self.trigger_rates), self.trigger_rates[-1]
)
)
x, y = zip(*self.trigger_rates)
fig, ax = plt.subplots(figsize=(16, 4))
ax.xaxis.set_major_formatter(xfmt)
data = pd.DataFrame({'dates': x, 'rates': y})
data.plot('dates', 'rates', grid=True, ax=ax, legend=False, style='.')
ax.set_title(
"Trigger Rate - via Event Times\n{0}".format(time.strftime("%c"))
)
ax.set_xlabel("time")
ax.set_ylabel("trigger rate [Hz]")
try:
ax.set_yscale('log')
except ValueError:
pass
fig.tight_layout()
filename = os.path.join(PLOTS_PATH, 'trigger_rates.png')
filename_tmp = os.path.join(PLOTS_PATH, 'trigger_rates_tmp.png')
plt.savefig(filename_tmp, dpi=120, bbox_inches="tight")
plt.close('all')
shutil.move(filename_tmp, filename)
    def finish(self):
        self.run = False
        self.store.close()
        if self.thread is not None:
            self.thread.join()  # Thread has no stop(); let the plot loop exit
class DOMActivityPlotter(Module):
def configure(self):
self.index = 0
self.rates = defaultdict(partial(deque, maxlen=4000))
self.run = True
        self.thread = threading.Thread(target=self.plot, args=())
        self.thread.start()
def process(self, blob):
self.index += 1
if self.index % 30:
return blob
tag = str(blob['CHPrefix'].tag)
data = blob['CHData']
if not tag == 'IO_SUM':
return blob
data = blob['CHData']
        data_io = BytesIO(data)
preamble = DAQPreamble(file_obj=data_io) # noqa
summaryslice = DAQSummaryslice(file_obj=data_io)
timestamp = summaryslice.header.time_stamp
with lock:
            for dom_id, rates in summaryslice.summary_frames.items():  # .iteritems() is Python 2 only
du, dom, _ = detector.doms[dom_id]
self.rates[(du, dom)].append((timestamp, sum(rates)))
return blob
def plot(self):
while self.run:
with lock:
self.create_plot()
time.sleep(5)
def create_plot(self):
x, y, _ = zip(*detector.doms.values())
fig, ax = plt.subplots(figsize=(10, 6))
cmap = plt.get_cmap('RdYlGn_r')
cmap.set_over('deeppink', 1.0)
cmap.set_under('deepskyblue', 1.0)
vmax = 15 * 60
scatter_args = {
'edgecolors': 'None',
's': 100,
'vmin': 0.0,
'vmax': vmax,
}
sc_inactive = ax.scatter(
x, y, c='lightgray', label='inactive', **scatter_args
)
now = tai_timestamp()
try:
xa, ya = map(np.array, zip(*self.rates.keys()))
            ts = np.array([now - max(t for t, _ in d) for d in self.rates.values()])  # zip(*d)[0] fails on Python 3
except ValueError:
print("Not enough data.")
pass
else:
active_idx = ts < vmax
sc_active = ax.scatter(
xa[active_idx],
ya[active_idx],
c=ts[active_idx],
cmap=cmap,
**scatter_args
)
ax.scatter(
xa[~active_idx],
ya[~active_idx],
c='deeppink',
label='> {0} s'.format(vmax),
**scatter_args
)
cb = plt.colorbar(sc_active)
cb.set_label("last activity [s]")
ax.set_title(
"DOM Activity - via Summary Slices\n{0}".format(
time.strftime("%c")
)
)
ax.set_xlabel("DU")
ax.set_ylabel("DOM")
ax.set_ylim(-2)
ax.set_yticks(range(1, N_DOMS + 1))
major_locator = pylab.MaxNLocator(integer=True)
sc_inactive.axes.xaxis.set_major_locator(major_locator)
ax.legend(
bbox_to_anchor=(0., -.16, 1., .102),
loc=1,
ncol=2,
mode="expand",
borderaxespad=0.
)
fig.tight_layout()
filename = os.path.join(PLOTS_PATH, 'dom_activity.png')
filename_tmp = os.path.join(PLOTS_PATH, 'dom_activity_tmp.png')
plt.savefig(filename_tmp, dpi=120, bbox_inches="tight")
plt.close('all')
shutil.move(filename_tmp, filename)
    def finish(self):
        self.run = False
        if self.thread is not None:
            self.thread.join()  # Thread has no stop(); let the plot loop exit
pipe = Pipeline()
pipe.attach(
CHPump,
host='192.168.0.110',
port=5553,
tags='IO_EVT, IO_SUM',
timeout=60 * 60 * 24 * 7,
max_queue=2000
)
pipe.attach(DOMActivityPlotter)
pipe.attach(TriggerRate)
pipe.attach(DOMHits)
pipe.drain()
|
theory Degeneralization
imports
Acceptance
Sequence_Zip
begin
type_synonym 'a degen = "'a \<times> nat"
definition degen :: "'a pred gen \<Rightarrow> 'a degen pred" where
"degen cs \<equiv> \<lambda> (a, k). k \<ge> length cs \<or> (cs ! k) a"
lemma degen_simps[iff]: "degen cs (a, k) \<longleftrightarrow> k \<ge> length cs \<or> (cs ! k) a" unfolding degen_def by simp
definition count :: "'a pred gen \<Rightarrow> 'a \<Rightarrow> nat \<Rightarrow> nat" where
"count cs a k \<equiv>
if k < length cs
then if (cs ! k) a then Suc k mod length cs else k
else if cs = [] then k else 0"
lemma count_empty[simp]: "count [] a k = k" unfolding count_def by simp
lemma count_nonempty[simp]: "cs \<noteq> [] \<Longrightarrow> count cs a k < length cs" unfolding count_def by simp
lemma count_constant_1:
assumes "k < length cs"
assumes "\<And> a. a \<in> set w \<Longrightarrow> \<not> (cs ! k) a"
shows "fold (count cs) w k = k"
using assms unfolding count_def by (induct w) (auto)
lemma count_constant_2:
assumes "k < length cs"
assumes "\<And> a. a \<in> set (w || k # scan (count cs) w k) \<Longrightarrow> \<not> degen cs a"
shows "fold (count cs) w k = k"
using assms unfolding count_def by (induct w) (auto)
lemma count_step:
assumes "k < length cs"
assumes "(cs ! k) a"
shows "count cs a k = Suc k mod length cs"
using assms unfolding count_def by simp
lemma degen_skip_condition:
assumes "k < length cs"
assumes "infs (degen cs) (w ||| k ## sscan (count cs) w k)"
obtains u a v
where "w = u @- a ## v" "fold (count cs) u k = k" "(cs ! k) a"
proof -
have 1: "Collect (degen cs) \<inter> sset (w ||| k ## sscan (count cs) w k) \<noteq> {}"
using infs_any assms(2) by auto
obtain ys x zs where 2:
"w ||| k ## sscan (count cs) w k = ys @- x ## zs"
"Collect (degen cs) \<inter> set ys = {}"
"x \<in> Collect (degen cs)"
using split_stream_first 1 by this
define u where "u \<equiv> stake (length ys) w"
define a where "a \<equiv> w !! length ys"
define v where "v \<equiv> sdrop (Suc (length ys)) w"
have "ys = stake (length ys) (w ||| k ## sscan (count cs) w k)" using shift_eq 2(1) by auto
also have "\<dots> = stake (length ys) w || stake (length ys) (k ## sscan (count cs) w k)" by simp
also have "\<dots> = take (length ys) u || take (length ys) (k # scan (count cs) u k)"
unfolding u_def
using append_eq_conv_conj length_stake length_zip stream.sel
using sscan_stake stake.simps(2) stake_Suc stake_szip take_stake
by metis
also have "\<dots> = take (length ys) (u || k # scan (count cs) u k)" using take_zip by rule
also have "\<dots> = u || k # scan (count cs) u k" unfolding u_def by simp
finally have 3: "ys = u || k # scan (count cs) u k" by this
have "x = (w ||| k ## sscan (count cs) w k) !! length ys" unfolding 2(1) by simp
also have "\<dots> = (w !! length ys, (k ## sscan (count cs) w k) !! length ys)" by simp
also have "\<dots> = (a, fold (count cs) u k)" unfolding u_def a_def by simp
finally have 4: "x = (a, fold (count cs) u k)" by this
have 5: "fold (count cs) u k = k" using count_constant_2 assms(1) 2(2) unfolding 3 by blast
show ?thesis
proof
show "w = u @- a ## v" unfolding u_def a_def v_def using id_stake_snth_sdrop by this
show "fold (count cs) u k = k" using 5 by this
show "(cs ! k) a" using assms(1) 2(3) unfolding 4 5 by simp
qed
qed
lemma degen_skip_arbitrary:
assumes "k < length cs" "l < length cs"
assumes "infs (degen cs) (w ||| k ## sscan (count cs) w k)"
obtains u v
where "w = u @- v" "fold (count cs) u k = l"
using assms
proof (induct "nat ((int l - int k) mod length cs)" arbitrary: l thesis)
case (0)
have 1: "length cs > 0" using assms(1) by auto
have 2: "(int l - int k) mod length cs = 0" using 0(1) 1 by (auto intro: antisym)
have 3: "int l mod length cs = int k mod length cs" using mod_eq_dvd_iff 2 by force
have 4: "k = l" using 0(3, 4) 3 by simp
show ?case
proof (rule 0(2))
show "w = [] @- w" by simp
show "fold (count cs) [] k = l" using 4 by simp
qed
next
case (Suc n)
have 1: "length cs > 0" using assms(1) by auto
define l' where "l' = nat ((int l - 1) mod length cs)"
obtain u v where 2: "w = u @- v" "fold (count cs) u k = l'"
proof (rule Suc(1))
have 2: "Suc n < length cs" using nat_less_iff Suc(2) 1 by simp
have "n = nat (int n)" by simp
also have "int n = (int (Suc n) - 1) mod length cs" using 2 by simp
also have "\<dots> = (int l - int k - 1) mod length cs" using Suc(2) by (simp add: mod_simps)
also have "\<dots> = (int l - 1 - int k) mod length cs" by (simp add: algebra_simps)
also have "\<dots> = (int l' - int k) mod length cs" using l'_def 1 by (simp add: mod_simps)
finally show "n = nat ((int l' - int k) mod length cs)" by this
show "k < length cs" using Suc(4) by this
show "l' < length cs" using nat_less_iff l'_def 1 by simp
show "infs (degen cs) (w ||| k ## sscan (count cs) w k)" using Suc(6) by this
qed
have 3: "l' < length cs" using nat_less_iff l'_def 1 by simp
have 4: "v ||| l' ## sscan (count cs) v l' = sdrop (length u) (w ||| k ## sscan (count cs) w k)"
using 2 eq_scons eq_shift
by (metis sdrop.simps(2) sdrop_simps sdrop_szip sscan_scons_snth sscan_sdrop stream.sel(2))
have 5: "infs (degen cs) (v ||| l' ## sscan (count cs) v l')" using Suc(6) unfolding 4 by blast
obtain vu a vv where 6: "v = vu @- a ## vv" "fold (count cs) vu l' = l'" "(cs ! l') a"
using degen_skip_condition 3 5 by this
have "l = nat (int l)" by simp
also have "int l = int l mod length cs" using Suc(5) by simp
also have "\<dots> = int (Suc l') mod length cs" using l'_def 1 by (simp add: mod_simps)
finally have 7: "l = Suc l' mod length cs" using nat_mod_as_int by metis
show ?case
proof (rule Suc(3))
show "w = (u @ vu @ [a]) @- vv" unfolding 2(1) 6(1) by simp
show "fold (count cs) (u @ vu @ [a]) k = l" using 2(2) 3 6(2, 3) 7 count_step by simp
qed
qed
lemma degen_skip_arbitrary_condition:
assumes "l < length cs"
assumes "infs (degen cs) (w ||| k ## sscan (count cs) w k)"
obtains u a v
where "w = u @- a ## v" "fold (count cs) u k = l" "(cs ! l) a"
proof -
have 0: "cs \<noteq> []" using assms(1) by auto
have 1: "count cs (shd w) k < length cs" using 0 by simp
have 2: "infs (degen cs) (stl w ||| count cs (shd w) k ## sscan (count cs) (stl w) (count cs (shd w) k))"
using assms(2) by (metis alw.cases sscan.code stream.sel(2) szip.simps(2))
obtain u v where 3: "stl w = u @- v" "fold (count cs) u (count cs (shd w) k) = l"
using degen_skip_arbitrary 1 assms(1) 2 by this
have 4: "v ||| l ## sscan (count cs) v l =
sdrop (length u) (stl w ||| count cs (shd w) k ## sscan (count cs) (stl w) (count cs (shd w) k))"
using 3 eq_scons eq_shift
by (metis sdrop.simps(2) sdrop_simps sdrop_szip sscan_scons_snth sscan_sdrop stream.sel(2))
have 5: "infs (degen cs) (v ||| l ## sscan (count cs) v l)" using 2 unfolding 4 by blast
obtain vu a vv where 6: "v = vu @- a ## vv" "fold (count cs) vu l = l" "(cs ! l) a"
using degen_skip_condition assms(1) 5 by this
show ?thesis
proof
show "w = (shd w # u @ vu) @- a ## vv" using 3(1) 6(1) by (simp add: eq_scons)
show "fold (count cs) (shd w # u @ vu) k = l" using 3(2) 6(2) by simp
show "(cs ! l) a" using 6(3) by this
qed
qed
lemma gen_degen_step:
assumes "gen infs cs w"
obtains u a v
where "w = u @- a ## v" "degen cs (a, fold (count cs) u k)"
proof (cases "k < length cs")
case True
have 1: "infs (cs ! k) w" using assms True by auto
have 2: "{a. (cs ! k) a} \<inter> sset w \<noteq> {}" using infs_any 1 by auto
obtain u a v where 3: "w = u @- a ## v" "{a. (cs ! k) a} \<inter> set u = {}" "a \<in> {a. (cs ! k) a}"
using split_stream_first 2 by this
have 4: "fold (count cs) u k = k" using count_constant_1 True 3(2) by auto
show ?thesis using 3(1, 3) 4 that by simp
next
case False
show ?thesis
proof
show "w = [] @- shd w ## stl w" by simp
show "degen cs (shd w, fold (count cs) [] k)" using False by simp
qed
qed
lemma degen_infs[iff]: "infs (degen cs) (w ||| k ## sscan (count cs) w k) \<longleftrightarrow> gen infs cs w"
proof
show "gen infs cs w" if "infs (degen cs) (w ||| k ## sscan (count cs) w k)"
proof
fix c
assume 1: "c \<in> set cs"
obtain l where 2: "c = cs ! l" "l < length cs" using in_set_conv_nth 1 by metis
show "infs c w"
using that unfolding 2(1)
proof (coinduction arbitrary: w k rule: infs_set_coinduct)
case (infs_set w k)
obtain u a v where 3: "w = u @- a ## v" "(cs ! l) a"
using degen_skip_arbitrary_condition 2(2) infs_set by this
let ?k = "fold (count cs) u k"
let ?l = "fold (count cs) (u @ [a]) k"
have 4: "a ## v ||| ?k ## sscan (count cs) (a ## v) ?k =
sdrop (length u) (w ||| k ## sscan (count cs) w k)"
using 3(1) eq_shift scons_eq
by (metis sdrop_simps(1) sdrop_stl sdrop_szip sscan_scons_snth sscan_sdrop stream.sel(2))
have 5: "infs (degen cs) (a ## v ||| ?k ## sscan (count cs) (a ## v) ?k)"
using infs_set unfolding 4 by blast
show ?case
proof (intro exI conjI bexI)
show "w = (u @ [a]) @- v" "(cs ! l) a" "a \<in> set (u @ [a])" "v = v" using 3 by auto
show "infs (degen cs) (v ||| ?l ## sscan (count cs) v ?l)" using 5 by simp
qed
qed
qed
show "infs (degen cs) (w ||| k ## sscan (count cs) w k)" if "gen infs cs w"
using that
proof (coinduction arbitrary: w k rule: infs_set_coinduct)
case (infs_set w k)
obtain u a v where 1: "w = u @- a ## v" "degen cs (a, fold (count cs) u k)"
using gen_degen_step infs_set by this
let ?u = "u @ [a] || k # scan (count cs) u k"
let ?l = "fold (count cs) (u @ [a]) k"
show ?case
proof (intro exI conjI bexI)
have "w ||| k ## sscan (count cs) w k =
(u @ [a]) @- v ||| k ## scan (count cs) u k @- ?l ## sscan (count cs) v ?l"
unfolding 1(1) by simp
also have "\<dots> = ?u @- (v ||| ?l ## sscan (count cs) v ?l)"
by (metis length_Cons length_append_singleton scan_length shift.simps(2) szip_shift)
finally show "w ||| k ## sscan (count cs) w k = ?u @- (v ||| ?l ## sscan (count cs) v ?l)" by this
show "degen cs (a, fold (count cs) u k)" using 1(2) by this
have "(a, fold (count cs) u k) = (last (u @ [a]), last (k # scan (count cs) u k))"
unfolding scan_last by simp
also have "\<dots> = last ?u" by (simp add: zip_eq_Nil_iff)
also have "\<dots> \<in> set ?u" by (fastforce intro: last_in_set simp: zip_eq_Nil_iff)
finally show "(a, fold (count cs) u k) \<in> set ?u" by this
show "v ||| ?l ## sscan (count cs) v ?l = v ||| ?l ## sscan (count cs) v ?l" by rule
show "gen infs cs v" using infs_set unfolding 1(1) by auto
qed
qed
qed
end
|
#ifndef qlex_ibor_ois_basis_swap_hpp
#define qlex_ibor_ois_basis_swap_hpp
#include <ql/instruments/swap.hpp>
#include <ql/indexes/iborindex.hpp>
#include <ql/time/daycounter.hpp>
#include <ql/time/schedule.hpp>
#include <boost/optional.hpp>
using namespace QuantLib;
namespace QLExtension {
//! Ibor OIS basis swap: ibor vs compounded overnight rate + spread
class IBOROISBasisSwap : public Swap {
public:
enum Type { Receiver = -1, Payer = 1 };
IBOROISBasisSwap(
Type type,
Real nominal,
const Schedule& floatSchedule,
const boost::shared_ptr<IborIndex>& iborIndex,
const DayCounter& floatingDayCount,
const Schedule& overnightSchedule,
const boost::shared_ptr<OvernightIndex>& overnightIndex,
Spread spread,
const DayCounter& overnightDayCount,
boost::optional<BusinessDayConvention> paymentConvention =
boost::none,
bool arithmeticAveragedCoupon = true);
IBOROISBasisSwap(
Type type,
std::vector<Real> nominals,
const Schedule& floatSchedule,
const boost::shared_ptr<IborIndex>& iborIndex,
const DayCounter& floatingDayCount,
const Schedule& overnightSchedule,
const boost::shared_ptr<OvernightIndex>& overnightIndex,
Spread spread,
const DayCounter& overnightDayCount,
boost::optional<BusinessDayConvention> paymentConvention =
boost::none,
bool arithmeticAveragedCoupon = true);
//! \name Inspectors
//@{
Type type() const { return type_; }
Real nominal() const;
std::vector<Real> nominals() const { return nominals_; }
const Schedule& floatingSchedule() const;
const boost::shared_ptr<IborIndex>& iborIndex() const { return iborIndex_; }
const DayCounter& floatingDayCount() const;
const Schedule& overnightSchedule() { return overnightSchedule_; }
const boost::shared_ptr<OvernightIndex>& overnightIndex() { return overnightIndex_; }
Spread spread() { return spread_; }
BusinessDayConvention paymentConvention() const { return paymentConvention_; }
const Leg& floatingLeg() const { return legs_[0]; }
const Leg& overnightLeg() const { return legs_[1]; }
//@}
//! \name Results
//@{
Real floatingLegBPS() const;
Real floatingLegNPV() const;
Real overnightLegBPS() const;
Real overnightLegNPV() const;
Spread fairSpread() const;
//@}
private:
void initialize();
Type type_;
std::vector<Real> nominals_;
Schedule floatingSchedule_;
boost::shared_ptr<IborIndex> iborIndex_;
DayCounter floatingDayCount_;
Schedule overnightSchedule_;
boost::shared_ptr<OvernightIndex> overnightIndex_;
Spread spread_;
DayCounter overnightDayCount_;
BusinessDayConvention paymentConvention_;
bool arithmeticAveragedCoupon_;
};
// inline
inline Real IBOROISBasisSwap::nominal() const {
QL_REQUIRE(nominals_.size() == 1, "varying nominals");
return nominals_[0];
}
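    // A minimal usage sketch (hypothetical curves, schedules and engine;
    // the exact market setup is up to the caller, as with any QuantLib::Swap):
    //
    //   boost::shared_ptr<IborIndex> euribor3m(new Euribor3M(fwdCurve));
    //   boost::shared_ptr<OvernightIndex> eonia(new Eonia(oisCurve));
    //   IBOROISBasisSwap swap(IBOROISBasisSwap::Payer, 1.0e6,
    //                         floatSchedule, euribor3m, Actual360(),
    //                         overnightSchedule, eonia, 0.0010, Actual360());
    //   swap.setPricingEngine(engine);
    //   Spread fair = swap.fairSpread();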
}
#endif
|
source(paste0(dir_script_ed, '/01-prep/prep1/util.r'))
# Read data
df <- get_species_raw_data()
join <- df[, c("idx", "full.name.of.describer", "date", "family")][
order(as.numeric(idx))
]
# Log relevant statistics
write_ending_log(join, filepath_log)
# Format data and write as data.csv
format_data(join, dir_model_folder)
|
||| A DYI version of 'string interpolation', mimicking Python 3's 'f-string' syntax
||| Not as fancy
module Data.String.Interpolation
import Data.String
namespace Data.String.Interpolation.Basic
%inline
public export
F : List String -> String
  F strs = concat strs
namespace Data.String.Interpolation.Nested
%inline
public export
F : List (List String) -> String
F strss = F (concat strss)
{- Examples:
fstring : String
fstring = let apples = "apples" in
F["I have some ", apples," here."] --- cf. f"I have some {apples} here."
multiline : String
multiline = let name = "Edwin"
profession = "Hacker"
affiliation = "the University of St. Andrews" in --- cf.
F [["Hi ",name,". " ] --- f"Hi {name}. " \
,["You are a ",profession,". "] --- f"You are a {profession}. " \
,["You were in ",affiliation,"."] --- f"You were in {affiliation}."
]
-}
|
cumA <- percentage_percentage_freq(openA$Roslyn)
cumWhoDo <- percentage_percentage_freq(openWhoDo$Roslyn)
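# Evaluation grid: 0-100% of reviewers in steps of 0.25 percentage points.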
s = seq(0, 100, .25)
s
#2
pA = get_cumulative_at_df (cumA, s)
pWhoDo = get_cumulative_at_df (cumWhoDo, s)
plot(s, pA, type = 'l', col = 'black', pch = 22, lty = 1, lwd = 3)
lines(s, pWhoDo, type = 'l', col = 'blue', pch = 2,lty = 2, lwd = 3)
plot_percentage_freqency(openA$CoreFX)
plot_line_percentage_freqency(openA$Rust, lty = 2)
pdf("~/Downloads/WorkloadActual.pdf")
plot(seq(1:100), seq(1:100), type = 'l', lwd = 3, lty = 1, ylab = "Cumulative Percentage of Reviews", xlab = "Percentage of Reviewers")
plot_line_percentage_percentage(percentage_percentage_freq(openA$CoreFX), lty = 1)
plot_line_percentage_percentage(percentage_percentage_freq(openA$CoreCLR), lty = 2)
plot_line_percentage_percentage(percentage_percentage_freq(openA$Roslyn), lty = 3)
plot_line_percentage_percentage(percentage_percentage_freq(openA$Rust), lty = 4)
plot_line_percentage_percentage(percentage_percentage_freq(openA$Kubernetes), lty = 5)
abline(v = 20)
abline(h = 80)
abline(h = 20)
legend("bottomright", legend=c("CoreFX", "CoreCLR", "Roslyn", "Rust", "Kubernetes"), lty=c(1:5), lwd = 3, cex=1.2)
dev.off()
get_cumulative_at_df(percentage_percentage_freq(openA$CoreFX), 20);
get_cumulative_at_df(percentage_percentage_freq(openWhoDo$CoreFX), 20);
get_cumulative_at_df(percentage_percentage_freq(openRet$CoreFX), 20);
|
\subsection{atexit -- Exit handlers}
The \texttt{atexit} module lets a program register handler functions that
the interpreter calls on normal termination, in last-in-first-out order of
registration.
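A minimal sketch of the registration pattern (assuming the standard
CPython module):
\begin{verbatim}
import atexit

def close_log():
    # Runs automatically at normal interpreter exit.
    print("flushing log ...")

atexit.register(close_log)
\end{verbatim}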
%
|
program extract_multi
* read multi output from bsyn
* BPz 15/07-04
*
implicit none
      character*128 filein,bla
logical star
integer ieliel,ntau,maxlam,k,j
real absave(83),absos(150,10000),abso(150,10000),
& absocont(150,10000),tau(150),xi(150),xls
doubleprecision xl1,xl2,del,lambda(10000)
star=.true.
print*,' Input file with multi output from bsyn?'
read(5,10) filein
10 format (A)
      open(10,form='unformatted',file=filein)
do while (star)
read(10) bla
if (bla(1:1).ne.'*') then
star=.false.
backspace(10)
endif
enddo
read(10) (ieliel,absave(ieliel),ieliel=1,60),
& (ieliel,absave(ieliel),ieliel=62,83)
read(10) ntau,maxlam,xl1,xl2,del
read(10) xls
read(10) (tau(k),k=1,ntau),(xi(k),k=1,ntau)
      read(10) ((absocont(k,j),k=1,ntau),j=1,maxlam)
      read(10) ((abso(k,j),k=1,ntau),j=1,maxlam)
* the stored record may already be scaled by the Rosseland opacity;
* read it into absos as-is
      read(10) ((absos(k,j),k=1,ntau),j=1,maxlam)
do j=1,maxlam
lambda(j)=xl1+(j-1)*del
enddo
stop
end
|
type $RR <class {
@e1 i32,
@e2 f32,
@classname "foo",
@e3 f64}>
type $SS <class <$RR> { @accessflag 1234,
&method1(agg)agg,
&method2(void)void}>
func $foo (
var %x <$SS>) i32 {
dassign %x 2 ( constval i32 32 )
return ( dread i32 %x 2 ) }
# EXEC: %irbuild Main.mpl
# EXEC: %irbuild Main.irb.mpl
# EXEC: %cmp Main.irb.mpl Main.irb.irb.mpl
|
using DPC, CairoMakie
include("utils.jl")
files = ["bio_schema.jl", "inhibition.jl", "multineuron_and.jl", "multineuron_or.jl", "place_cells.jl", "probabilistic.jl", "motifs.jl"]
for file in files
let
println("Running $(file) ...")
include(file)
end
end
println("DONE.")
|
/* EPPC.r is not available on Mac OS X */
|
// Copyright (C) 2012-2016 Internet Systems Consortium, Inc. ("ISC")
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include <config.h>
#include <boost/shared_ptr.hpp>
#include <exceptions/exceptions.h>
#include <dhcp/dhcp4.h>
#include <dhcp/dhcp6.h>
#include <dhcp/pkt4.h>
#include <dhcp/pkt6.h>
#include "../stats_mgr.h"
#include <gtest/gtest.h>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/scoped_ptr.hpp>
using namespace std;
using namespace isc;
using namespace isc::dhcp;
using namespace isc::perfdhcp;
namespace {
typedef StatsMgr<dhcp::Pkt4> StatsMgr4;
typedef StatsMgr<dhcp::Pkt6> StatsMgr6;
const uint32_t common_transid = 123;
/// @brief Number of packets to be used for testing packets collecting.
const size_t TEST_COLLECTED_PKT_NUM = 10;
/// @brief DHCPv4 packet with modifiable internal values.
///
/// Currently the only additional modifiable value is a packet
/// timestamp.
class Pkt4Modifiable : public Pkt4 {
public:
/// @brief Constructor.
///
/// @param msg_type DHCPv4 message type.
/// @param transid Transaction id.
Pkt4Modifiable(const uint8_t msg_type, const uint32_t transid)
: Pkt4(msg_type, transid) {
}
/// @brief Modifies packet timestamp.
///
/// @param delta Number of seconds to be added to the current
/// packet time. If this number is negative, the new timestamp
/// will point to earlier time than the original timestamp.
void modifyTimestamp(const long delta) {
timestamp_ += boost::posix_time::seconds(delta);
}
};
/// @brief Pointer to the Pkt4Modifiable.
typedef boost::shared_ptr<Pkt4Modifiable> Pkt4ModifiablePtr;
class StatsMgrTest : public ::testing::Test {
public:
StatsMgrTest() {
}
/// \brief Create DHCPv4 packet.
///
/// Method creates DHCPv4 packet and updates its timestamp.
///
/// \param msg_type DHCPv4 message type.
/// \param transid transaction id for the packet.
/// \return DHCPv4 packet.
Pkt4Modifiable* createPacket4(const uint8_t msg_type,
const uint32_t transid) {
Pkt4Modifiable* pkt = new Pkt4Modifiable(msg_type, transid);
// Packet timestamp is normally updated by interface
// manager on packets reception or send. Unit tests
// do not use interface manager so we need to do it
// ourselves.
pkt->updateTimestamp();
return (pkt);
}
/// \brief Create DHCPv6 packet.
///
/// Method creates DHCPv6 packet and updates its timestamp.
///
/// \param msg_type DHCPv6 message type.
/// \param transid transaction id.
/// \return DHCPv6 packet.
Pkt6* createPacket6(const uint8_t msg_type,
const uint32_t transid) {
Pkt6* pkt = new Pkt6(msg_type, transid);
// Packet timestamp is normally updated by interface
// manager on packets reception or send. Unit tests
// do not use interface manager so we need to do it
// ourselves.
pkt->updateTimestamp();
return pkt;
}
/// \brief Pass multiple DHCPv6 packets to Statistics Manager.
///
/// Method simulates sending or receiving multiple DHCPv6 packets.
///
/// \note The xchg_type parameter is passed as non-const value to avoid
/// false cppcheck errors which expect enum value being passed by reference.
/// This error is not reported when non-const enum is passed by value.
///
/// \param stats_mgr Statistics Manager instance to be used.
/// \param xchg_type packet exchange types.
/// \param packet_type DHCPv6 packet type.
/// \param num_packets packets to be passed to Statistics Manager.
/// \param receive simulated packets are received (if true)
/// or sent (if false)
void passMultiplePackets6(const boost::shared_ptr<StatsMgr6> stats_mgr,
StatsMgr6::ExchangeType xchg_type,
const uint8_t packet_type,
const int num_packets,
const bool receive = false) {
for (int i = 0; i < num_packets; ++i) {
boost::shared_ptr<Pkt6>
packet(createPacket6(packet_type, i));
if (receive) {
ASSERT_NO_THROW(
stats_mgr->passRcvdPacket(xchg_type, packet);
);
} else {
ASSERT_NO_THROW(
stats_mgr->passSentPacket(xchg_type, packet)
);
}
}
}
/// \brief Simulate DHCPv4 DISCOVER-OFFER with delay.
///
/// Method simulates DHCPv4 DISCOVER-OFFER exchange. The OFFER packet
/// creation is delayed by the specified number of seconds. This imposes
/// different packet timestamps and affects delay counters in Statistics
/// Manager.
///
/// \param stats_mgr Statistics Manager instance.
/// \param delay delay in seconds between DISCOVER and OFFER packets.
void passDOPacketsWithDelay(const boost::shared_ptr<StatsMgr4> stats_mgr,
unsigned int delay,
uint32_t transid) {
boost::shared_ptr<Pkt4> sent_packet(createPacket4(DHCPDISCOVER,
transid));
ASSERT_NO_THROW(
stats_mgr->passSentPacket(StatsMgr4::XCHG_DO, sent_packet)
);
        // There is no way to differentiate the timestamps of two packets
        // other than sleeping before we create the second packet, because
        // a packet uses the current time to update its timestamp.
// Sleeping for X seconds will guarantee that delay between packets
// will be greater than 1 second. Note that posix time value is
// transformed to double value and it makes it hard to determine
// actual value to expect.
std::cout << "Sleeping for " << delay << "s to test packet delays"
<< std::endl;
sleep(delay);
boost::shared_ptr<Pkt4> rcvd_packet(createPacket4(DHCPOFFER,
transid));
ASSERT_NO_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_DO, rcvd_packet);
);
// Calculate period between packets.
boost::posix_time::ptime sent_time = sent_packet->getTimestamp();
boost::posix_time::ptime rcvd_time = rcvd_packet->getTimestamp();
ASSERT_FALSE(sent_time.is_not_a_date_time());
ASSERT_FALSE(rcvd_time.is_not_a_date_time());
}
/// @brief This test verifies that timed out packets are collected.
///
/// @param transid_index Index in the table of transaction ids which
/// points to the transaction id to be selected for the DHCPOFFER.
void testSendReceiveCollected(const size_t transid_index) {
boost::scoped_ptr<StatsMgr4> stats_mgr(new StatsMgr4());
// The second parameter indicates that transactions older than
// 2 seconds should be removed and respective packets collected.
stats_mgr->addExchangeStats(StatsMgr4::XCHG_DO, 2);
// Transaction ids of packets to be sent. All transaction ids
// belong to the same bucket (match the transid & 1023 hashing
// function).
uint32_t transid[TEST_COLLECTED_PKT_NUM] =
{ 1, 1025, 2049, 3073, 4097, 5121, 6145, 7169, 8193, 9217 };
// Simulate sending a number of packets.
for (unsigned int i = 0; i < TEST_COLLECTED_PKT_NUM; ++i) {
Pkt4ModifiablePtr sent_packet(createPacket4(DHCPDISCOVER,
transid[i]));
// For packets with low indexes we set the timestamps to
// 10s in the past. When DHCPOFFER is processed, the
// packets with timestamps older than 2s should be collected.
if (i < TEST_COLLECTED_PKT_NUM / 2) {
sent_packet->modifyTimestamp(-10);
}
ASSERT_NO_THROW(
stats_mgr->passSentPacket(StatsMgr4::XCHG_DO, sent_packet)
) << "failure for transaction id " << transid[i];
}
// Create a server response for one of the packets sent.
Pkt4ModifiablePtr rcvd_packet(createPacket4(DHCPOFFER,
transid[transid_index]));
ASSERT_NO_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_DO, rcvd_packet);
);
// There is exactly one case (transaction id) for which perfdhcp
// will find a packet using ordered lookup. In this case, no
// packets will be collected. Otherwise, for any unordered lookup
// all packets from a bucket should be collected.
if (stats_mgr->getUnorderedLookups(StatsMgr4::XCHG_DO) > 0) {
// All packets in the bucket having transaction id
// index below TEST_COLLECTED_PKT_NUM / 2 should be removed.
EXPECT_EQ(TEST_COLLECTED_PKT_NUM / 2,
stats_mgr->getCollectedNum(StatsMgr4::XCHG_DO));
}
// Make sure that we can still use the StatsMgr. It is possible
// that the pointer to 'next sent' packet was invalidated
// during packet removal.
for (unsigned int i = 0; i < TEST_COLLECTED_PKT_NUM; ++i) {
// Increase transaction ids by 1 so as they don't duplicate
// with transaction ids of already sent packets.
Pkt4ModifiablePtr sent_packet(createPacket4(DHCPDISCOVER,
transid[i] + 1));
Pkt4ModifiablePtr rcvd_packet(createPacket4(DHCPOFFER,
transid[i] + 1));
ASSERT_NO_THROW(
stats_mgr->passSentPacket(StatsMgr4::XCHG_DO, sent_packet)
) << "failure for transaction id " << transid[i];
ASSERT_NO_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_DO, rcvd_packet);
) << "failure for transaction id " << transid[i];
}
// We should have processed TEST_COLLECTED_PKT_NUM but it is possible
// that one of them we couldn't match (orphan packet), because
// the matched packet had to be collected because of the transaction
// timeout. Therefore, we have to count both received packets and
// orphans.
EXPECT_EQ(TEST_COLLECTED_PKT_NUM + 1,
stats_mgr->getRcvdPacketsNum(StatsMgr4::XCHG_DO) +
stats_mgr->getOrphans(StatsMgr4::XCHG_DO));
}
};
TEST_F(StatsMgrTest, Constructor) {
boost::scoped_ptr<StatsMgr4> stats_mgr(new StatsMgr4());
stats_mgr->addExchangeStats(StatsMgr4::XCHG_DO);
EXPECT_DOUBLE_EQ(
std::numeric_limits<double>::max(),
stats_mgr->getMinDelay(StatsMgr4::XCHG_DO)
);
EXPECT_DOUBLE_EQ(0, stats_mgr->getMaxDelay(StatsMgr4::XCHG_DO));
EXPECT_EQ(0, stats_mgr->getOrphans(StatsMgr4::XCHG_DO));
EXPECT_EQ(0, stats_mgr->getOrderedLookups(StatsMgr4::XCHG_DO));
EXPECT_EQ(0, stats_mgr->getUnorderedLookups(StatsMgr4::XCHG_DO));
EXPECT_EQ(0, stats_mgr->getSentPacketsNum(StatsMgr4::XCHG_DO));
EXPECT_EQ(0, stats_mgr->getRcvdPacketsNum(StatsMgr4::XCHG_DO));
EXPECT_EQ(0, stats_mgr->getCollectedNum(StatsMgr4::XCHG_DO));
EXPECT_THROW(stats_mgr->getAvgDelay(StatsMgr4::XCHG_DO), InvalidOperation);
EXPECT_THROW(stats_mgr->getStdDevDelay(StatsMgr4::XCHG_DO),
InvalidOperation);
EXPECT_THROW(stats_mgr->getAvgUnorderedLookupSetSize(StatsMgr4::XCHG_DO),
InvalidOperation);
}
TEST_F(StatsMgrTest, Exchange) {
boost::scoped_ptr<StatsMgr4> stats_mgr(new StatsMgr4());
boost::shared_ptr<Pkt4> sent_packet(createPacket4(DHCPDISCOVER,
common_transid));
boost::shared_ptr<Pkt4> rcvd_packet(createPacket4(DHCPOFFER,
common_transid));
// This is expected to throw because XCHG_DO was not yet
// added to Stats Manager for tracking.
ASSERT_FALSE(stats_mgr->hasExchangeStats(StatsMgr4::XCHG_DO));
ASSERT_FALSE(stats_mgr->hasExchangeStats(StatsMgr4::XCHG_RA));
EXPECT_THROW(
stats_mgr->passSentPacket(StatsMgr4::XCHG_DO, sent_packet),
BadValue
);
EXPECT_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_DO, rcvd_packet),
BadValue
);
// Adding DISCOVER-OFFER exchanges to be tracked by Stats Manager.
stats_mgr->addExchangeStats(StatsMgr4::XCHG_DO);
ASSERT_TRUE(stats_mgr->hasExchangeStats(StatsMgr4::XCHG_DO));
ASSERT_FALSE(stats_mgr->hasExchangeStats(StatsMgr4::XCHG_RA));
// The following two attempts are expected to throw because
// invalid exchange types are passed (XCHG_RA instead of XCHG_DO)
EXPECT_THROW(
stats_mgr->passSentPacket(StatsMgr4::XCHG_RA, sent_packet),
BadValue
);
EXPECT_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_RA, rcvd_packet),
BadValue
);
// The following two attempts are expected to run fine because
// right exchange type is specified.
EXPECT_NO_THROW(
stats_mgr->passSentPacket(StatsMgr4::XCHG_DO, sent_packet)
);
EXPECT_NO_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_DO, rcvd_packet)
);
}
TEST_F(StatsMgrTest, MultipleExchanges) {
boost::shared_ptr<StatsMgr6> stats_mgr(new StatsMgr6());
stats_mgr->addExchangeStats(StatsMgr6::XCHG_SA);
stats_mgr->addExchangeStats(StatsMgr6::XCHG_RR);
// Simulate sending number of solicit packets.
const int solicit_packets_num = 10;
passMultiplePackets6(stats_mgr, StatsMgr6::XCHG_SA, DHCPV6_SOLICIT,
solicit_packets_num);
// Simulate sending number of request packets. It is important that
// number of request packets is different then number of solicit
// packets. We can now check if right number packets went to
// the right exchange type group.
const int request_packets_num = 5;
passMultiplePackets6(stats_mgr, StatsMgr6::XCHG_RR, DHCPV6_REQUEST,
request_packets_num);
// Check if all packets are successfully passed to packet lists.
EXPECT_EQ(solicit_packets_num,
stats_mgr->getSentPacketsNum(StatsMgr6::XCHG_SA));
EXPECT_EQ(request_packets_num,
stats_mgr->getSentPacketsNum(StatsMgr6::XCHG_RR));
// Simulate reception of multiple packets for both SOLICIT-ADVERTISE
// and REQUEST-REPLY exchanges. Assume no packet drops.
const bool receive_packets = true;
passMultiplePackets6(stats_mgr, StatsMgr6::XCHG_SA, DHCPV6_ADVERTISE,
solicit_packets_num, receive_packets);
passMultiplePackets6(stats_mgr, StatsMgr6::XCHG_RR, DHCPV6_REPLY,
request_packets_num, receive_packets);
// Verify that all received packets are counted.
EXPECT_EQ(solicit_packets_num,
stats_mgr->getRcvdPacketsNum(StatsMgr6::XCHG_SA));
EXPECT_EQ(request_packets_num,
stats_mgr->getRcvdPacketsNum(StatsMgr6::XCHG_RR));
}
TEST_F(StatsMgrTest, ExchangeToString) {
// Test DHCPv4 specific exchange names.
EXPECT_EQ("DISCOVER-OFFER",
StatsMgr4::exchangeToString(StatsMgr4::XCHG_DO));
EXPECT_EQ("REQUEST-ACK", StatsMgr4::exchangeToString(StatsMgr4::XCHG_RA));
EXPECT_EQ("REQUEST-ACK (renewal)",
StatsMgr4::exchangeToString(StatsMgr4::XCHG_RNA));
// Test DHCPv6 specific exchange names.
EXPECT_EQ("SOLICIT-ADVERTISE",
StatsMgr6::exchangeToString(StatsMgr6::XCHG_SA));
EXPECT_EQ("REQUEST-REPLY", StatsMgr6::exchangeToString(StatsMgr6::XCHG_RR));
EXPECT_EQ("RENEW-REPLY", StatsMgr6::exchangeToString(StatsMgr6::XCHG_RN));
EXPECT_EQ("RELEASE-REPLY", StatsMgr6::exchangeToString(StatsMgr6::XCHG_RL));
}
TEST_F(StatsMgrTest, SendReceiveSimple) {
boost::scoped_ptr<StatsMgr4> stats_mgr(new StatsMgr4());
boost::shared_ptr<Pkt4> sent_packet(createPacket4(DHCPDISCOVER,
common_transid));
boost::shared_ptr<Pkt4> rcvd_packet(createPacket4(DHCPOFFER,
common_transid));
stats_mgr->addExchangeStats(StatsMgr4::XCHG_DO);
// The following attempt is expected to pass because the right
// exchange type is used.
ASSERT_NO_THROW(
stats_mgr->passSentPacket(StatsMgr4::XCHG_DO, sent_packet)
);
// It is ok, to pass to received packets here. First one will
// be matched with sent packet. The latter one will not be
// matched with sent packet but orphans counter will simply
// increase.
ASSERT_NO_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_DO, rcvd_packet)
);
ASSERT_NO_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_DO, rcvd_packet)
);
EXPECT_EQ(1, stats_mgr->getOrphans(StatsMgr4::XCHG_DO));
}
TEST_F(StatsMgrTest, SendReceiveUnordered) {
const int packets_num = 10;
boost::scoped_ptr<StatsMgr4> stats_mgr(new StatsMgr4());
stats_mgr->addExchangeStats(StatsMgr4::XCHG_DO);
// Transaction ids of 10 packets to be sent and received.
uint32_t transid[packets_num] =
{ 1, 1024, 2, 1025, 3, 1026, 4, 1027, 5, 1028 };
for (int i = 0; i < packets_num; ++i) {
boost::shared_ptr<Pkt4> sent_packet(createPacket4(DHCPDISCOVER,
transid[i]));
ASSERT_NO_THROW(
stats_mgr->passSentPacket(StatsMgr4::XCHG_DO, sent_packet)
);
}
// We are simulating that received packets are coming in reverse order:
// 1028, 5, 1027 ....
for (int i = 0; i < packets_num; ++i) {
boost::shared_ptr<Pkt4>
rcvd_packet(createPacket4(DHCPDISCOVER,
transid[packets_num - 1 - i]));
ASSERT_NO_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_DO, rcvd_packet);
);
}
// All packets are expected to match (we did not drop any)
EXPECT_EQ(0, stats_mgr->getOrphans(StatsMgr4::XCHG_DO));
// Most of the time we have to do unordered lookups except for the last
// one. Packets are removed from the sent list every time we have a match
// so eventually we come up with the single packet that caching iterator
// is pointing to. This is counted as ordered lookup.
EXPECT_EQ(1, stats_mgr->getOrderedLookups(StatsMgr4::XCHG_DO));
EXPECT_EQ(9, stats_mgr->getUnorderedLookups(StatsMgr4::XCHG_DO));
}
TEST_F(StatsMgrTest, SendReceiveCollected) {
// Check that the packet collection mechanism works fine
// for any packet returned by the server.
for (unsigned int i = 0; i < TEST_COLLECTED_PKT_NUM; ++i) {
testSendReceiveCollected(i);
}
}
TEST_F(StatsMgrTest, Orphans) {
const int packets_num = 6;
boost::scoped_ptr<StatsMgr4> stats_mgr(new StatsMgr4());
stats_mgr->addExchangeStats(StatsMgr4::XCHG_DO);
// We skip every second packet to simulate drops.
for (int i = 0; i < packets_num; i += 2) {
boost::shared_ptr<Pkt4> sent_packet(createPacket4(DHCPDISCOVER, i));
ASSERT_NO_THROW(
stats_mgr->passSentPacket(StatsMgr4::XCHG_DO, sent_packet)
);
}
// We pass all received packets.
for (int i = 0; i < packets_num; ++i) {
boost::shared_ptr<Pkt4> rcvd_packet(createPacket4(DHCPOFFER, i));
ASSERT_NO_THROW(
stats_mgr->passRcvdPacket(StatsMgr4::XCHG_DO, rcvd_packet);
);
}
// The half of received packets are expected not to have matching
// sent packet.
EXPECT_EQ(packets_num / 2, stats_mgr->getOrphans(StatsMgr4::XCHG_DO));
}
TEST_F(StatsMgrTest, Delays) {
boost::shared_ptr<StatsMgr4> stats_mgr(new StatsMgr4());
stats_mgr->addExchangeStats(StatsMgr4::XCHG_DO, 5);
// Send DISCOVER, wait 2s and receive OFFER. This will affect
// counters in Stats Manager.
passDOPacketsWithDelay(stats_mgr, 2, common_transid);
// Initially min delay is equal to MAX_DOUBLE. After first packets
// are passed, it is expected to set to actual value.
EXPECT_LT(stats_mgr->getMinDelay(StatsMgr4::XCHG_DO),
std::numeric_limits<double>::max());
EXPECT_GT(stats_mgr->getMinDelay(StatsMgr4::XCHG_DO), 1);
    // After a single exchange the max delay equals the min delay.
EXPECT_GT(stats_mgr->getMaxDelay(StatsMgr4::XCHG_DO), 1);
    // Likewise, the average delay equals the min/max delay after one exchange.
EXPECT_GT(stats_mgr->getAvgDelay(StatsMgr4::XCHG_DO), 1);
// Simulate another DISCOVER-OFFER exchange with delay between
// sent and received packets. Delay is now shorter than earlier
// so standard deviation of delay will now increase.
const unsigned int delay2 = 1;
passDOPacketsWithDelay(stats_mgr, delay2, common_transid + 1);
// Standard deviation is expected to be non-zero.
EXPECT_GT(stats_mgr->getStdDevDelay(StatsMgr4::XCHG_DO), 0);
}
TEST_F(StatsMgrTest, CustomCounters) {
boost::scoped_ptr<StatsMgr4> stats_mgr(new StatsMgr4());
// Specify counter keys and names.
const std::string too_short_key("tooshort");
const std::string too_short_name("Too short packets");
const std::string too_late_key("toolate");
const std::string too_late_name("Packets sent too late");
// Add two custom counters.
stats_mgr->addCustomCounter(too_short_key, too_short_name);
stats_mgr->addCustomCounter(too_late_key, too_late_name);
// Increment one of the counters 10 times.
const uint64_t tooshort_num = 10;
for (uint64_t i = 0; i < tooshort_num; ++i) {
stats_mgr->incrementCounter(too_short_key);
}
// Increment another counter by 5 times.
const uint64_t toolate_num = 5;
for (uint64_t i = 0; i < toolate_num; ++i) {
stats_mgr->incrementCounter(too_late_key);
}
// Check counter's current value and name.
StatsMgr4::CustomCounterPtr tooshort_counter =
stats_mgr->getCounter(too_short_key);
EXPECT_EQ(too_short_name, tooshort_counter->getName());
EXPECT_EQ(tooshort_num, tooshort_counter->getValue());
// Check counter's current value and name.
StatsMgr4::CustomCounterPtr toolate_counter =
stats_mgr->getCounter(too_late_key);
EXPECT_EQ(too_late_name, toolate_counter->getName());
EXPECT_EQ(toolate_num, toolate_counter->getValue());
}
TEST_F(StatsMgrTest, PrintStats) {
std::cout << "This unit test is checking statistics printing "
<< "capabilities. It is expected that some counters "
<< "will be printed during this test. It may also "
<< "cause spurious errors." << std::endl;
boost::shared_ptr<StatsMgr6> stats_mgr(new StatsMgr6());
stats_mgr->addExchangeStats(StatsMgr6::XCHG_SA);
// Simulate sending and receiving one packet. Otherwise printing
// functions will complain about lack of packets.
const int packets_num = 1;
passMultiplePackets6(stats_mgr, StatsMgr6::XCHG_SA, DHCPV6_SOLICIT,
packets_num);
passMultiplePackets6(stats_mgr, StatsMgr6::XCHG_SA, DHCPV6_ADVERTISE,
packets_num, true);
// This function will print statistics even if packets are not
// archived because it relies on counters. There is at least one
// exchange needed to count the average delay and std deviation.
EXPECT_NO_THROW(stats_mgr->printStats());
// Printing timestamps is expected to fail because by default we
// disable packets archiving mode. Without packets we can't get
// timestamps.
EXPECT_THROW(stats_mgr->printTimestamps(), isc::InvalidOperation);
// Now, we create another statistics manager instance and enable
// packets archiving mode.
const bool archive_packets = true;
boost::shared_ptr<StatsMgr6> stats_mgr2(new StatsMgr6(archive_packets));
stats_mgr2->addExchangeStats(StatsMgr6::XCHG_SA);
// Timestamps should now get printed because packets have been preserved.
EXPECT_NO_THROW(stats_mgr2->printTimestamps());
}
}
|
(* Author: Norbert Schirmer
Maintainer: Norbert Schirmer, norbert.schirmer at web de
License: LGPL
*)
(* Title: UserGuide.thy
Author: Norbert Schirmer, TU Muenchen
Copyright (C) 2004-2008 Norbert Schirmer
Some rights reserved, TU Muenchen
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
*)
section {* User Guide \label{sec:UserGuide}*}
(*<*)
theory UserGuide
imports HeapList Vcg
"HOL-Statespace.StateSpaceSyntax" "HOL-Library.LaTeXsugar"
begin
(*>*)
(*<*)
syntax
"_statespace_updates" :: "('a \<Rightarrow> 'b) \<Rightarrow> updbinds \<Rightarrow> ('a \<Rightarrow> 'b)" ("_\<langle>_\<rangle>" [900,0] 900)
(*>*)
text {*
We introduce the verification environment with a couple
of examples that illustrate how to use the different
bits and pieces to verify programs.
*}
subsection {* Basics *}
text {*
First of all we have to decide how to represent the state space. There
are currently two implementations. One is based on records the other
one on the concept called `statespace' that was introduced with
Isabelle 2007 (see \texttt{HOL/Statespace}). In contrast to records a
`statespace' does not define a new type, but provides a notion of state,
based on locales. Logically
the state is modelled as a function from (abstract) names to
(abstract) values and the statespace infrastructure organises
distinctness of names and projection/injection of concrete values into
the abstract one. Towards the user the interface of records and
statespaces is quite similar. However, statespaces offer more
flexibility, inherited from the locale infrastructure, in
particular multiple inheritance and renaming of components.
In this user guide we prefer statespaces, but give some comments on
the usage of records in Section \ref{sec:records}.
*}
hoarestate vars =
A :: nat
I :: nat
M :: nat
N :: nat
R :: nat
S :: nat
text (in vars) {* The command \isacommand{hoarestate} is a simple preprocessor
for the command \isacommand{statespace} which decorates the state
components with the suffix @{text "_'"}, to avoid cluttering the
namespace. Also note that underscores are printed as hyphens in this
documentation. So what you see as @{term "A_'"} in this document is
actually \texttt{A\_'}. Every component name becomes a fixed variable in
the locale @{text vars} and can no longer be used for logical
variables.
Lookup of a component @{term "A_'"} in a state @{term "s"} is written as
@{term "s\<cdot>A_'"}, and update with a value @{term "term v"} as @{term "s\<langle>A_' := v\<rangle>"}.
To deal with local and global variables in the context of procedures the
program state is organised as a record containing the two components @{const "locals"}
and @{const "globals"}. The variables defined in hoarestate @{text "vars"} reside
in the @{const "locals"} part.
*}
text {*
Here is a first example.
*}
lemma (in vars) "\<Gamma>\<turnstile> \<lbrace>\<acute>N = 5\<rbrace> \<acute>N :== 2 * \<acute>N \<lbrace>\<acute>N = 10\<rbrace>"
apply vcg
txt {* @{subgoals} *}
apply simp
txt {* @{subgoals} *}
done
text {* We enable the locale of statespace @{text vars} by the
\texttt{in vars} directive. The verification condition generator is
invoked via the @{text vcg} method and leaves us with the expected
subgoal that can be proved by simplification. *}
text (in vars) {*
If we refer to components (variables) of the state-space of the program
we always mark these with @{text "\<acute>"} (in assertions and also in the
program itself). It is the acute-symbol and is present on
most keyboards. The assertions of the Hoare tuple are
ordinary Isabelle sets. As we usually want to refer to the state space
in the assertions, we provide special brackets for them. They can be written
as {\verb+{| |}+} in ASCII or @{text "\<lbrace> \<rbrace>"} with symbols. Internally,
marking variables has two effects. First of all we refer to the implicit
state and secondary we get rid of the suffix @{text "_'"}.
So the assertion @{term "{|\<acute>N = 5|}"} internally gets expanded to
@{text "{s. locals s \<cdot>N_' = 5}"} written in ordinary set comprehension notation of
Isabelle. It describes the set of states where the @{text "N_'"} component
is equal to @{text "5"}.
An empty context and an empty postcondition for abrupt termination can be
omitted. The lemma above is a shorthand for
@{text "\<Gamma>,{}\<turnstile> \<lbrace>\<acute>N = 5\<rbrace> \<acute>N :== 2 * \<acute>N \<lbrace>\<acute>N = 10\<rbrace>,{}"}.
*}
text {* We can step through verification condition generation by the
method @{text vcg_step}.
*}
lemma (in vars) "\<Gamma>,{}\<turnstile> \<lbrace>\<acute>N = 5\<rbrace> \<acute>N :== 2 * \<acute>N \<lbrace>\<acute>N = 10\<rbrace>"
apply vcg_step
txt {* @{subgoals} *}
txt {* The last step of verification condition generation,
transforms the inclusion of state sets to the corresponding
predicate on components of the state space.
*}
apply vcg_step
txt {* @{subgoals} *}
by simp
text {*
Although our assertions work semantically on the state space, stepping
through verification condition generation ``feels'' like the expected
syntactic substitutions of traditional Hoare logic. This is achieved
by light simplification on the assertions calculated by the Hoare rules.
*}
lemma (in vars) "\<Gamma>\<turnstile> \<lbrace>\<acute>N = 5\<rbrace> \<acute>N :== 2 * \<acute>N \<lbrace>\<acute>N = 10\<rbrace>"
apply (rule HoarePartial.Basic)
txt {* @{subgoals} *}
apply (simp only: mem_Collect_eq)
txt {* @{subgoals} *}
apply (tactic
{* Hoare.BasicSimpTac @{context} Hoare.Function false
[] (K all_tac) 1*})
txt {* @{subgoals} *}
by simp
text {* The next example shows how we deal with the while loop. Note the
invariant annotation.
*}
lemma (in vars)
"\<Gamma>,{}\<turnstile> \<lbrace>\<acute>M = 0 \<and> \<acute>S = 0\<rbrace>
WHILE \<acute>M \<noteq> a
INV \<lbrace>\<acute>S = \<acute>M * b\<rbrace>
DO \<acute>S :== \<acute>S + b;; \<acute>M :== \<acute>M + 1 OD
\<lbrace>\<acute>S = a * b\<rbrace>"
apply vcg
txt {* @{subgoals [display]} *}
txt {* The verification condition generator gives us three proof obligations,
stemming from the path from the precondition to the invariant,
from the invariant together with loop condition through the
loop body to the invariant, and finally from the invariant together
with the negated loop condition to the postcondition.*}
apply auto
done
subsection {* Procedures *}
subsubsection {* Declaration *}
text {*
Our first procedure is a simple square procedure. We provide the
command \isacommand{procedures}, to declare and define a
procedure.
*}
procedures
Square (N::nat|R::nat)
where I::nat in
"\<acute>R :== \<acute>N * \<acute>N"
text {* A procedure is given by the signature of the procedure
followed by the procedure body. The signature consists of the name of
the procedure and a list of parameters together with their types. The
parameters in front of the pipe @{text "|"} are value parameters and
behind the pipe are the result parameters. Value parameters model call
by value semantics. The value of a result parameter at the end of the
procedure is passed back to the caller. Local variables follow the
@{text "where"}. If there are no local variables the @{text "where \<dots>
in"} can be omitted. The variable @{term "I"} is actually unused in
the body, but is used in the examples below. *}
text {*
The procedures command provides convenient syntax
for procedure calls (that creates the proper @{term init}, @{term return} and
@{term result} functions on the fly) and creates locales and statespaces to
reason about the procedure. The purpose of locales is to set up logical contexts
to support modular reasoning. Locales can be seen as freeze-dried proof contexts that
get alive as you setup a new lemma or theorem (\cite{Ballarin-04-locales}).
The locale the user deals with is named @{text "Square_impl"}.
It defines the procedure name (internally @{term "Square_'proc"}), the procedure body
(named @{text "Square_body"}) and the statespaces for parameters and local and
global variables.
Moreover it contains the
assumption @{term "\<Gamma> Square_'proc = Some Square_body"}, which states
that the procedure is properly defined in the procedure context.
The purpose of the locale is to give us easy means to setup the context
in which we prove programs correct.
In this locale the procedure context @{term "\<Gamma>"} is fixed.
So we always use this letter for the procedure
specification. This is crucial, if we prove programs under the
assumption of some procedure specifications.
*}
(*<*)
context Square_impl
begin
(*>*)
text {* The procedures command generates syntax, so that we can
either write @{text "CALL Square(\<acute>I,\<acute>R)"} or @{term "\<acute>I :== CALL
Square(\<acute>R)"} for the procedure call. The internal term is the
following:
*}
(*<*) declare [[hoare_use_call_tr' = false]] (*>*)
text {* \small @{term [display] "CALL Square(\<acute>I,\<acute>R)"} *}
(*<*) declare [[hoare_use_call_tr' = true]] (*>*)
text {* Note the
additional decoration (with the procedure name) of the parameter and
local variable names.*}
(*<*)
end
(*>*)
text {* The abstract syntax for the
procedure call is @{term "call init p return result"}. The @{term
"init"} function copies the values of the actual parameters to the
formal parameters, the @{term return} function copies the global
variables back (in our case there are no global variables), and the
@{term "result"} function additionally copies the values of the formal
result parameters to the actual locations. Actual value parameters can
be all kind of expressions, since we only need their value. But result
parameters must be proper ``lvalues'': variables (including
dereferenced pointers) or array locations, since we have to assign
values to them.
*}
subsubsection {* Verification *}
text (in Square_impl) {*
A procedure specification is an ordinary Hoare tuple.
We use the parameterless
call for the specification; @{text "\<acute>R :== PROC Square(\<acute>N)"} is syntactic sugar
for @{text "Call Square_'proc"}. This emphasises that the specification
describes the internal behaviour of the procedure, whereas parameter passing
corresponds to the procedure call.
The following precondition fixes the current value @{text "\<acute>N"} to the logical
variable @{term n}.
Universal quantification of @{term "n"} enables us to adapt
the specification to an actual parameter. The specification is
used in the rule for procedure call when we come upon a call to @{term Square}.
Thus @{term "n"} plays the role of the auxiliary variable @{term "Z"}.
*}
text {* To verify the procedure we need to verify the body. We use
a derived variant of the general recursion rule, tailored for non recursive procedures:
@{thm [source] HoarePartial.ProcNoRec1}:
\begin{center}
@{thm [mode=Rule,mode=ParenStmt] HoarePartial.ProcNoRec1 [no_vars]}
\end{center}
The naming convention for the rule
is the following: The @{text "1"} expresses that we look at one
procedure, and @{text NoRec} that the procedure is non
recursive.
*}
lemma (in Square_impl)
shows "\<forall>n. \<Gamma>\<turnstile>\<lbrace>\<acute>N = n\<rbrace> \<acute>R :== PROC Square(\<acute>N) \<lbrace>\<acute>R = n * n\<rbrace>"
txt {* The directive @{text "in"} has the effect that
the context of the locale @{term "Square_impl"} is included to the current
lemma, and that the lemma is added as a fact to the locale, after it is proven. The
next time locale @{term "Square_impl"} is invoked this lemma is immediately available
as fact, which the verification condition generator can use.
*}
apply (hoare_rule HoarePartial.ProcNoRec1)
txt "@{subgoals[display]}"
txt {* The method @{text "hoare_rule"}, like @{text "rule"} applies a
single rule, but additionally does some ``obvious'' steps:
It solves the canonical side-conditions of various Hoare-rules and it
automatically expands the
procedure body: With @{thm [source] Square_impl}: @{thm [names_short] Square_impl [no_vars]} we
get the procedure body out of the procedure context @{term "\<Gamma>"};
with @{thm [source] Square_body_def}: @{thm [names_short] Square_body_def [no_vars]} we
can unfold the definition of the body.
The proof is finished by the vcg and simp.
*}
txt "@{subgoals[display]}"
by vcg simp
text {* If the procedure is non recursive and there is no specification given, the
verification condition generator automatically expands the body.*}
lemma (in Square_impl) Square_spec:
shows "\<forall>n. \<Gamma>\<turnstile>\<lbrace>\<acute>N = n\<rbrace> \<acute>R :== PROC Square(\<acute>N) \<lbrace>\<acute>R = n * n\<rbrace>"
by vcg simp
text {* An important naming convention is to name the specification as
@{text "<procedure-name>_spec"}. The verification condition generator refers to
this name in order to search for a specification in the theorem database.
*}
subsubsection {* Usage *}
text{* Let us see how we can use procedure specifications. *}
(* FIXME: maybe don't show this at all *)
lemma (in Square_impl)
shows "\<Gamma>\<turnstile>\<lbrace>\<acute>I = 2\<rbrace> \<acute>R :== CALL Square(\<acute>I) \<lbrace>\<acute>R = 4\<rbrace>"
txt {* Remember that we have already proven @{thm [source] "Square_spec"} in the locale
@{text "Square_impl"}. This is crucial for
verification condition generation. When reaching a procedure call,
it looks for the specification (by its name) and applies the
rule @{thm [source,mode=ParenStmt] HoarePartial.ProcSpec}
instantiated with the specification
(as last premise).
Before we apply the verification condition generator, let us
take some time to think of what we can expect.
Let's look at the specification @{thm [source] Square_spec} again:
@{thm [display] Square_spec [no_vars]}
The specification talks about the formal parameters @{term "N"} and
@{term R}. The precondition @{term "\<lbrace>\<acute>N = n\<rbrace>"} just fixes the initial
value of @{text N}.
The actual parameters are @{term "I"} and @{term "R"}. We
have to adapt the specification to this calling context.
@{term "\<forall>n. \<Gamma>\<turnstile> \<lbrace>\<acute>I = n\<rbrace> \<acute>R :== CALL Square(\<acute>I) \<lbrace>\<acute>R = n * n\<rbrace>"}.
From the postcondition @{term "\<lbrace>\<acute>R = n * n\<rbrace>"} we
have to derive the actual postcondition @{term "\<lbrace>\<acute>R = 4\<rbrace>"}. So
we gain something like: @{term "\<lbrace>n * n = (4::nat)\<rbrace>"}.
The precondition is @{term "\<lbrace>\<acute>I = 2\<rbrace>"} and the specification
tells us @{term "\<lbrace>\<acute>I = n\<rbrace>"} for the pre-state. So the value of @{term n}
is the value of @{term I} in the pre-state. So we arrive at
@{term "\<lbrace>\<acute>I = 2\<rbrace> \<subseteq> \<lbrace>\<acute>I * \<acute>I = 4\<rbrace>"}.
*}
apply vcg_step
txt "@{subgoals[display]}"
txt {*
The second set looks slightly more involved:
@{term "\<lbrace>\<forall>t. \<^bsup>t\<^esup>R = \<acute>I * \<acute>I \<longrightarrow> \<acute>I * \<acute>I = 4\<rbrace>"}, this is an artefact from the
procedure call rule. Originally @{text "\<acute>I * \<acute>I = 4"} was @{text "\<^bsup>t\<^esup>R = 4"}. Where
@{term "t"} denotes the final state of the procedure and the superscript notation
allows to select a component from a particular state.
*}
apply vcg_step
txt "@{subgoals[display]}"
by simp
text {*
The adaption of the procedure specification to the actual calling
context is done due to the @{term init}, @{term return} and @{term result} functions
in the rule @{thm [source] HoarePartial.ProcSpec} (or in the variant
@{thm [source] HoarePartial.ProcSpecNoAbrupt} which already
incorporates the fact that the postcondition for abrupt termination
is the empty set). For the readers interested in the internals,
here a version without vcg.
*}
lemma (in Square_impl)
shows "\<Gamma>\<turnstile>\<lbrace>\<acute>I = 2\<rbrace> \<acute>R :== CALL Square(\<acute>I) \<lbrace>\<acute>R = 4\<rbrace>"
apply (rule HoarePartial.ProcSpecNoAbrupt [OF _ _ Square_spec])
txt "@{subgoals[display]}"
txt {* This is the raw verification condition,
It is interesting to see how the auxiliary variable @{term "Z"} is
actually used. It is unified with @{term n} of the specification and
fixes the state after parameter passing.
*}
apply simp
txt "@{subgoals[display]}"
prefer 2
apply vcg_step
txt "@{subgoals[display]}"
apply (auto intro: ext)
done
subsubsection {* Recursion *}
text {* We want to define a procedure for the factorial. We first
define a HOL function that calculates it, to specify the procedure later on.
*}
primrec fac:: "nat \<Rightarrow> nat"
where
"fac 0 = 1" |
"fac (Suc n) = (Suc n) * fac n"
(*<*)
lemma fac_simp [simp]: "0 < i \<Longrightarrow> fac i = i * fac (i - 1)"
by (cases i) simp_all
(*>*)
text {* Now we define the procedure. *}
procedures
Fac (N::nat | R::nat)
"IF \<acute>N = 0 THEN \<acute>R :== 1
ELSE \<acute>R :== CALL Fac(\<acute>N - 1);;
\<acute>R :== \<acute>N * \<acute>R
FI"
text {*
Now let us prove that our implementation of @{term "Fac"} meets its specification.
*}
lemma (in Fac_impl)
shows "\<forall>n. \<Gamma>\<turnstile> \<lbrace>\<acute>N = n\<rbrace> \<acute>R :== PROC Fac(\<acute>N) \<lbrace>\<acute>R = fac n\<rbrace>"
apply (hoare_rule HoarePartial.ProcRec1)
txt "@{subgoals[display]}"
apply vcg
txt "@{subgoals[display]}"
apply simp
done
text {*
Since the factorial is implemented recursively,
the main ingredient of this proof is, to assume that the specification holds for
the recursive call of @{term Fac} and prove the body correct.
The assumption for recursive calls is added to the context by
the rule @{thm [source] HoarePartial.ProcRec1}
(also derived from the general rule for mutually recursive procedures):
\begin{center}
@{thm [mode=Rule,mode=ParenStmt] HoarePartial.ProcRec1 [no_vars]}
\end{center}
The verification condition generator infers the specification out of the
context @{term "\<Theta>"} when it encounters a recursive call of the factorial.
*}
subsection {* Global Variables and Heap \label{sec:VcgHeap}*}
text {*
Now we define and verify some procedures on heap-lists. We consider
list structures consisting of two fields, a content element @{term "cont"} and
a reference to the next list element @{term "next"}. We model this by the
following state space where every field has its own heap.
*}
hoarestate globals_heap =
"next" :: "ref \<Rightarrow> ref"
cont :: "ref \<Rightarrow> nat"
text {* It is mandatory to start the state name with `globals'. This is exploited
by the syntax translations to store the components in the @{const globals} part
of the state.
*}
text {* Updates to global components inside a procedure are
always propagated to the caller. This is implicitly done by the
parameter passing syntax translations.
*}
text {* We first define an append function on lists. It takes two
references as parameters. It appends the list referred to by the first
parameter with the list referred to by the second parameter. The statespace
of the global variables has to be imported.
*}
procedures (imports globals_heap)
append(p :: ref, q::ref | p::ref)
"IF \<acute>p=Null THEN \<acute>p :== \<acute>q
ELSE \<acute>p\<rightarrow>\<acute>next :== CALL append(\<acute>p\<rightarrow>\<acute>next,\<acute>q) FI"
(*<*)
context append_impl
begin
(*>*)
text {*
The difference of a global and a local variable is that global
variables are automatically copied back to the procedure caller.
We can study this effect on the translation of @{term "\<acute>p :== CALL append(\<acute>p,\<acute>q)"}:
*}
(*<*)
declare [[hoare_use_call_tr' = false]]
(*>*)
text {*
@{term [display] "\<acute>p :== CALL append(\<acute>p,\<acute>q)"}
*}
(*<*)
declare [[hoare_use_call_tr' = true]]
end
(*>*)
text {* Below we give two specifications this time.
One captures the functional behaviour and focuses on the
entities that are potentially modified by the procedure, the second one
is a pure frame condition.
*}
text {*
The functional specification below introduces two logical variables besides the
state space variable @{term "\<sigma>"}, namely @{term "Ps"} and @{term "Qs"}.
They are universally quantified and range over both the pre- and the postcondition, so
that we are able to properly instantiate the specification
during the proofs. The syntax @{text "\<lbrace>\<sigma>. \<dots>\<rbrace>"} is a shorthand to fix the current
state: @{text "{s. \<sigma> = s \<dots>}"}. Moreover @{text "\<^bsup>\<sigma>\<^esup>x"} abbreviates
the lookup of variable @{text "x"} in the state
@{text \<sigma>}.
The approach to specify procedures on lists
basically follows \cite{MehtaN-CADE03}. From the pointer structure
in the heap we (relationally) abstract to HOL lists of references. Then
we can specify further properties on the level of HOL lists, rather than
on the heap. The basic abstractions are:
@{thm [display] Path.simps [no_vars]}
@{term [show_types] "Path x h y ps"}: @{term ps} is a list of references that we can obtain
out of the heap @{term h} by starting with the reference @{term x}, following
the references in @{term h} up to the reference @{term y}.
@{thm [display] List_def [no_vars]}
A list @{term "List p h ps"} is a path starting in @{term p} and ending up
in @{term Null}.
*}
lemma (in append_impl) append_spec1:
shows "\<forall>\<sigma> Ps Qs.
\<Gamma>\<turnstile> \<lbrace>\<sigma>. List \<acute>p \<acute>next Ps \<and> List \<acute>q \<acute>next Qs \<and> set Ps \<inter> set Qs = {}\<rbrace>
\<acute>p :== PROC append(\<acute>p,\<acute>q)
\<lbrace>List \<acute>p \<acute>next (Ps@Qs) \<and> (\<forall>x. x\<notin>set Ps \<longrightarrow> \<acute>next x = \<^bsup>\<sigma>\<^esup>next x)\<rbrace>"
apply (hoare_rule HoarePartial.ProcRec1)
txt {* @{subgoals [margin=80,display]}
Note that @{term "hoare_rule"} takes care of multiple auxiliary variables!
@{thm [source] HoarePartial.ProcRec1} has only one auxiliary variable, namely @{term Z}.
But the type of @{term Z} can be instantiated arbitrarily. So @{text "hoare_rule"}
instantiates @{term Z} with the tuple @{term "(\<sigma>,Ps,Qs)"} and derives a proper variant
of the rule. Therefore @{text "hoare_rule"} depends on the proper quantification of
auxiliary variables!
*}
apply vcg
txt {* @{subgoals [display]}
For each branch of the @{text IF} statement we have one conjunct to prove. The
@{text THEN} branch starts with @{text "p = Null \<longrightarrow> \<dots>"} and the @{text ELSE} branch
with @{text "p \<noteq> Null \<longrightarrow> \<dots>"}. Let us focus on the @{text ELSE} branch, where the
recursive call to append occurs. First of all we have to prove that the precondition for
the recursive call is fulfilled. That means we have to provide some witnesses for
the lists @{term Psa} and @{term Qsa} which are referenced by @{text "p\<rightarrow>next"} (now
written as @{term "next p"}) and @{term q}. Then we have to show that we can
derive the overall postcondition from the postcondition of the recursive call. The
state components that have been changed by the recursive call are the ones with the suffix
@{text a}, like @{text nexta} and @{text pa}.
*}
apply fastforce
done
text {* When the verification condition generator encounters a procedure
call, it checks whether it can find a modifies clause in the
context. If one is present the procedure call is simplified before the
Hoare rule @{thm [source] HoarePartial.ProcSpec} is
applied. Simplification of the procedure call means that the ``copy
back'' of the global components is simplified. Only those components
that occur in the modifies clause are actually copied back. This
simplification is justified by the rule @{thm [source]
HoarePartial.ProcModifyReturn}.
So after this simplification all global
components that do not appear in the modifies clause are treated
as local variables. *}
text {* We study the effect of the modifies clause on the following
examples, where we want to prove that @{term "append"} does not change
the @{term "cont"} part of the heap.
*}
lemma (in append_impl)
shows "\<Gamma>\<turnstile> \<lbrace>\<acute>cont=c\<rbrace> \<acute>p :== CALL append(Null,Null) \<lbrace>\<acute>cont=c\<rbrace>"
proof -
note append_spec = append_spec1
show ?thesis
apply vcg
txt {* @{subgoals [display]} *}
txt {* Only focus on the very last line: @{term conta} is the heap component
after the procedure call,
and @{term cont} the heap component before the procedure call. Since
we have not added the modifies clause, we do not know that they have
to be equal.
*}
oops
text {*
We now add the frame condition.
The list in the modifies clause names all global state components that
may be changed by the procedure. Note that we know from the modifies clause
that the @{term cont} parts are not changed. Also a small
side note on the syntax: we use ordinary brackets in the postcondition
of the modifies clause, and the state components do not carry the
acute, because we explicitly name the state @{term t} here.
*}
lemma (in append_impl) append_modifies:
shows "\<forall>\<sigma>. \<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> {\<sigma>} \<acute>p :== PROC append(\<acute>p,\<acute>q)
{t. t may_only_modify_globals \<sigma> in [next]}"
apply (hoare_rule HoarePartial.ProcRec1)
apply (vcg spec=modifies)
done
text {* With the parameter @{text "spec=modifies"} we tell the
verification condition generator to use only the modifies clauses and
not to search for functional specifications. It also tries to solve the
verification conditions automatically. Again it is crucial to name
the lemma with this naming scheme, since the verification condition
generator searches for these names.
*}
text {* The modifies clause is equivalent to a state update specification
of the following form.
*}
lemma (in append_impl) shows "{t. t may_only_modify_globals Z in [next]}
=
{t. \<exists>next. globals t=update id id next_' (K_statefun next) (globals Z)}"
apply (unfold mex_def meq_def)
apply simp
done
text {* Now that we have proven the frame-condition, it is available within
the locale @{text "append_impl"} and the @{text "vcg"} exploits it.*}
lemma (in append_impl)
shows "\<Gamma>\<turnstile> \<lbrace>\<acute>cont=c\<rbrace> \<acute>p :== CALL append(Null,Null) \<lbrace>\<acute>cont=c\<rbrace>"
proof -
note append_spec = append_spec1
show ?thesis
apply vcg
txt {* @{subgoals [display]} *}
txt {* With a modifies clause present we know that no change to @{term cont}
has occurred.
*}
by simp
qed
text {*
Of course we could add the modifies clause to the functional specification as
well. But separating both has the advantage that we split up the verification
work. We can make use of the modifies clause before we apply the
functional specification in a fully automatic fashion.
*}
text {*
To prove that a procedure respects the modifies clause, we only need
the modifies clauses of the procedures called in the body. We do not need
the functional specifications. So we can always prove the modifies
clause without functional specifications, but we may need the modifies
clause to prove the functional specifications. So usually the modifies clause is
proved before the proof of the functional specification, so that it can already be used
by the verification condition generator.
*}
subsection {* Total Correctness *}
text {* When proving total correctness the additional proof burden on
the user is to come up with a well-founded relation and to prove that
certain states get smaller according to this relation. Proving that a
relation is well-founded can be quite hard. But fortunately there are
ways to construct and combine relations so that they are
well-founded by construction. This infrastructure is already present
in Isabelle/HOL. For example, @{term "measure f"} is always well-founded;
the lexicographic product of two well-founded relations is again
well-founded, and the inverse image construction @{term "inv_image"} of
a well-founded relation is again well-founded. The constructions are
best explained by some equations:
@{thm in_measure_iff [no_vars]}\\
@{thm in_lex_iff [no_vars]}\\
@{thm in_inv_image_iff [no_vars]}
Another useful construction is @{text "<*mlex*>"}, which is a combination
of a measure and a lexicographic product:
@{thm in_mlex_iff [no_vars]}\\
In contrast to the lexicographic product it does not construct a product type.
The state may either decrease according to the measure function @{term f}, or the
measure stays the same and the state decreases because of the relation @{term r}.
*}
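text {* To get a feeling for these relations (a small illustration, not part of
the original text), membership in @{term "measure f"} just compares the measured
values: *}
lemma "((2::nat, True), (5::nat, False)) \<in> measure fst"
  by simp
text {* Let us now look at a loop: *}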
lemma (in vars)
"\<Gamma>\<turnstile>\<^sub>t \<lbrace>\<acute>M = 0 \<and> \<acute>S = 0\<rbrace>
WHILE \<acute>M \<noteq> a
INV \<lbrace>\<acute>S = \<acute>M * b \<and> \<acute>M \<le> a\<rbrace>
VAR MEASURE a - \<acute>M
DO \<acute>S :== \<acute>S + b;; \<acute>M :== \<acute>M + 1 OD
\<lbrace>\<acute>S = a * b\<rbrace>"
apply vcg
txt {* @{subgoals [display]}
The first conjunct of the second subgoal is the proof obligation that the
variant decreases in the loop body.
*}
by auto
text {* The variant annotation is preceded by @{text VAR}. The capital @{text MEASURE}
is a shorthand for @{text "measure (\<lambda>s. a - \<^bsup>s\<^esup>M)"}. Analogously, there is a capital
@{text "<*MLEX*>"}.
*}
lemma (in Fac_impl) Fac_spec':
shows "\<forall>\<sigma>. \<Gamma>\<turnstile>\<^sub>t {\<sigma>} \<acute>R :== PROC Fac(\<acute>N) \<lbrace>\<acute>R = fac \<^bsup>\<sigma>\<^esup>N\<rbrace>"
apply (hoare_rule HoareTotal.ProcRec1 [where r="measure (\<lambda>(s,p). \<^bsup>s\<^esup>N)"])
txt {* In case of the factorial the parameter @{term N} decreases in every call. This
is easily expressed by the measure function. Note that the well-founded relation for
recursive procedures is formally defined on tuples
containing the state space and the procedure name.
*}
txt {* @{subgoals [display]}
The initial call to the factorial is in state @{term "\<sigma>"}. Note that in the
precondition @{term "{\<sigma>} \<inter> {\<sigma>'}"}, @{term "\<sigma>'"} stems from the lemma we want to prove
and @{term "\<sigma>"} stems from the recursion rule for total correctness. Both are
synonyms for the initial state. To use the assumption in the Hoare context we
have to show that the call to the factorial is invoked on a smaller @{term N} compared
to the initial @{text "\<^bsup>\<sigma>\<^esup>N"}.
*}
apply vcg
txt {* @{subgoals [display]}
The price for termination is that we have to show @{text "N - 1 < N"} in case of
the recursive call.
*}
by simp
lemma (in append_impl) append_spec2:
shows "\<forall>\<sigma> Ps Qs. \<Gamma>\<turnstile>\<^sub>t
\<lbrace>\<sigma>. List \<acute>p \<acute>next Ps \<and> List \<acute>q \<acute>next Qs \<and> set Ps \<inter> set Qs = {}\<rbrace>
\<acute>p :== PROC append(\<acute>p,\<acute>q)
\<lbrace>List \<acute>p \<acute>next (Ps@Qs) \<and> (\<forall>x. x\<notin>set Ps \<longrightarrow> \<acute>next x = \<^bsup>\<sigma>\<^esup>next x)\<rbrace>"
apply (hoare_rule HoareTotal.ProcRec1
[where r="measure (\<lambda>(s,p). length (list \<^bsup>s\<^esup>p \<^bsup>s\<^esup>next))"])
txt {* In case of the append function the length of the list referenced by @{term p}
decreases in every recursive call.
*}
txt {* @{subgoals [margin=80,display]} *}
apply vcg
apply (fastforce simp add: List_list)
done
text {*
In case of the lists above, we have used a relational list abstraction @{term List}
to construct the HOL lists @{term Ps} and @{term Qs} for the pre- and postcondition.
To supply a proper measure function we use a functional abstraction @{term list}.
The functional abstraction can be defined by means of the relational list abstraction,
since the lists are already uniquely determined by the relational abstraction:
@{thm islist_def [no_vars]}\\
@{thm list_def [no_vars]}
\isacommand{lemma} @{thm List_conv_islist_list [no_vars]}
*}
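text {* As a small consequence (an illustration, not part of the original text),
the relational abstraction indeed determines the functional one: *}
lemma "List p h ps \<Longrightarrow> list p h = ps"
  by (simp add: List_conv_islist_list)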
text {*
The next contrived example is taken from \cite{Homeier-95-vcg}, to illustrate
a more complex termination criterion for mutually recursive procedures. The procedures
do not calculate anything useful.
*}
procedures
pedal(N::nat,M::nat)
"IF 0 < \<acute>N THEN
IF 0 < \<acute>M THEN
CALL coast(\<acute>N- 1,\<acute>M- 1) FI;;
CALL pedal(\<acute>N- 1,\<acute>M)
FI"
and
coast(N::nat,M::nat)
"CALL pedal(\<acute>N,\<acute>M);;
IF 0 < \<acute>M THEN CALL coast(\<acute>N,\<acute>M- 1) FI"
text {*
In the recursive calls in procedure @{text pedal} the first argument always decreases.
In the body of @{text coast}, in the recursive call of @{text coast}, the second
argument decreases, but in the call to @{text pedal} no argument decreases.
Therefore a relation only on the state space is insufficient. We have to
take the procedure names into account, too.
We consider the procedure @{text coast} to be ``bigger'' than @{text pedal}
when we construct a well-founded relation on the product of state space and procedure
names.
*}
ML {* ML_Thms.bind_thm ("HoareTotal_ProcRec2", Hoare.gen_proc_rec @{context} Hoare.Total 2)*}
text {*
We provide the ML function {\tt gen\_proc\_rec} to
automatically derive a convenient rule for recursion for a given number of mutually
recursive procedures.
*}
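text {* Analogously (an illustrative variant, not part of the original text; we
assume that @{text "Hoare.Partial"} is the partial-correctness counterpart of
@{text "Hoare.Total"}), a partial-correctness rule could be derived: *}
ML {* ML_Thms.bind_thm ("HoarePartial_ProcRec2", Hoare.gen_proc_rec @{context} Hoare.Partial 2)*}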
lemma (in pedal_coast_clique)
shows "(\<forall>\<sigma>. \<Gamma>\<turnstile>\<^sub>t {\<sigma>} PROC pedal(\<acute>N,\<acute>M) UNIV) \<and>
(\<forall>\<sigma>. \<Gamma>\<turnstile>\<^sub>t {\<sigma>} PROC coast(\<acute>N,\<acute>M) UNIV)"
apply (hoare_rule HoareTotal_ProcRec2
[where r= "((\<lambda>(s,p). \<^bsup>s\<^esup>N) <*mlex*>
(\<lambda>(s,p). \<^bsup>s\<^esup>M) <*mlex*>
measure (\<lambda>(s,p). if p = coast_'proc then 1 else 0))"])
txt {* We can directly express the termination condition described above with
the @{text "<*mlex*>"} construction. Either state component @{text N} decreases,
or it stays the same and @{text M} decreases, or both stay the same but
then the procedure name has to decrease.*}
txt {* @{subgoals [margin=80,display]} *}
apply simp_all
txt {* @{subgoals [margin=75,display]} *}
by (vcg,simp)+
text {* We can achieve the same effect without @{text "<*mlex*>"} by using
the ordinary lexicographic product @{text "<*lex*>"}, @{text "inv_image"} and
@{text "measure"}
*}
lemma (in pedal_coast_clique)
shows "(\<forall>\<sigma>. \<Gamma>\<turnstile>\<^sub>t {\<sigma>} PROC pedal(\<acute>N,\<acute>M) UNIV) \<and>
(\<forall>\<sigma>. \<Gamma>\<turnstile>\<^sub>t {\<sigma>} PROC coast(\<acute>N,\<acute>M) UNIV)"
apply (hoare_rule HoareTotal_ProcRec2
[where r= "inv_image (measure (\<lambda>m. m) <*lex*>
measure (\<lambda>m. m) <*lex*>
measure (\<lambda>p. if p = coast_'proc then 1 else 0))
(\<lambda>(s,p). (\<^bsup>s\<^esup>N,\<^bsup>s\<^esup>M,p))"])
txt {* With the lexicographic product we construct a well-founded relation on
triples of type @{typ "(nat\<times>nat\<times>string)"}. With @{term inv_image} we project
the relevant components out of the state space and the procedure name onto this
triple.
*}
txt {* @{subgoals [margin=75,display]} *}
apply simp_all
by (vcg,simp)+
text {* By doing some arithmetic we can express the termination condition with a single
measure function.
*}
lemma (in pedal_coast_clique)
shows "(\<forall>\<sigma>. \<Gamma>\<turnstile>\<^sub>t {\<sigma>} PROC pedal(\<acute>N,\<acute>M) UNIV) \<and>
(\<forall>\<sigma>. \<Gamma>\<turnstile>\<^sub>t {\<sigma>} PROC coast(\<acute>N,\<acute>M) UNIV)"
apply(hoare_rule HoareTotal_ProcRec2
[where r= "measure (\<lambda>(s,p). \<^bsup>s\<^esup>N + \<^bsup>s\<^esup>M + (if p = coast_'proc then 1 else 0))"])
apply simp_all
txt {* @{subgoals [margin=75,display]} *}
by (vcg,simp,arith?)+
subsection {* Guards *}
text (in vars) {* The purpose of a guard is to guard the {\bf (sub-) expressions} of a
statement against runtime faults. Typical runtime faults are array bound violations,
dereferencing null pointers or arithmetical overflow. Guards make the potential
runtime faults explicit, since the expressions themselves never ``fail'' because
they are ordinary HOL expressions. To relieve the user from typing in lots of standard
guards for every subexpression, we supply some input syntax for the common
language constructs that automatically generate the guards.
For example the guarded assignment @{text "\<acute>M :==\<^sub>g (\<acute>M + 1) div \<acute>N"} gets expanded to
the guarded command @{term "\<acute>M :==\<^sub>g (\<acute>M + 1) div \<acute>N"}. Here @{term "in_range"} is
left uninterpreted for now.
*}
lemma (in vars) "\<Gamma>\<turnstile>\<lbrace>True\<rbrace> \<acute>M :==\<^sub>g (\<acute>M + 1) div \<acute>N \<lbrace>True\<rbrace>"
apply vcg
txt {* @{subgoals} *}
oops
text {*
The user can supply an (overloaded) definition of @{text "in_range"}
to fit their needs.
Currently guards are generated for:
\begin{itemize}
\item overflow and underflow of numbers (@{text "in_range"}). For subtraction of
natural numbers @{text "a - b"} the guard @{text "b \<le> a"} is generated instead
of @{text "in_range"} to guard against underflows.
\item division by @{text 0}
\item dereferencing of @{term Null} pointers
\item array bound violations
\end{itemize}
The following (input) variants of guarded statements are available:
\begin{itemize}
\item Assignment: @{text "\<dots> :==\<^sub>g \<dots>"}
\item If: @{text "IF\<^sub>g \<dots>"}
\item While: @{text "WHILE\<^sub>g \<dots>"}
\item Call: @{text "CALL\<^sub>g \<dots>"} or @{text "\<dots> :== CALL\<^sub>g \<dots>"}
\end{itemize}
*}
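text {* For instance (an illustrative sketch along the lines of the example
above, not part of the original text), guarded subtraction on natural numbers
yields the guard @{text "\<acute>N \<le> \<acute>M"} rather than @{text "in_range"}: *}
lemma (in vars) "\<Gamma>\<turnstile>\<lbrace>\<acute>N \<le> \<acute>M\<rbrace> \<acute>M :==\<^sub>g \<acute>M - \<acute>N \<lbrace>True\<rbrace>"
apply vcg
txt {* @{subgoals} *}
oops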
subsection {* Miscellaneous Techniques *}
subsubsection {* Modifies Clause *}
text {* We look at some issues regarding the modifies clause with the example
of insertion sort for heap lists.
*}
primrec sorted:: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> bool"
where
"sorted le [] = True" |
"sorted le (x#xs) = ((\<forall>y\<in>set xs. le x y) \<and> sorted le xs)"
procedures (imports globals_heap)
insert(r::ref,p::ref | p::ref)
"IF \<acute>r=Null THEN SKIP
ELSE IF \<acute>p=Null THEN \<acute>p :== \<acute>r;; \<acute>p\<rightarrow>\<acute>next :== Null
ELSE IF \<acute>r\<rightarrow>\<acute>cont \<le> \<acute>p\<rightarrow>\<acute>cont
THEN \<acute>r\<rightarrow>\<acute>next :== \<acute>p;; \<acute>p:==\<acute>r
ELSE \<acute>p\<rightarrow>\<acute>next :== CALL insert(\<acute>r,\<acute>p\<rightarrow>\<acute>next)
FI
FI
FI"
lemma (in insert_impl) insert_modifies:
"\<forall>\<sigma>. \<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> {\<sigma>} \<acute>p :== PROC insert(\<acute>r,\<acute>p)
{t. t may_only_modify_globals \<sigma> in [next]}"
by (hoare_rule HoarePartial.ProcRec1) (vcg spec=modifies)
lemma (in insert_impl) insert_spec:
"\<forall>\<sigma> Ps . \<Gamma>\<turnstile>
\<lbrace>\<sigma>. List \<acute>p \<acute>next Ps \<and> sorted (op \<le>) (map \<acute>cont Ps) \<and>
\<acute>r \<noteq> Null \<and> \<acute>r \<notin> set Ps\<rbrace>
\<acute>p :== PROC insert(\<acute>r,\<acute>p)
\<lbrace>\<exists>Qs. List \<acute>p \<acute>next Qs \<and> sorted (op \<le>) (map \<^bsup>\<sigma>\<^esup>cont Qs) \<and>
set Qs = insert \<^bsup>\<sigma>\<^esup>r (set Ps) \<and>
(\<forall>x. x \<notin> set Qs \<longrightarrow> \<acute>next x = \<^bsup>\<sigma>\<^esup>next x)\<rbrace>"
(*<*)
apply (hoare_rule HoarePartial.ProcRec1)
apply vcg
apply (intro conjI impI)
apply fastforce
apply fastforce
apply fastforce
apply (clarsimp)
apply force
done
(*>*)
text {*
In the postcondition of the functional specification there is a small but
important subtlety. Whenever we talk about the @{term "cont"} part we refer to
the one of the pre-state.
The reason is that we have separated out the information that @{term "cont"} is not
modified by the procedure into the modifies clause. So whenever we talk about unmodified
parts in the postcondition we have to use the pre-state part, or explicitly
state an equality in the postcondition.
The reason is simple. If the postcondition talked about @{text "\<acute>cont"}
instead of \mbox{@{text "\<^bsup>\<sigma>\<^esup>cont"}}, we would get a new instance of @{text "cont"} during
verification and the postcondition would only state something about this
new instance. But as the verification condition generator uses the
modifies clause, the caller of @{term "insert"} still has the
old @{text "cont"} after the call. That is the point of the modifies clause.
So the caller and the specification simply talk about two different things,
without being able to relate them (unless an explicit equality is added to
the specification).
*}
subsubsection {* Annotations *}
text {*
Annotations (like loop invariants)
are mere syntactic sugar of statements that are used by the @{text "vcg"}.
Logically a statement with an annotation is
equal to the statement without it. Hence annotations can be introduced by the user
while building a proof:
@{thm [source] HoarePartial.annotateI}: @{thm [mode=Rule] HoarePartial.annotateI [no_vars]}
When introducing annotations it can easily happen that they mess around with the
nesting of sequential composition. Then after stripping the annotations the resulting statement
is no longer syntactically identical to the original one, only equivalent modulo associativity of sequential composition. The following rule also deals with this case:
@{thm [source] HoarePartial.annotate_normI}: @{thm [mode=Rule] HoarePartial.annotate_normI [no_vars]}
*}
text_raw {* \paragraph{Loop Annotations}
\mbox{}
\medskip
\mbox{}
*}
procedures (imports globals_heap)
insertSort(p::ref| p::ref)
where r::ref q::ref in
"\<acute>r:==Null;;
WHILE (\<acute>p \<noteq> Null) DO
\<acute>q :== \<acute>p;;
\<acute>p :== \<acute>p\<rightarrow>\<acute>next;;
\<acute>r :== CALL insert(\<acute>q,\<acute>r)
OD;;
\<acute>p:==\<acute>r"
lemma (in insertSort_impl) insertSort_modifies:
shows
"\<forall>\<sigma>. \<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> {\<sigma>} \<acute>p :== PROC insertSort(\<acute>p)
{t. t may_only_modify_globals \<sigma> in [next]}"
apply (hoare_rule HoarePartial.ProcRec1)
apply (vcg spec=modifies)
done
text {* Insertion sort is not implemented recursively here, but with a
loop. Note that the while loop is not annotated with an invariant in the
procedure definition. The invariant only comes into play during verification.
Therefore we annotate the loop first, before we run the @{text "vcg"}.
*}
lemma (in insertSort_impl) insertSort_spec:
shows "\<forall>\<sigma> Ps.
\<Gamma>\<turnstile> \<lbrace>\<sigma>. List \<acute>p \<acute>next Ps \<rbrace>
\<acute>p :== PROC insertSort(\<acute>p)
\<lbrace>\<exists>Qs. List \<acute>p \<acute>next Qs \<and> sorted (op \<le>) (map \<^bsup>\<sigma>\<^esup>cont Qs) \<and>
set Qs = set Ps\<rbrace>"
apply (hoare_rule HoarePartial.ProcRec1)
apply (hoare_rule anno=
"\<acute>r :== Null;;
WHILE \<acute>p \<noteq> Null
INV \<lbrace>\<exists>Qs Rs. List \<acute>p \<acute>next Qs \<and> List \<acute>r \<acute>next Rs \<and>
set Qs \<inter> set Rs = {} \<and>
sorted (op \<le>) (map \<acute>cont Rs) \<and> set Qs \<union> set Rs = set Ps \<and>
\<acute>cont = \<^bsup>\<sigma>\<^esup>cont \<rbrace>
DO \<acute>q :== \<acute>p;; \<acute>p :== \<acute>p\<rightarrow>\<acute>next;; \<acute>r :== CALL insert(\<acute>q,\<acute>r) OD;;
\<acute>p :== \<acute>r" in HoarePartial.annotateI)
apply vcg
txt {* @{text "\<dots>"} *}
(*<*)
apply fastforce
prefer 2
apply fastforce
apply (clarsimp)
apply (rule_tac x=ps in exI)
apply (intro conjI)
apply (rule heap_eq_ListI1)
apply assumption
apply clarsimp
apply (subgoal_tac "x\<noteq>p \<and> x \<notin> set Rs")
apply auto
done
(*>*)
text {* The method @{text "hoare_rule"} automatically solves the side-condition
that the annotated
program is the same as the original one after stripping the annotations. *}
text_raw {* \paragraph{Specification Annotations}
\mbox{}
\medskip
\mbox{}
*}
text {*
When verifying a larger block of program text, it might be useful to split up
the block and to prove the parts in isolation. This is especially useful to
isolate loops. On the level of the Hoare calculus
the parts can then be combined with the consequence rule. To automate this
process we introduce the derived command @{term specAnno}, which allows us to introduce
a Hoare tuple (including auxiliary variables) in the program text:
@{thm specAnno_def [no_vars]}
The whole annotation reduces to the body @{term "c undefined"}. The
type of the assertions @{term "P"}, @{term "Q"} and @{term "A"} is
@{typ "'a \<Rightarrow> 's set"} and the type of command @{term c} is @{typ "'a \<Rightarrow> ('s,'p,'f) com"}.
All entities formally depend on an auxiliary (logical) variable of type @{typ "'a"}.
The body @{term "c"} formally also depends on this variable, since a nested annotation
or loop invariant may also depend on this logical variable. But the raw body without
annotations does not depend on the logical variable. The logical variable is only
used by the verification condition generator. We express this by defining the
whole @{term specAnno} to be equivalent to the body applied to an arbitrary
variable.
The Hoare rule for @{text "specAnno"} is mainly an instance of the consequence rule:
@{thm [mode=Rule,mode=ParenStmt] HoarePartial.SpecAnno [no_vars]}
The side-condition @{term "\<forall>Z. c Z = c undefined"} expresses the intention of body @{term c}
explained above: The raw body is independent of the auxiliary variable. This
side-condition is solved automatically by the @{text "vcg"}. The concrete syntax for
this specification annotation is shown in the following example:
*}
lemma (in vars) "\<Gamma>\<turnstile> {\<sigma>}
\<acute>I :== \<acute>M;;
ANNO \<tau>. \<lbrace>\<tau>. \<acute>I = \<^bsup>\<sigma>\<^esup>M\<rbrace>
\<acute>M :== \<acute>N;; \<acute>N :== \<acute>I
\<lbrace>\<acute>M = \<^bsup>\<tau>\<^esup>N \<and> \<acute>N = \<^bsup>\<tau>\<^esup>I\<rbrace>
\<lbrace>\<acute>M = \<^bsup>\<sigma>\<^esup>N \<and> \<acute>N = \<^bsup>\<sigma>\<^esup>M\<rbrace>"
txt {* With the annotation we can name an intermediate state @{term \<tau>}. Since the
postcondition refers to @{term "\<sigma>"} we have to link the information about
the equivalence of @{text "\<^bsup>\<tau>\<^esup>I"} and @{text "\<^bsup>\<sigma>\<^esup>M"} in the specification in order
to be able to derive the postcondition.
*}
apply vcg_step
apply vcg_step
txt {* @{subgoals [display]} *}
txt {* The first subgoal is the isolated Hoare tuple. The second one is the
side-condition of the consequence rule that allows us to derive the outermost
pre/post condition from our inserted specification.
@{text "\<acute>I = \<^bsup>\<sigma>\<^esup>M"} is the precondition of the specification,
The second conjunct is a simplified version of
@{text "\<forall>t. \<^bsup>t\<^esup>M = \<acute>N \<and> \<^bsup>t\<^esup>N = \<acute>I \<longrightarrow> \<^bsup>t\<^esup>M = \<^bsup>\<sigma>\<^esup>N \<and> \<^bsup>t\<^esup>N = \<^bsup>\<sigma>\<^esup>M"} expressing that the
postcondition of the specification implies the outermost postcondition.
*}
apply vcg
txt {* @{subgoals [display]} *}
apply simp
apply vcg
txt {* @{subgoals [display]} *}
by simp
lemma (in vars)
"\<Gamma>\<turnstile> {\<sigma>}
\<acute>I :== \<acute>M;;
ANNO \<tau>. \<lbrace>\<tau>. \<acute>I = \<^bsup>\<sigma>\<^esup>M\<rbrace>
\<acute>M :== \<acute>N;; \<acute>N :== \<acute>I
\<lbrace>\<acute>M = \<^bsup>\<tau>\<^esup>N \<and> \<acute>N = \<^bsup>\<tau>\<^esup>I\<rbrace>
\<lbrace>\<acute>M = \<^bsup>\<sigma>\<^esup>N \<and> \<acute>N = \<^bsup>\<sigma>\<^esup>M\<rbrace>"
apply vcg
txt {* @{subgoals [display]} *}
by simp_all
text {* Note that @{text "vcg_step"} changes the order of sequential composition, to
allow the user to decompose sequences by repeated calls to @{text "vcg_step"}, whereas
@{text "vcg"} preserves the order.
The above example illustrates how we can introduce a new logical state variable
@{term "\<tau>"}. You can introduce multiple variables by using a tuple:
*}
lemma (in vars)
"\<Gamma>\<turnstile> {\<sigma>}
\<acute>I :== \<acute>M;;
ANNO (n,i,m). \<lbrace>\<acute>I = \<^bsup>\<sigma>\<^esup>M \<and> \<acute>N=n \<and> \<acute>I=i \<and> \<acute>M=m\<rbrace>
\<acute>M :== \<acute>N;; \<acute>N :== \<acute>I
\<lbrace>\<acute>M = n \<and> \<acute>N = i\<rbrace>
\<lbrace>\<acute>M = \<^bsup>\<sigma>\<^esup>N \<and> \<acute>N = \<^bsup>\<sigma>\<^esup>M\<rbrace>"
apply vcg
txt {* @{subgoals [display]} *}
by simp_all
text_raw {* \paragraph{Lemma Annotations}
\mbox{}
\medskip
\mbox{}
*}
text {*
The specification annotations described before split the verification
into several Hoare triples which result in several subgoals. If we
instead want to prove the Hoare triples independently as
separate lemmas we can use the @{text "LEMMA"} annotation to plug together the
lemmas. It
inserts the lemma in the same fashion as the specification annotation.
*}
lemma (in vars) foo_lemma:
"\<forall>n m. \<Gamma>\<turnstile> \<lbrace>\<acute>N = n \<and> \<acute>M = m\<rbrace> \<acute>N :== \<acute>N + 1;; \<acute>M :== \<acute>M + 1
\<lbrace>\<acute>N = n + 1 \<and> \<acute>M = m + 1\<rbrace>"
apply vcg
apply simp
done
lemma (in vars)
"\<Gamma>\<turnstile> \<lbrace>\<acute>N = n \<and> \<acute>M = m\<rbrace>
LEMMA foo_lemma
\<acute>N :== \<acute>N + 1;; \<acute>M :== \<acute>M + 1
END;;
\<acute>N :== \<acute>N + 1
\<lbrace>\<acute>N = n + 2 \<and> \<acute>M = m + 1\<rbrace>"
apply vcg
apply simp
done
lemma (in vars)
"\<Gamma>\<turnstile> \<lbrace>\<acute>N = n \<and> \<acute>M = m\<rbrace>
LEMMA foo_lemma
\<acute>N :== \<acute>N + 1;; \<acute>M :== \<acute>M + 1
END;;
LEMMA foo_lemma
\<acute>N :== \<acute>N + 1;; \<acute>M :== \<acute>M + 1
END
\<lbrace>\<acute>N = n + 2 \<and> \<acute>M = m + 2\<rbrace>"
apply vcg
apply simp
done
lemma (in vars)
"\<Gamma>\<turnstile> \<lbrace>\<acute>N = n \<and> \<acute>M = m\<rbrace>
\<acute>N :== \<acute>N + 1;; \<acute>M :== \<acute>M + 1;;
\<acute>N :== \<acute>N + 1;; \<acute>M :== \<acute>M + 1
\<lbrace>\<acute>N = n + 2 \<and> \<acute>M = m + 2\<rbrace>"
apply (hoare_rule anno=
"LEMMA foo_lemma
\<acute>N :== \<acute>N + 1;; \<acute>M :== \<acute>M + 1
END;;
LEMMA foo_lemma
\<acute>N :== \<acute>N + 1;; \<acute>M :== \<acute>M + 1
END"
in HoarePartial.annotate_normI)
apply vcg
apply simp
done
subsubsection {* Total Correctness of Nested Loops *}
text {*
When proving termination of nested loops it is sometimes necessary to express that
the loop variable of the outer loop is not modified in the inner loop. To express this
one has to fix the value of the outer loop variable before the inner loop and use this value
in the invariant of the inner loop. This can be achieved by surrounding the inner while loop
with an @{text "ANNO"} specification as explained previously. However, this
leads to repeating the invariant of the inner loop three times: in the invariant itself and
in the pre- and postcondition of the @{text "ANNO"} specification. Moreover one has
to deal with the additional subgoal introduced by @{text "ANNO"} that expresses how
the pre- and postcondition is connected to the invariant. To avoid this extra specification
and verification work, we introduce a variant of the annotated while-loop, where one can
introduce logical variables by @{text "FIX"}. As for the @{text "ANNO"} specification
multiple logical variables can be introduced via a tuple (@{text "FIX (a,b,c)."}).
The Hoare logic rule for the augmented while-loop is a mixture of the invariant rule for
loops and the consequence rule for @{text "ANNO"}:
\begin{center}
@{thm [mode=Rule,mode=ParenStmt] HoareTotal.WhileAnnoFix' [no_vars]}
\end{center}
The first premise expresses that the precondition implies the invariant and that
the invariant together with the negated loop condition implies the postcondition. Since
both implications may depend on the choice of the auxiliary variable @{term "Z"} these two
implications are expressed in a single premise and not in two of them as for the usual while
rule. The second premise is the preservation of the invariant by the loop body. And the third
premise is the side-condition that the computational part of the body does not depend on
the auxiliary variable. Finally the last premise is the well-foundedness of the variant.
The last two premises are usually discharged automatically by the verification condition
generator. Hence usually two subgoals remain for the user, stemming from the first two
premises.
The following example illustrates the usage of this rule. The outer loop increments the
loop variable @{term "M"} while the inner loop increments @{term "N"}. To discharge the
proof obligation for the termination of the outer loop, we need to know that the inner loop
does not mess around with @{term "M"}. This is expressed by introducing the logical variable
@{term "m"} and fixing the value of @{term "M"} to it.
*}
lemma (in vars)
"\<Gamma>\<turnstile>\<^sub>t \<lbrace>\<acute>M=0 \<and> \<acute>N=0\<rbrace>
WHILE (\<acute>M < i)
INV \<lbrace>\<acute>M \<le> i \<and> (\<acute>M \<noteq> 0 \<longrightarrow> \<acute>N = j) \<and> \<acute>N \<le> j\<rbrace>
VAR MEASURE (i - \<acute>M)
DO
\<acute>N :== 0;;
WHILE (\<acute>N < j)
FIX m.
INV \<lbrace>\<acute>M=m \<and> \<acute>N \<le> j\<rbrace>
VAR MEASURE (j - \<acute>N)
DO
\<acute>N :== \<acute>N + 1
OD;;
\<acute>M :== \<acute>M + 1
OD
\<lbrace>\<acute>M=i \<and> (\<acute>M\<noteq>0 \<longrightarrow> \<acute>N=j)\<rbrace>"
apply vcg
txt {* @{subgoals [display]}
The first subgoal is from the precondition to the invariant of the outer loop.
The fourth subgoal is from the invariant together with the negated loop condition
of the outer loop to the postcondition. The subgoals two and three are from the body
of the outer while loop which is mainly the inner while loop. Because we introduce the
logical variable @{term "m"} here, the while rule described above is used instead of the
ordinary while rule. That is why we end up with two subgoals for the inner loop. Subgoal
two is from the invariant and the loop condition of the outer loop to the invariant
of the inner loop. And at the same time from the invariant of the inner loop to the
invariant of the outer loop (together with the proof obligation that the measure of the
outer loop decreases). The universally quantified variables @{term "Ma"} and @{term "N"} are
the ``fresh'' state variables introduced for the final state of the inner loop.
The equality @{term "Ma=M"} is the result of the equality @{text "\<acute>M=m"} in the inner
invariant. Subgoal three is the preservation of the invariant by the
inner loop body (together with the proof obligation that the measure of
the inner loop decreases).
*}
(*<*)
apply (simp)
apply (simp,arith)
apply (simp,arith)
done
(*>*)
subsection {* Functional Correctness, Termination and Runtime Faults *}
text {*
Total correctness of a program with guards conceptually leads to three verification
tasks.
\begin{itemize}
\item functional (partial) correctness
\item absence of runtime faults
\item termination
\end{itemize}
In case of a modifies specification the functional correctness part
can be solved automatically. But the absence of runtime faults and
termination may be non-trivial. Fortunately the modifies clause is
usually just a helpful companion of another specification that
expresses the ``real'' functional behaviour. Therefore the task of
proving the absence of runtime faults and termination can be dealt with
during the proof of this functional specification. In most cases the
absence of runtime faults and termination heavily builds on the
functional specification parts. So after all there is no reason why
we should again prove the absence of runtime faults and termination
for the modifies clause. Therefore it suffices to have partial
correctness of the modifies clause for a program where all guards are
ignored. This leads to the following pattern: *}
procedures foo (N::nat|M::nat)
"\<acute>M :== \<acute>M
(* think of body with guards instead *)"
foo_spec: "\<forall>\<sigma>. \<Gamma>\<turnstile>\<^sub>t (P \<sigma>) \<acute>M :== PROC foo(\<acute>N) (Q \<sigma>)"
foo_modifies: "\<forall>\<sigma>. \<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> {\<sigma>} \<acute>M :== PROC foo(\<acute>N)
{t. t may_only_modify_globals \<sigma> in []}"
text {*
The verification condition generator can solve those modifies clauses automatically
and can use them to simplify calls to @{text foo} even in the context of total
correctness.
*}
subsection {* Procedures and Locales \label{sec:Locales}*}
text {*
Verification of a larger program is organised on the granularity of procedures.
We prove the procedures in a bottom-up fashion. Of course you can also always use Isabelle's
dummy proof @{text "sorry"} to prototype your formalisation. So you can write the
theory in a bottom-up fashion but actually prove the lemmas in any other order.
Here are some explanations of the handling of locales. In the examples below, consider
@{text proc\<^sub>1} and @{text proc\<^sub>2} to be ``leaf'' procedures, which do not call any
other procedure.
Procedure @{text "proc"} directly calls @{text proc\<^sub>1} and @{text proc\<^sub>2}.
\isacommand{lemma} (\isacommand{in} @{text "proc\<^sub>1_impl"}) @{text "proc\<^sub>1_modifies"}:\\
\isacommand{shows} @{text "\<dots>"}
After the proof of @{text "proc\<^sub>1_modifies"}, the \isacommand{in} directive
stores the lemma in the
locale @{text "proc\<^sub>1_impl"}. When we later on include @{text "proc\<^sub>1_impl"} or prove
another theorem in locale @{text "proc\<^sub>1_impl"} the lemma @{text "proc\<^sub>1_modifies"}
will already be available as a fact.
\isacommand{lemma} (\isacommand{in} @{text "proc\<^sub>1_impl"}) @{text "proc\<^sub>1_spec"}:\\
\isacommand{shows} @{text "\<dots>"}
\isacommand{lemma} (\isacommand{in} @{text "proc\<^sub>2_impl"}) @{text "proc\<^sub>2_modifies"}:\\
\isacommand{shows} @{text "\<dots>"}
\isacommand{lemma} (\isacommand{in} @{text "proc\<^sub>2_impl"}) @{text "proc\<^sub>2_spec"}:\\
\isacommand{shows} @{text "\<dots>"}
\isacommand{lemma} (\isacommand{in} @{text "proc_impl"}) @{text "proc_modifies"}:\\
\isacommand{shows} @{text "\<dots>"}
Note that we do not explicitly include anything about @{text "proc\<^sub>1"} or
@{text "proc\<^sub>2"} here. This is handled automatically. When defining
an @{text impl}-locale it imports all @{text impl}-locales of procedures that are
called in the body. In case of @{text "proc_impl"} this means, that @{text "proc\<^sub>1_impl"}
and @{text "proc\<^sub>2_impl"} are imported. This has the neat effect that all theorems that
are proven in @{text "proc\<^sub>1_impl"} and @{text "proc\<^sub>2_impl"} are also present
in @{text "proc_impl"}.
\isacommand{lemma} (\isacommand{in} @{text "proc_impl"}) @{text "proc_spec"}:\\
\isacommand{shows} @{text "\<dots>"}
As we have seen in this example you only have to prove a procedure in its own
@{text "impl"} locale. You do not have to include any other locale.
*}
subsection {* Records \label{sec:records}*}
text {*
Before @{term "statespaces"} were introduced the state was represented as a @{term "record"}.
This is still supported. Compared to the flexibility of statespaces there are some drawbacks,
in particular with respect to modularity. Even names of local variables and
parameters are globally visible and records can only be extended in a linear fashion, whereas
statespaces also allow multiple inheritance. The usage of records is quite similar to the usage of statespaces.
We repeat the example of an append function for heap lists.
First we define the global components.
Again the appearance of the prefix `globals' is mandatory. This is the way the syntax layer distinguishes local and global variables.
*}
record globals_list =
next_' :: "ref \<Rightarrow> ref"
cont_' :: "ref \<Rightarrow> nat"
text {* The local variables also have to be defined as a record before the actual definition
of the procedure. The parent record @{text "state"} defines a generic @{term "globals"}
field as a place-holder for the record of global components. In contrast to the
statespace approach there is no single @{term "locals"} slot. The local components are
just added to the record.
*}
record 'g list_vars = "'g state" +
p_' :: "ref"
q_' :: "ref"
r_' :: "ref"
root_' :: "ref"
tmp_' :: "ref"
text {* Since the parameters and local variables are determined by the record, there are
no type annotations or definitions of local variables while defining a procedure.
*}
procedures
append'(p,q|p) =
"IF \<acute>p=Null THEN \<acute>p :== \<acute>q
ELSE \<acute>p \<rightarrow>\<acute>next:== CALL append'(\<acute>p\<rightarrow>\<acute>next,\<acute>q) FI"
text {* As in the statespace approach, a locale called @{text "append'_impl"} is created.
Note that we do not give any explicit information about which global or local state-record to use.
Since the records are already defined we rely on Isabelle's type inference.
Dealing with the locale is analogous to the case with statespaces.
*}
lemma (in append'_impl) append'_modifies:
shows
"\<forall>\<sigma>. \<Gamma>\<turnstile> {\<sigma>} \<acute>p :== PROC append'(\<acute>p,\<acute>q)
{t. t may_only_modify_globals \<sigma> in [next]}"
apply (hoare_rule HoarePartial.ProcRec1)
apply (vcg spec=modifies)
done
lemma (in append'_impl) append'_spec:
shows "\<forall>\<sigma> Ps Qs. \<Gamma>\<turnstile>
\<lbrace>\<sigma>. List \<acute>p \<acute>next Ps \<and> List \<acute>q \<acute>next Qs \<and> set Ps \<inter> set Qs = {}\<rbrace>
\<acute>p :== PROC append'(\<acute>p,\<acute>q)
\<lbrace>List \<acute>p \<acute>next (Ps@Qs) \<and> (\<forall>x. x\<notin>set Ps \<longrightarrow> \<acute>next x = \<^bsup>\<sigma>\<^esup>next x)\<rbrace>"
apply (hoare_rule HoarePartial.ProcRec1)
apply vcg
apply fastforce
done
text {*
However, in some corner cases the inferred state type in a procedure definition
can be too general, which raises problems when attempting to prove suitable
specifications in the locale.
Consider for example the simple procedure body @{term "\<acute>p :== Null"} for a procedure
@{text "init"}.
*}
procedures init (|p) =
"\<acute>p:== Null"
text {*
Here Isabelle can only
infer the local variable record. Since no reference to any global variable is
made, the type fixed for the global variables (in the locale @{text "init_impl"}) is a
type variable, say @{typ "'g"}, and not the @{term "globals_list"} record. Any specification
mentioning @{term "next"} or @{term "cont"} restricts the state type and cannot be
added to the locale @{text "init_impl"}. Hence we have to restrict the body
@{term "\<acute>p :== NULL"} in the first place by adding a typing annotation:
*}
procedures init' (|p) =
"\<acute>p:== Null::(('a globals_list_scheme, 'b) list_vars_scheme, char list, 'c) com"
subsubsection {* Extending State Spaces *}
text {*
The records in Isabelle are
extensible \cite{Nipkow-02-hol,NaraschewskiW-TPHOLs98}. In principle this can be exploited
during verification. The state space can be extended while we add procedures.
But there is one major drawback:
\begin{itemize}
\item records can only be extended in a linear fashion (there is no multiple inheritance)
\end{itemize}
You can extend both the main state record as well as the record for the global variables.
*}
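text {* For example (a hypothetical extension, not part of the original text),
the record of global components can be extended linearly with a further heap: *}
record globals_list_flagged = globals_list +
  flag_' :: "ref \<Rightarrow> bool"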
subsubsection {* Mapping Variables to Record Fields *}
text {*
Generally the state space (global and local variables) is flat and all components
are accessible from everywhere. Locality or globality of variables is achieved by
the proper @{text "init"} and @{text "return"}/@{text "result"} functions in procedure
calls. What is the best way to map programming language variables to the state records?
One way is to disambiguate all names, by using the procedure names as a prefix or the
structure names for heap components. This leads to long names and lots of
record components. But for local variables this is not necessary, since
variable @{term i} of procedure @{term A} and variable @{term "i"} of procedure @{term B}
can be mapped to the same record component, without any harm, provided they have the
same logical type. Therefore for local variables it is preferable to map them per type. You
only have to distinguish variables with the same name if they have different types.
Note that all pointers just have logical type @{text "ref"}. So you do not even
have to distinguish between a pointer @{text p} to an integer and a pointer @{text p} to
a list.
For global components (global variables and heap structures) you have to disambiguate the
names. But hopefully the field names of structures differ anyway.
Also note that there is no notion of hiding of a global component by a local one in
the logic. You have to disambiguate global and local names!
As the names of the components show up in the specifications and the
proof obligations, names are even more important than for programming. Try to
find meaningful and short names, to avoid cluttering up your reasoning.
*}
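text {* As an illustration (with hypothetical names, not part of the original
text), a single record can host the per-type local variables of several
procedures: *}
record 'g shared_vars = "'g state" +
  idx_' :: "nat" (* serves as local variable i of procedures A and B alike *)
  ptr_' :: "ref" (* one ref-typed component serves all pointer-valued variables *)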
(*<*)
text {*
in locales, includes, spec or impl?
Names: per type not per procedure\<dots>
downgrading total to partial\<dots>
*}
(*>*)
text {**}
(*<*)
end
(*>*)
|
Formal statement is: lemma norm_of_real [simp]: "norm (of_real r :: 'a::real_normed_algebra_1) = \<bar>r\<bar>" Informal statement is: The norm of a real number is its absolute value. |
\section{Research Interests}
\begin{itemize}[noitemsep,nolistsep]
\item Scalable compiler directed workload analysis
\item Hardware software co-design for specialized architectures
\item Core micro-architecture with a focus on the cache memory hierarchy
\end{itemize}
|
%% Copyright (C) 2014 Colin B. Macdonald
%%
%% This file is part of OctSymPy.
%%
%% OctSymPy is free software; you can redistribute it and/or modify
%% it under the terms of the GNU General Public License as published
%% by the Free Software Foundation; either version 3 of the License,
%% or (at your option) any later version.
%%
%% This software is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty
%% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
%% the GNU General Public License for more details.
%%
%% You should have received a copy of the GNU General Public
%% License along with this software; see the file COPYING.
%% If not, see <http://www.gnu.org/licenses/>.
function s = python_copy_vars_from(out, tryexcept)
%private function
if (nargin == 1)
tryexcept = true;
end
if (~tryexcept)
%% no error checking
s = { sprintf('octoutput_drv(%s)', out) };
else
%% with try-except block
s = { 'try:' ...
sprintf(' octoutput_drv(%s)', out) ...
'except:' ...
' echo_exception_stdout("while copying variables from Python")' ...
' raise'
};
end
|
Require Import Bedrock.Bedrock Platform.tests.MiniMasterDriver Bedrock.I386_gas.
Definition compiled := moduleS E.m.
Recursive Extraction compiled.
|
[STATEMENT]
lemma exec_all_def:
"P \<turnstile> \<sigma> -jvm\<rightarrow> \<sigma>' = ((\<sigma>,\<sigma>') \<in> {(\<sigma>,\<sigma>'). exec (P,\<sigma>) = Some \<sigma>'}\<^sup>*)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<turnstile> \<sigma> -jvm\<rightarrow> \<sigma>' = ((\<sigma>, \<sigma>') \<in> {(\<sigma>, \<sigma>'). exec (P, \<sigma>) = \<lfloor>\<sigma>'\<rfloor>}\<^sup>*)
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<turnstile> \<sigma> -jvm\<rightarrow> \<sigma>' = ((\<sigma>, \<sigma>') \<in> {(\<sigma>, \<sigma>'). exec (P, \<sigma>) = \<lfloor>\<sigma>'\<rfloor>}\<^sup>*)
[PROOF STEP]
by (simp add: exec_all_def1 exec_1_eq) |
As we know, this is a fashion-conscious age, and girls, women, and indeed people all over the world want to look great, fashionable, and distinctive from others. So people search for and buy beautiful, dashing, attractive, charming, exclusive, gorgeous, and graceful fashion wear.
Nomi Ansari's collections are available in Karachi, Lahore, Islamabad, Dubai, Chicago, and Singapore. This latest bridal wear dresses collection is trendy, graceful, and well designed. Let's have a look at the Nomi Ansari Bridal Wear Dresses Collection 2014 for brides here below.
Pakistyles.com has many beautiful, graceful, and thoroughly modern winter dresses for 2014 for the girls and women of the present era.
Nomi Ansari is a very famous, well-known, wonderful, and splendid designer. Girls and women can easily find and collect the Nomi Ansari Bridal Wear Dresses Collection 2014 for brides.
Require Import XR_Rmin.
Require Import XR_Rlt.
Require Import XR_Rle_dec.
Require Import XR_Rlt_le_trans.
Require Import XR_Rnot_le_lt.
Local Open Scope R_scope.
Lemma Rmin_Rgt_l : forall r1 r2 r, r < Rmin r1 r2 -> r < r1 /\ r < r2.
Proof.
intros x y z.
intro h.
unfold Rmin in h.
destruct (Rle_dec x y) as [ hminl | hminr ].
{
split.
{ exact h. }
{
apply Rlt_le_trans with x.
{ exact h. }
{ exact hminl. }
}
}
{
split.
{
apply Rlt_trans with y.
{ exact h. }
{
apply Rnot_le_lt.
exact hminr.
}
}
{
exact h.
}
}
Qed. |
Cunningham's treatment was not followed by later authorities, who largely considered Astraeus a distinct genus. According to the taxonomical authority MycoBank, synonyms of Astraeus hygrometricus include Lycoperdon stellatus Scop. (1772); Geastrum <unk> <unk>. (1822); Geastrum <unk> (Scop.) <unk>. (1885); and Astraeus stellatus E.Fisch. (1900).
|
[STATEMENT]
lemma time_replicate: "\<lbrakk>\<And>h. time x h \<le> c \<rbrakk> \<Longrightarrow> time (replicatei n x) h \<le> (1+(1+c)*n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>h. time x h \<le> c) \<Longrightarrow> time (replicatei n x) h \<le> 1 + (1 + c) * n
[PROOF STEP]
apply (induction n arbitrary: h)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>h. (\<And>h. time x h \<le> c) \<Longrightarrow> time (replicatei 0 x) h \<le> 1 + (1 + c) * 0
2. \<And>n h. \<lbrakk>\<And>h. (\<And>h. time x h \<le> c) \<Longrightarrow> time (replicatei n x) h \<le> 1 + (1 + c) * n; \<And>h. time x h \<le> c\<rbrakk> \<Longrightarrow> time (replicatei (Suc n) x) h \<le> 1 + (1 + c) * Suc n
[PROOF STEP]
apply (simp add: time_simp algebra_simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n h. \<lbrakk>\<And>h. (\<And>h. time x h \<le> c) \<Longrightarrow> time (replicatei n x) h \<le> 1 + (1 + c) * n; \<And>h. time x h \<le> c\<rbrakk> \<Longrightarrow> time (replicatei (Suc n) x) h \<le> 1 + (1 + c) * Suc n
[PROOF STEP]
apply (auto simp: time_simp fails_simp algebra_simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n h. \<lbrakk>\<And>h. time (replicatei n x) h \<le> Suc (n + n * c); \<And>h. time x h \<le> c; \<not> fails x h; \<not> fails (replicatei n x) (the_heap x h)\<rbrakk> \<Longrightarrow> time x h + time (replicatei n x) (the_heap x h) \<le> Suc (n + (c + n * c))
[PROOF STEP]
by (metis add_le_mono group_cancel.add2 nat_arith.suc1) |
import data.nat.prime
import tactic.linarith
/-! # LoVe Preface
## Proof Assistants
Proof assistants (also called interactive theorem provers)
* check and help develop formal proofs;
* can be used to prove big theorems, not only logic puzzles;
* can be tedious to use;
* are highly addictive (think video games).
A selection of proof assistants, classified by logical foundations:
* set theory: Isabelle/ZF, Metamath, Mizar;
* simple type theory: HOL4, HOL Light, Isabelle/HOL;
* **dependent type theory**: Agda, Coq, **Lean**, Matita, PVS.
## Success Stories
Mathematics:
* the four-color theorem (in Coq);
* the odd-order theorem (in Coq);
* the Kepler conjecture (in HOL Light and Isabelle/HOL).
Computer science:
* hardware
* operating systems
* programming language theory
* compilers
* security
## Lean
Lean is a proof assistant developed primarily by Leonardo de Moura (Microsoft
Research) since 2012.
Its mathematical library, `mathlib`, is developed by a user community.
We use community version 3.20.0. We use its basic libraries, `mathlib`, and
`LoVelib`. Lean is a research project.
Strengths:
* highly expressive logic based on a dependent type theory called the
**calculus of inductive constructions**;
* extended with classical axioms and quotient types;
* metaprogramming framework;
* modern user interface;
* documentation;
* open source;
* wonderful user community.
## This Course
### Web Site
https://cs.brown.edu/courses/cs1951x/
### Repository (Demos, Exercises, Homework)
https://github.com/BrownCS1951x/fpv2021
The file you are currently looking at is a demo.
For each chapter of the Hitchhiker's Guide, there will be approximately
one demo, one exercise sheet, and one homework.
* Demos will be covered in class. These are "lecture notes."
We'll post skeletons of the demos before class, and completed demos after class.
* Exercises are ungraded practice problems for you to use to learn.
Sometimes we'll cover exercise problems in class. Occasionally we may run
class like a lab, giving you time to work on exercise problems with us around.
* Homeworks are for you to do on your own, and submit via Gradescope.
### The Hitchhiker's Guide to Logical Verification
https://cs.brown.edu/courses/cs1951x/static_files/main.pdf
The lecture notes consist of a preface and 13 chapters. They cover the same
material as the corresponding lectures but with more details. Sometimes there
will not be enough time to cover everything in class, so reading the lecture
notes will be necessary.
Download this version, not others that you might find online!
## Our Goal
We want you to
* master fundamental theory and techniques in interactive theorem proving;
* familiarize yourselves with some application areas;
* develop some practical skills you can apply on a larger project (as a hobby,
for an MSc or PhD, or in industry);
* feel ready to move to another proof assistant and apply what you have learned;
* understand the domain well enough to start reading scientific papers.
This course is neither a pure logical foundations course nor a Lean tutorial.
Lean is our vehicle, not an end in itself.
-/
open nat
open_locale nat
theorem infinitude_of_primes : ∀ N, ∃ p ≥ N, prime p :=
sorry
/*
* -----------------------------------------------------------------
* Binary Search Tree Library --- bst_lib.h
* Version: 1.6180
* Date: Feb 19, 2010
* -----------------------------------------------------------------
* Programmer: Americo Barbosa da Cunha Junior
* [email protected]
* -----------------------------------------------------------------
* Copyright (c) 2010 by Americo Barbosa da Cunha Junior
*
* This program is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* A copy of the GNU General Public License is available in
* LICENSE.txt or http://www.gnu.org/licenses/.
* -----------------------------------------------------------------
 * This is the header file for a library of functions
 * to operate on a binary search tree.
* -----------------------------------------------------------------
*/
#ifndef __BST_LIB_H__
#define __BST_LIB_H__
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#undef RIGHT
#define RIGHT 1
#undef LEFT
#define LEFT 0
/*
*------------------------------------------------------------
* structure of binary search tree leaf
*
* last update: Feb 2, 2009
*------------------------------------------------------------
*/
typedef struct leaf
{
gsl_vector *phi; /* composition */
gsl_vector *Rphi; /* reaction mapping */
gsl_matrix *A; /* mapping gradient matrix */
gsl_matrix *L; /* ellipsoid Cholesky matrix */
} bst_leaf;
/*------------------------------------------------------------*/
/*
*------------------------------------------------------------
* structure of binary search tree node
*
* last update: Feb 2, 2009
*------------------------------------------------------------
*/
typedef struct node
{
gsl_vector *v; /* cutting plane normal vector */
double a; /* scalar a */
struct node *r_node; /* right node */
struct node *l_node; /* left node */
struct leaf *r_leaf; /* right leaf */
struct leaf *l_leaf; /* left leaf */
} bst_node;
/*------------------------------------------------------------*/
/*
*------------------------------------------------------------
* function prototypes
*------------------------------------------------------------
*/
bst_leaf *bst_leaf_alloc();
bst_node* bst_node_alloc();
void bst_leaf_free(void **leaf_bl);
void bst_node_free(void **node_bl);
int bst_leaf_set(gsl_vector *phi,
gsl_vector *Rphi,
gsl_matrix *A,
gsl_matrix *L,
bst_leaf *leaf);
double bst_cutplane(gsl_vector *phi_l,
gsl_vector *phi_r,
gsl_vector *v);
int bst_node_set(bst_leaf *l_leaf,
bst_leaf *r_leaf,
bst_node *node);
int bst_node_add(int side,
bst_node *old_node,
bst_node *new_node);
int bst_search(bst_node *root,
gsl_vector *phi,
bst_node **end_node,
bst_leaf **end_leaf);
int bst_height(bst_node *root);
#endif /* __BST_LIB_H__ */
|
A : Set
B : Set₁
B = Set
C : Set
|
import mynat.pow
-- * Level 1
lemma zero_pow_zero : (0 : mynat) ^ (0 : mynat) = 1 :=
begin
rw pow_zero,
end
-- * Level 2
lemma zero_pow_succ (m : mynat) : (0 : mynat) ^ (succ m) = 0 :=
begin
rw [pow_succ, mul_zero],
end
-- * Level 3
lemma pow_one (a : mynat) : a ^ (1 : mynat) = a :=
begin
rw [one_eq_succ_zero, pow_succ, pow_zero, one_mul],
end
-- * Level 4
lemma one_pow (m : mynat) : (1 : mynat) ^ m = 1 :=
begin
induction m,
{ rw pow_zero },
{ rw [pow_succ, m_ih, mul_one] },
end
-- * Level 5
lemma pow_add (a m n : mynat) : a ^ (m + n) = a ^ m * a ^ n :=
begin
induction n,
{ rw [pow_zero, mul_one, add_zero] },
{ rw [add_succ, pow_succ, pow_succ, n_ih, mul_assoc] },
end
-- * Level 6
lemma mul_pow (a b n : mynat) : (a * b) ^ n = a ^ n * b ^ n :=
begin
induction n,
{ rw [pow_zero, pow_zero, pow_zero, mul_one] },
{ rw [pow_succ, pow_succ, pow_succ, n_ih],
simp [mul_assoc, mul_comm, mul_left_comm] },
end
-- * Level 7
lemma pow_pow (a m n : mynat) : (a ^ m) ^ n = a ^ (m * n) :=
begin
induction n,
{ rw [mul_zero, pow_zero, pow_zero] },
{ rw [pow_succ, mul_succ, n_ih, pow_add] },
end
-- * Level 8
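-- Worked identity behind this level:
-- (a + b)^2 = (a + b) * (a + b) = a*a + a*b + b*a + b*b
--           = a^2 + b^2 + 2*a*b, using mul_comm to merge a*b with b*a.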
lemma add_squared (a b : mynat) :
(a + b) ^ (2 : mynat) = a ^ (2 : mynat) + b ^ (2 : mynat) + 2 * a * b :=
begin
rw two_eq_succ_one,
rw one_eq_succ_zero,
repeat {rw pow_succ},
repeat {rw pow_zero},
repeat {rw one_mul},
simp,
rw mul_succ,
rw <- one_eq_succ_zero,
rw mul_one,
repeat {rw add_mul},
repeat {rw mul_add},
simp,
end
|
[STATEMENT]
lemma Nonce_supply1: "\<exists>N. Nonce N \<notin> used evs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>N. Nonce N \<notin> used evs
[PROOF STEP]
by (rule Nonce_supply_lemma [THEN exE], blast) |
[STATEMENT]
lemma wf_nodeOrder: "wf(nodeOrder j)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf (nodeOrder j)
[PROOF STEP]
apply (unfold nodeOrder_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf (Restr (measure (\<lambda>i. (j + N - i) mod N)) {..<N})
[PROOF STEP]
apply (rule wf_measure [THEN wf_subset], blast)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
"""
Creates and simulates a noisy circuit using the cirq.ConstantQubitNoiseModel class.
"""
import cirq
import numpy as np
def noisy_circuit_demo(amplitude_damp):
"""Demonstrates a noisy circuit simulation.
"""
# q = cirq.NamedQubit('q')
q = cirq.LineQubit(0)
dm_circuit = cirq.Circuit(
cirq.X(q),
)
dm_result = cirq.DensityMatrixSimulator(noise=cirq.ConstantQubitNoiseModel(cirq.amplitude_damp(amplitude_damp))).simulate(program=dm_circuit)
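# kc_circuit applies the same damping explicitly as a channel, so the
# knowledge-compilation simulator needs no separate noise model; its
# initial_state=1 plays the role of the X gate in dm_circuit.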
kc_circuit = cirq.Circuit(
cirq.amplitude_damp(amplitude_damp)(q),
)
kc_result = cirq.KnowledgeCompilationSimulator(kc_circuit,initial_state=1,intermediate=False).simulate(kc_circuit)
print("dm_result.final_density_matrix")
print(dm_result.final_density_matrix)
print("kc_result.final_density_matrix")
print(kc_result.final_density_matrix)
np.testing.assert_almost_equal(
dm_result.final_density_matrix,
kc_result.final_density_matrix
)
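# Repeat the comparison on sampled measurements of the damped qubit.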
dm_circuit.append(cirq.measure(q, key='after_not_gate'))
kc_circuit.append(cirq.measure(q, key='after_not_gate'))
dm_results = cirq.sample(program=dm_circuit,noise=cirq.ConstantQubitNoiseModel(cirq.amplitude_damp(amplitude_damp)),repetitions=10000)
kc_simulator = cirq.KnowledgeCompilationSimulator(kc_circuit,initial_state=1,intermediate=False)
kc_results = kc_simulator.run(kc_circuit,repetitions=10000)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("ConstantQubitNoiseModel with amplitude damping of rate",
cirq.amplitude_damp(amplitude_damp))
print('DENSITY_MATRIX_SIMULATOR: Sampling of qubit "q" after application of X gate:')
print(dm_results.histogram(key='after_not_gate'))
print('KNOWLEDGE_COMPILATION_SIMULATOR: Sampling of qubit "q" after application of X gate:')
print(kc_results.histogram(key='after_not_gate'))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
def main():
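# Damping rates chosen to give simple decay probabilities (0%, 36%, 64%, 100%).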
amp_damp_rates = [0, 9/25, 16/25, 1]
for amp_damp_rate in amp_damp_rates:
noisy_circuit_demo(amp_damp_rate)
print()
if __name__ == '__main__':
main()
|
cat("Goodbye, World!\n")
|
context("Test telfer")
# Create data
set.seed(seed = 128)
SS <- 1000 # number of observations
taxa <- sample(letters, SS, replace = TRUE)
site <- sample(paste('A', 1:20, sep = ''), SS, replace = TRUE)
time_period <- sample(1:3, SS, replace = TRUE)
df <- unique(data.frame(taxa, site, time_period))
# This is what the results should look like
results <- structure(list(taxa = structure(1:26, .Label = c("a", "b", "c",
"d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p",
"q", "r", "s", "t", "u", "v", "w", "x", "y", "z"), class = "factor"),
Nsite_1.x = c(7, 5, 11, 6, 8, 12, 13, 10, 11, 7, 9, 10, 10,
8, 6, 10, 10, 9, 7, 9, 7, 12, 7, 6, 11, 6),
Nsite_2.x = c(8, 8, 10, 11, 14, 12, 8, 8, 8, 10, 10, 10,
9, 11, 7, 13, 9, 10, 12, 9, 14, 11, 5, 11, 13, 8),
Telfer_1_2 = c(-0.73732519, -0.77133792, -0.09783170, 0.70566506, 1.84449356,
0.79320432, -1.39542120, -0.88966350, -0.98871825, 0.14526451, 0.02084756, -0.03627006,
-0.45903684, 0.49957587, -1.23170170, 1.26438351, -0.45903684, 0.02084756, 1.02785421,
-0.39539529, 1.98143673, 0.30605205, -2.22583487, 0.70566506, 1.25996498,
-0.72891844), Nsite_1.y = c(7, 5, 11, 6, 8, 12, 13, 10, 11, 7, 9, 10, 10, 8,
6, 10, 10, 9, 7, 9, 7, 12, 7, 6, 11, 6),
Nsite_3.x = c(10, 8, 11, 11, 7, 11, 12, 9, 7, 11, 10, 9, 9, 7, 11, 5, 10, 12, 9, 8, 7, 10, 10, 11, 12, 8),
Telfer_1_3 = c(0.4282569, -0.9490873, 0.6579022, 1.2282920, -1.2888609, 0.6135323,
1.0747447, -0.2624847, -1.2994921, 1.0052868, 0.2779805, -0.2624847,
-0.2624847, -1.2888609, 1.2282920, -2.4181066,
0.2248821, 1.2932700, -0.1487729, -0.7373091, -1.3469912, 0.1331646,
0.4282569, 1.2282920, 1.1469913, -0.7816810),
Nsite_2.y = c(8, 8, 10, 11, 14, 12, 8, 8, 8, 10, 10, 10, 9, 11, 7, 13, 9, 10, 12, 9, 14, 11, 5, 11, 13, 8),
Nsite_3.y = c(10, 8, 11, 11, 7, 11, 12, 9, 7, 11, 10, 9, 9, 7, 11, 5, 10, 12, 9, 8, 7, 10, 10, 11, 12, 8),
Telfer_2_3 = c(0.241879867, -1.128220065, 0.905222087, 0.882140381, -0.835189914, 0.855157996,
1.611979800, -0.436860690, -1.846284033, 0.905222087, 0.325816517,
-0.253589053, -0.340138161, -1.278488893, 0.896127341, -2.002866510,
0.290092553, 1.495399688, -0.112055477, -0.982085822, -0.835189914, 0.352123266,
0.006522637, 0.882140381, 1.275183707, -1.128220065)),
.Names = c("taxa",
"Nsite_1.x", "Nsite_2.x", "Telfer_1_2", "Nsite_1.y", "Nsite_3.x",
"Telfer_1_3", "Nsite_2.y", "Nsite_3.y", "Telfer_2_3"), row.names = c(NA,
-26L), class = "data.frame")
######################
test_that("Test errors and warnings", {
expect_error(temp <- telfer(taxa = 'tom', site = df$site, time_period = df$time_period),
'The following arguements are not of equal length: taxa, site, time_period')
expect_error(temp <- telfer(taxa = df$taxa, site = head(df$site,-1), time_period = df$time_period),
'The following arguements are not of equal length: taxa, site, time_period')
expect_error(temp <- telfer(taxa = df$taxa, site = df$site, time_period = tail(df$time_period, -1)),
'The following arguements are not of equal length: taxa, site, time_period')
expect_error(temp <- telfer(taxa = df$taxa, site = df$site, time_period = df$time_period, minSite = 100),
'No taxa satisfy the minSite criteria when comparing time period 1 and 2')
expect_error(temp <- telfer(taxa = df$taxa, site = df$site, time_period = df$time_period, minSite = TRUE),
'minSite must be numeric or integer')
expect_warning(temp <- telfer(taxa, site, time_period),
'269 out of 1000 observations will be removed as duplicates')
})
test_that("The function works", {
TelferResult <- telfer(df$taxa, df$site, df$time_period)
expect_true(all.equal(TelferResult, results, tolerance = .0000001))
expect_is(TelferResult, class = 'data.frame')
TelferResult <- telfer(df$taxa, df$site, df$time_period, useIterations = FALSE)
expect_is(TelferResult, class = 'data.frame')
})
|
module Data.Permutation where
open import Prelude
open import Data.Fin as Fin hiding (_==_; _<_)
open import Data.Nat
open import Data.Vec
open import Logic.Identity
open import Logic.Base
import Logic.ChainReasoning
-- What is a permutation?
-- Answer 1: A bijection between Fin n and itself
data Permutation (n : Nat) : Set where
permutation :
(π π⁻¹ : Fin n -> Fin n) ->
(forall {i} -> π (π⁻¹ i) ≡ i) ->
Permutation n
module Permutation {n : Nat}(P : Permutation n) where
private
π' : Permutation n -> Fin n -> Fin n
π' (permutation x _ _) = x
π⁻¹' : Permutation n -> Fin n -> Fin n
π⁻¹' (permutation _ x _) = x
proof : (P : Permutation n) -> forall {i} -> π' P (π⁻¹' P i) ≡ i
proof (permutation _ _ x) = x
π : Fin n -> Fin n
π = π' P
π⁻¹ : Fin n -> Fin n
π⁻¹ = π⁻¹' P
module Proofs where
ππ⁻¹-id : {i : Fin n} -> π (π⁻¹ i) ≡ i
ππ⁻¹-id = proof P
open module Chain = Logic.ChainReasoning.Poly.Homogenous _≡_ (\x -> refl) (\x y z -> trans)
π⁻¹-inj : (i j : Fin n) -> π⁻¹ i ≡ π⁻¹ j -> i ≡ j
π⁻¹-inj i j h =
chain> i
=== π (π⁻¹ i) by sym ππ⁻¹-id
=== π (π⁻¹ j) by cong π h
=== j by ππ⁻¹-id
-- Generalise
lem : {n : Nat}(f g : Fin n -> Fin n)
-> (forall i -> f (g i) ≡ i)
-> (forall i -> g (f i) ≡ i)
lem {zero} f g inv ()
lem {suc n} f g inv i = ?
where
gz≠gs : {i : Fin n} -> g fzero ≢ g (fsuc i)
gz≠gs {i} gz=gs = fzero≠fsuc $
chain> fzero
=== f (g fzero) by sym (inv fzero)
=== f (g (fsuc i)) by cong f gz=gs
=== fsuc i by inv (fsuc i)
z≠f-thin-gz : {i : Fin n} -> fzero ≢ f (thin (g fzero) i)
z≠f-thin-gz {i} z=f-thin-gz = ?
-- f (g fzero)
-- = fzero
-- = f (thin (g fzero) i)
g' : Fin n -> Fin n
g' j = thick (g fzero) (g (fsuc j)) gz≠gs
f' : Fin n -> Fin n
f' j = thick fzero (f (thin (g fzero) j)) ?
g'f' : forall j -> g' (f' j) ≡ j
g'f' = lem {n} f' g' ?
π⁻¹π-id : forall {i} -> π⁻¹ (π i) ≡ i
π⁻¹π-id = ?
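-- Remark: π⁻¹ (π i) ≡ i should follow from ππ⁻¹-id, since on a finite
-- set a right inverse is automatically a left inverse; `lem` above is
-- the inductive engine (via thin/thick) intended to prove exactly that.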
-- Answer 2: A Vec (Fin n) n with no duplicates
{-
infixr 40 _◅_ _↦_,_
infixr 20 _∘_
data Permutation : Nat -> Set where
ε : Permutation zero
_◅_ : {n : Nat} -> Fin (suc n) -> Permutation n -> Permutation (suc n)
_↦_,_ : {n : Nat}(i j : Fin (suc n)) -> Permutation n -> Permutation (suc n)
fzero ↦ j , π = j ◅ π
fsuc i ↦ j , j' ◅ π = thin j j' ◅ i ↦ ? , π
indices : {n : Nat} -> Permutation n -> Vec (Fin n) n
indices ε = []
indices (i ◅ π) = i :: map (thin i) (indices π)
-- permute (i ◅ π) xs with xs [!] i where
-- permute₁ (i ◅ π) .(insert i x xs) (ixV x xs) = x :: permute π xs
permute : {n : Nat}{A : Set} -> Permutation n -> Vec A n -> Vec A n
permute (i ◅ π) xs = permute' π i xs (xs [!] i)
where
permute' : {n : Nat}{A : Set} -> Permutation n -> (i : Fin (suc n))(xs : Vec A (suc n)) ->
IndexView i xs -> Vec A (suc n)
permute' π i .(insert i x xs') (ixV x xs') = x :: permute π xs'
delete : {n : Nat} -> Fin (suc n) -> Permutation (suc n) -> Permutation n
delete fzero (j ◅ π) = π
delete {zero} (fsuc ()) _
delete {suc _} (fsuc i) (j ◅ π) = ? ◅ delete i π
identity : {n : Nat} -> Permutation n
identity {zero } = ε
identity {suc n} = fzero ◅ identity
_⁻¹ : {n : Nat} -> Permutation n -> Permutation n
ε ⁻¹ = ε
(i ◅ π) ⁻¹ = ?
_∘_ : {n : Nat} -> Permutation n -> Permutation n -> Permutation n
ε ∘ π₂ = ε
i ◅ π₁ ∘ π₂ = (indices π₂ ! i) ◅ (π₁ ∘ delete i π₂)
-}
|
{-# LINE 8 "SpectrumAnalysis.lhs" #-}
-- This code was automatically generated by lhs2tex --code, from the file
-- HSoM/SpectrumAnalysis.lhs. (See HSoM/MakeCode.bat.)
{-# LINE 18 "SpectrumAnalysis.lhs" #-}
{-# LANGUAGE Arrows #-}
module Euterpea.Music.Signal.SpectrumAnalysis where
import Euterpea
import Euterpea.Experimental (fftA)
import Data.Complex (Complex ((:+)), polar)
import Data.Maybe (listToMaybe, catMaybes)
{-# LINE 560 "SpectrumAnalysis.lhs" #-}
dft :: RealFloat a => [Complex a] -> [Complex a]
dft xs =
let lenI = length xs
lenR = fromIntegral lenI
lenC = lenR :+ 0
in [ let i = -2 * pi * fromIntegral k / lenR
in (1/lenC) * sum [ (xs!!n) * exp (0 :+ i * fromIntegral n)
| n <- [0,1..lenI-1] ]
| k <- [0,1..lenI-1] ]
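-- A direct O(n^2) DFT, with the 1/N normalization placed on the forward
-- transform (the convention used throughout this module). For example, in GHCi:
--   printComplexL (dft (mkxa 16))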
{-# LINE 583 "SpectrumAnalysis.lhs" #-}
mkTerm :: Int -> Double -> [Complex Double]
mkTerm num n = let f = 2 * pi / fromIntegral num
in [ sin (n * f * fromIntegral i) / n :+ 0
| i <- [0,1..num-1] ]
mkxa, mkxb, mkxc :: Int-> [Complex Double]
mkxa num = mkTerm num 1
mkxb num = zipWith (+) (mkxa num) (mkTerm num 3)
mkxc num = zipWith (+) (mkxb num) (mkTerm num 5)
{-# LINE 610 "SpectrumAnalysis.lhs" #-}
printComplexL :: [Complex Double] -> IO ()
printComplexL xs =
let f (i,rl:+im) =
do putStr (spaces (3 - length (show i)) )
putStr (show i ++ ": (" )
putStr (niceNum rl ++ ", " )
putStr (niceNum im ++ ")\n" )
in mapM_ f (zip [0..length xs - 1] xs)
niceNum :: Double -> String
niceNum d =
let d' = fromIntegral (round (1e10 * d)) / 1e10
(dec, fra) = break (== '.') (show d')
(fra',exp) = break (== 'e') fra
in spaces (3 - length dec) ++ dec ++ take 11 fra'
++ exp ++ spaces (12 - length fra' - length exp)
spaces :: Int -> String
spaces n = take n (repeat ' ')
{-# LINE 679 "SpectrumAnalysis.lhs" #-}
mkPulse :: Int -> [Complex Double]
mkPulse n = 100 : take (n-1) (repeat 0)
{-# LINE 721 "SpectrumAnalysis.lhs" #-}
x1 num = let f = pi * 2 * pi / fromIntegral num
in map (:+ 0) [ sin (f * fromIntegral i)
| i <- [0,1..num-1] ]
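-- Note the frequency here is an irrational multiple (pi cycles per window),
-- presumably to illustrate spectral leakage in the resulting DFT.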
{-# LINE 757 "SpectrumAnalysis.lhs" #-}
mkPolars :: [Complex Double] -> [Complex Double]
mkPolars = map ((\(m,p)-> m:+p) . polar)
|
/-
Copyright (c) 2018 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad
! This file was ported from Lean 3 source module topology.partial
! leanprover-community/mathlib commit 4c19a16e4b705bf135cf9a80ac18fcc99c438514
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Topology.ContinuousOn
import Mathlib.Order.Filter.Partial
/-!
# Partial functions and topological spaces
In this file we prove properties of `Filter.Ptendsto` etc in topological spaces. We also introduce
`Pcontinuous`, a version of `Continuous` for partially defined functions.
-/
open Filter
open Topology
variable {α β : Type _} [TopologicalSpace α]
theorem rtendsto_nhds {r : Rel β α} {l : Filter β} {a : α} :
Rtendsto r l (𝓝 a) ↔ ∀ s, IsOpen s → a ∈ s → r.core s ∈ l :=
all_mem_nhds_filter _ _ (fun _s _t => id) _
#align rtendsto_nhds rtendsto_nhds
theorem rtendsto'_nhds {r : Rel β α} {l : Filter β} {a : α} :
Rtendsto' r l (𝓝 a) ↔ ∀ s, IsOpen s → a ∈ s → r.preimage s ∈ l := by
rw [rtendsto'_def]
apply all_mem_nhds_filter
apply Rel.preimage_mono
#align rtendsto'_nhds rtendsto'_nhds
theorem ptendsto_nhds {f : β →. α} {l : Filter β} {a : α} :
Ptendsto f l (𝓝 a) ↔ ∀ s, IsOpen s → a ∈ s → f.core s ∈ l :=
rtendsto_nhds
#align ptendsto_nhds ptendsto_nhds
theorem ptendsto'_nhds {f : β →. α} {l : Filter β} {a : α} :
Ptendsto' f l (𝓝 a) ↔ ∀ s, IsOpen s → a ∈ s → f.preimage s ∈ l :=
rtendsto'_nhds
#align ptendsto'_nhds ptendsto'_nhds
/-! ### Continuity and partial functions -/
variable [TopologicalSpace β]
/-- Continuity of a partial function -/
def Pcontinuous (f : α →. β) :=
∀ s, IsOpen s → IsOpen (f.preimage s)
#align pcontinuous Pcontinuous
theorem open_dom_of_pcontinuous {f : α →. β} (h : Pcontinuous f) : IsOpen f.Dom := by
rw [← PFun.preimage_univ]; exact h _ isOpen_univ
#align open_dom_of_pcontinuous open_dom_of_pcontinuous
theorem pcontinuous_iff' {f : α →. β} :
Pcontinuous f ↔ ∀ {x y} (h : y ∈ f x), Ptendsto' f (𝓝 x) (𝓝 y) := by
constructor
· intro h x y h'
simp only [ptendsto'_def, mem_nhds_iff]
rintro s ⟨t, tsubs, opent, yt⟩
exact ⟨f.preimage t, PFun.preimage_mono _ tsubs, h _ opent, ⟨y, yt, h'⟩⟩
intro hf s os
rw [isOpen_iff_nhds]
rintro x ⟨y, ys, fxy⟩ t
rw [mem_principal]
intro (h : f.preimage s ⊆ t)
change t ∈ 𝓝 x
apply mem_of_superset _ h
have h' : ∀ s ∈ 𝓝 y, f.preimage s ∈ 𝓝 x := by
intro s hs
have : Ptendsto' f (𝓝 x) (𝓝 y) := hf fxy
rw [ptendsto'_def] at this
exact this s hs
show f.preimage s ∈ 𝓝 x
apply h'
rw [mem_nhds_iff]
exact ⟨s, Set.Subset.refl _, os, ys⟩
#align pcontinuous_iff' pcontinuous_iff'
theorem continuousWithinAt_iff_ptendsto_res (f : α → β) {x : α} {s : Set α} :
ContinuousWithinAt f s x ↔ Ptendsto (PFun.res f s) (𝓝 x) (𝓝 (f x)) :=
tendsto_iff_ptendsto _ _ _ _
#align continuous_within_at_iff_ptendsto_res continuousWithinAt_iff_ptendsto_res
|
[STATEMENT]
lemma (in UP_cring) taylor_term_closed:
assumes "p \<in> carrier P"
assumes "a \<in> carrier R"
shows "taylor_term a p i \<in> carrier (UP R)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. taylor_term a p i \<in> carrier (UP R)
[PROOF STEP]
unfolding taylor_term_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. taylor_expansion R a p i \<odot>\<^bsub>UP R\<^esub> X_poly_minus R a [^]\<^bsub>UP R\<^esub> i \<in> carrier (UP R)
[PROOF STEP]
using P.nat_pow_closed P_def taylor_closed taylor_def X_minus_closed assms(1) assms(2) smult_closed
[PROOF STATE]
proof (prove)
using this:
?x \<in> carrier P \<Longrightarrow> ?x [^]\<^bsub>P\<^esub> ?n \<in> carrier P
P \<equiv> UP R
\<lbrakk>?f \<in> carrier P; ?a \<in> carrier R\<rbrakk> \<Longrightarrow> T\<^bsub>?a\<^esub> ?f \<in> carrier P
taylor \<equiv> taylor_expansion R
?a \<in> carrier R \<Longrightarrow> X_poly_minus R ?a \<in> carrier P
p \<in> carrier P
a \<in> carrier R
\<lbrakk>?a \<in> carrier R; ?x \<in> carrier P\<rbrakk> \<Longrightarrow> ?a \<odot>\<^bsub>P\<^esub> ?x \<in> carrier P
goal (1 subgoal):
1. taylor_expansion R a p i \<odot>\<^bsub>UP R\<^esub> X_poly_minus R a [^]\<^bsub>UP R\<^esub> i \<in> carrier (UP R)
[PROOF STEP]
by (simp add: cfs_closed) |