normAbinom <- function() { # normAbinom.r
  x <- seq(5, 50, by=5)              # evaluation points
  p.binom <- pbinom(x, 50, 0.5)      # exact Binomial(n = 50, p = 0.5) CDF
  p.norm <- pnorm(x, 25, sqrt(12.5)) # normal approximation: mean np = 25, sd = sqrt(np(1-p)) = sqrt(12.5)
  diff <- p.binom - p.norm           # approximation error at each evaluation point
list(p.binom = round(p.binom,4), p.norm = round(p.norm,4), diff = round(diff,4))
} # end function
normAbinom()
|
(*
Title: The pi-calculus
Author/Maintainer: Jesper Bengtson (jebe.dk), 2012
*)
theory Weak_Late_Bisim_Subst_Pres
imports Weak_Late_Bisim_Subst Weak_Late_Bisim_Pres
begin
lemma tauPres:
fixes P :: pi
and Q :: pi
assumes "P \<approx>\<^sup>s Q"
shows "\<tau>.(P) \<approx>\<^sup>s \<tau>.(Q)"
using assms
by(force simp add: substClosed_def intro: Weak_Late_Bisim_Pres.tauPres)
lemma inputPres:
fixes P :: pi
and Q :: pi
and a :: name
and x :: name
assumes PeqQ: "P \<approx>\<^sup>s Q"
shows "a<x>.P \<approx>\<^sup>s a<x>.Q"
proof(auto simp add: substClosed_def)
fix \<sigma> :: "(name \<times> name) list"
{
fix P Q a x \<sigma>
assume "P \<approx>\<^sup>s Q"
then have "P[<\<sigma>>] \<approx>\<^sup>s Q[<\<sigma>>]" by(rule partUnfold)
then have "\<forall>y. (P[<\<sigma>>])[x::=y] \<approx> (Q[<\<sigma>>])[x::=y]"
apply(auto simp add: substClosed_def)
by(erule_tac x="[(x, y)]" in allE) auto
moreover assume "x \<sharp> \<sigma>"
ultimately have "(a<x>.P)[<\<sigma>>] \<approx> (a<x>.Q)[<\<sigma>>]" using weakBisimEqvt
by(force intro: Weak_Late_Bisim_Pres.inputPres)
}
note Goal = this
obtain y::name where "y \<sharp> P" and "y \<sharp> Q" and "y \<sharp> \<sigma>"
by(generate_fresh "name") auto
from `P \<approx>\<^sup>s Q` have "([(x, y)] \<bullet> P) \<approx>\<^sup>s ([(x, y)] \<bullet> Q)" by(rule eqvtI)
hence "(a<y>.([(x, y)] \<bullet> P))[<\<sigma>>] \<approx> (a<y>.([(x, y)] \<bullet> Q))[<\<sigma>>]" using `y \<sharp> \<sigma>` by(rule Goal)
moreover from `y \<sharp> P` `y \<sharp> Q` have "a<x>.P = a<y>.([(x, y)] \<bullet> P)" and "a<x>.Q = a<y>.([(x, y)] \<bullet> Q)"
by(simp add: pi.alphaInput)+
ultimately show "(a<x>.P)[<\<sigma>>] \<approx> (a<x>.Q)[<\<sigma>>]" by simp
qed
lemma outputPres:
fixes P :: pi
and Q :: pi
assumes "P \<approx>\<^sup>s Q"
shows "a{b}.P \<approx>\<^sup>s a{b}.Q"
using assms
by(force simp add: substClosed_def intro: Weak_Late_Bisim_Pres.outputPres)
assumes "P \<approx>\<^sup>s Q"
shows "[a\<frown>b]P \<approx>\<^sup>s [a\<frown>b]Q"
using assms
by(force simp add: substClosed_def intro: Weak_Late_Bisim_Pres.matchPres)
lemma mismatchPres:
fixes P :: pi
and Q :: pi
and a :: name
and b :: name
assumes "P \<approx>\<^sup>s Q"
shows "[a\<noteq>b]P \<approx>\<^sup>s [a\<noteq>b]Q"
using assms
by(force simp add: substClosed_def intro: Weak_Late_Bisim_Pres.mismatchPres)
lemma parPres:
fixes P :: pi
and Q :: pi
and R :: pi
assumes "P \<approx>\<^sup>s Q"
shows "P \<parallel> R \<approx>\<^sup>s Q \<parallel> R"
using assms
by(force simp add: substClosed_def intro: Weak_Late_Bisim_Pres.parPres)
assumes PeqQ: "P \<approx>\<^sup>s Q"
shows "<\<nu>x>P \<approx>\<^sup>s <\<nu>x>Q"
proof(auto simp add: substClosed_def)
fix s::"(name \<times> name) list"
have Res: "\<And>P Q x s. \<lbrakk>P[<s>] \<approx> Q[<s>]; x \<sharp> s\<rbrakk> \<Longrightarrow> (<\<nu>x>P)[<s>] \<approx> (<\<nu>x>Q)[<s>]"
by(force intro: Weak_Late_Bisim_Pres.resPres)
have "\<exists>c::name. c \<sharp> (P, Q, s)" by(blast intro: name_exists_fresh)
then obtain c::name where cFreshP: "c \<sharp> P" and cFreshQ: "c \<sharp> Q" and cFreshs: "c \<sharp> s"
by(force simp add: fresh_prod)
from PeqQ have "P[<([(x, c)] \<bullet> s)>] \<approx> Q[<([(x, c)] \<bullet> s)>]" by(simp add: substClosed_def)
hence "([(x, c)] \<bullet> P[<([(x, c)] \<bullet> s)>]) \<approx> ([(x, c)] \<bullet> Q[<([(x, c)] \<bullet> s)>])" by(rule Weak_Late_Bisim.eqvtI)
hence "([(x, c)] \<bullet> P)[<s>] \<approx> ([(x, c)] \<bullet> Q)[<s>]" by simp
hence "(<\<nu>c>([(x, c)] \<bullet> P))[<s>] \<approx> (<\<nu>c>([(x, c)] \<bullet> Q))[<s>]" using cFreshs by(rule Res)
moreover from cFreshP cFreshQ have "<\<nu>x>P = <\<nu>c>([(x, c)] \<bullet> P)" and "<\<nu>x>Q = <\<nu>c>([(x, c)] \<bullet> Q)"
by(simp add: alphaRes)+
ultimately show "(<\<nu>x>P)[<s>] \<approx> (<\<nu>x>Q)[<s>]" by simp
qed
shows "!P \<approx>\<^sup>s !Q"
using assms
by(force simp add: substClosed_def intro: Weak_Late_Bisim_Pres.bangPres)
end |
import numpy as np
class Scaling:
    """Robust scaling of input data.

    The constructor parameters define the target value of a chosen magnitude percentile
    and whether the data should also be centered (median removal) before scaling.
    By default the data is not centered and is scaled so that the 67th percentile of the
    absolute values lands at 0.3; Gaussian distributed data then falls almost entirely
    within the interval [-1, 1].
    """
def __init__(self, percentile=None, val_at_percentile=None, centering=False):
self.mu = None
self.maxval = None
self.centering = centering
self.factor = None
if percentile is not None:
self.percentile = percentile
else:
self.percentile = 67
if val_at_percentile is not None:
self.val_at_percentile = val_at_percentile
else:
self.val_at_percentile = 0.3
def scale_reference(self, X, Omega=None, dimensions=None):
        X_scaled = X.copy().astype(np.double)  # work on a copy so the caller's reference data is left unchanged
if self.centering:
# decide whether input is just a vector or a matrix
            if X.ndim == 1:
self.mu = np.median(X)
X_scaled -= self.mu
else:
# estimate the median as a column vector across all features
if Omega is None:
self.mu = np.atleast_2d(np.median(X, axis=1)).T
X_scaled -= self.mu
else:
if dimensions is not None:
m = dimensions[0]
self.mu = np.zeros((m, 1))
                        for i in range(m):
ix = np.where(Omega[0] == i)
median = np.median(X[ix])
X_scaled[ix] -= median
self.mu[i, 0] = median
else:
print "ERROR: Missing dimensions"
return X
self.maxval = np.percentile(np.abs(X_scaled), self.percentile)
self.factor = self.maxval / self.val_at_percentile
X_scaled /= self.factor
return X_scaled
def scale(self, X, Omega=None, dimensions=None):
if self.centering:
if Omega is None:
X -= self.mu
else:
if dimensions is not None:
m = dimensions[0]
                    for i in range(m):
ix = np.where(Omega[0] == i)
X[ix] -= self.mu[i, 0]
else:
print "ERROR: Missing dimensions"
return X
X /= self.factor
def rescale(self, X, Omega=None, dimensions=None):
X *= self.factor
if self.centering:
if Omega is None:
X += self.mu
else:
if dimensions is not None:
m = dimensions[0]
                    for i in range(m):
ix = np.where(Omega[0] == i)
X[ix] += self.mu[i, 0]
else:
print "ERROR: Cannot rescale without knowing the dimensions"
|
from __future__ import annotations
import unittest
from .function_tests import FunctionTestCase
import ATL
from ATL import num
import numpy as np
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
class TestBlur(unittest.TestCase, FunctionTestCase):
def gen_func(self):
@ATL.func
def blur( w : size, h : size, img : num[h,w] ):
blur_x[j:h,i:w] = 0.25 * ( (i-1 >= 0)*img[j,i-1]
+ 2*img[j,i ]
+ (i+1 < w)*img[j,i+1] )
blur_y[j:h,i:w] = 0.25 * ( (j-1 >= 0)*blur_x[j-1,i]
+ 2*blur_x[j ,i]
+ (j+1 < h)*blur_x[j+1,i] )
return blur_y
return blur
def gen_deriv_sig(self):
return { 'img' : 'dimg' }
def gen_deriv(self):
@ATL.func
def dblur( w : size, h : size, img : num[h,w], dimg : num[h,w] ):
blur_x[j:h,i:w] = 0.25 * ( (i-1 >= 0)*img[j,i-1]
+ 2*img[j,i ]
+ (i+1 < w)*img[j,i+1] )
dblur_x[j:h,i:w] = 0.25 * ( (i-1 >= 0)*dimg[j,i-1]
+ 2*dimg[j,i ]
+ (i+1 < w)*dimg[j,i+1] )
blur_y[j:h,i:w] = 0.25 * ( (j-1 >= 0)*blur_x[j-1,i]
+ 2*blur_x[j ,i]
+ (j+1 < h)*blur_x[j+1,i] )
dblur_y[j:h,i:w] = 0.25 * ( (j-1 >= 0)*dblur_x[j-1,i]
+ 2*dblur_x[j ,i]
+ (j+1 < h)*dblur_x[j+1,i] )
return (blur_y, dblur_y)
return dblur
def rand_input(self):
w, h = self.rand.randint(10,20), self.rand.randint(10,20)
img = self.rand.rand_ndarray([h,w])
return (w,h,img)
def rand_deriv_input(self):
w, h, img = self.rand_input()
dimg = self.rand.rand_ndarray([h,w])
return ((w,h,img),(dimg,))
def rand_deriv_inout(self):
indata, din = self.rand_deriv_input()
w, h = indata[0:2]
d_out = self.rand.rand_ndarray([h,w])
return (indata,din,d_out)
def rand_perf_inout(self):
w, h = 1000,1000
img = self.rand.rand_ndarray([h,w])
dimg = self.rand.rand_ndarray([h,w])
d_out = self.rand.rand_ndarray([h,w])
return ((w,h,img),(dimg,),d_out)
def data_zeros(self):
w, h = 4, 4
indata = (w,h,np.zeros([h,w],order='F'))
outdata = np.zeros([h,w],order='F')
return indata, outdata
def data_checker_2(self):
# a small checker pattern
w, h = 8, 6
img = np.zeros([h,w],order='F')
predict = np.zeros([h,w],order='F')
for i in range(0,w):
for j in range(0,h):
imod = (i//2) % 2
jmod = (j//2) % 2
val = 1.0 if imod == jmod else 0.0
img[j,i] = val
pval = (2/16 + 2/8 + 1/4) if val == 1.0 else (2/16 + 2/8)
# edge correction
on_ibd = (i == 0 or i == w-1)
on_jbd = (j == 0 or j == h-1)
if on_ibd:
if val == 1.0: pval -= 1/16
else: pval -= (1/16 + 1/8)
if on_jbd:
if val == 1.0: pval -= 1/16
else: pval -= (1/16 + 1/8)
if on_ibd and on_jbd:
if val == 1.0: pval += 1/16
else: pval += 0
predict[j,i] = pval
return (w,h,img), predict
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
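# A minimal NumPy reference for the clamped blur stencil used above (an illustrative
# sketch only: the helper name `_reference_blur` is an assumption and is not part of
# the original test suite). Out-of-range neighbours contribute zero, matching the
# (i-1 >= 0) / (i+1 < w) guards in the ATL code, which is handy when checking the
# hand-computed expectations in data_checker_2.
def _reference_blur(img):
    h, w = img.shape
    padded = np.zeros((h + 2, w + 2), dtype=np.double)
    padded[1:-1, 1:-1] = img
    # horizontal pass: 0.25 * (left + 2*centre + right), missing neighbours count as 0
    blur_x = 0.25 * (padded[1:-1, :-2] + 2.0 * padded[1:-1, 1:-1] + padded[1:-1, 2:])
    padded[1:-1, 1:-1] = blur_x
    # vertical pass over blur_x with the same clamped stencil
    return 0.25 * (padded[:-2, 1:-1] + 2.0 * padded[1:-1, 1:-1] + padded[2:, 1:-1])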
if __name__ == '__main__':
unittest.main()
|
State Before: k : Type u_2
V : Type u_1
P : Type u_3
inst✝² : Ring k
inst✝¹ : AddCommGroup V
inst✝ : Module k V
S : AffineSpace V P
p₁ p₂ : P
⊢ direction ⊥ = ⊥ State After: no goals Tactic: rw [direction_eq_vectorSpan, bot_coe, vectorSpan_def, vsub_empty, Submodule.span_empty] |
[STATEMENT]
lemma arr_lunit [simp]:
assumes "ide a"
shows "arr \<l>[a]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. arr \<l>[a]
[PROOF STEP]
using assms lunit_in_hom
[PROOF STATE]
proof (prove)
using this:
ide a
ide ?a \<Longrightarrow> \<guillemotleft>\<l>[?a] : \<I> \<otimes> ?a \<rightarrow> ?a\<guillemotright>
goal (1 subgoal):
1. arr \<l>[a]
[PROOF STEP]
by auto |
lemma (in order_topology) at_within_Icc_at: "a < x \<Longrightarrow> x < b \<Longrightarrow> at x within {a..b} = at x" |
If $c > 0$, then $-\frac{b}{c} < a$ if and only if $-b < ca$. |
[STATEMENT]
lemma lincomb_conv_take_right : "rs \<bullet>\<cdot> ms = rs \<bullet>\<cdot> take (length rs) ms"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rs \<bullet>\<cdot> ms = rs \<bullet>\<cdot> take (length rs) ms
[PROOF STEP]
using lincomb_Nil lincomb_Cons
[PROOF STATE]
proof (prove)
using this:
?rs = [] \<or> ?ms = [] \<Longrightarrow> ?rs \<bullet>\<cdot> ?ms = (0::'m)
(?r # ?rs) \<bullet>\<cdot> (?m # ?ms) = ?r \<cdot> ?m + ?rs \<bullet>\<cdot> ?ms
goal (1 subgoal):
1. rs \<bullet>\<cdot> ms = rs \<bullet>\<cdot> take (length rs) ms
[PROOF STEP]
by (induct rs ms rule: list_induct2') auto |
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov, Johannes Hölzl, Mario Carneiro, Patrick Massot
-/
import data.prod.pprod
import data.set.countable
import order.filter.basic
/-!
# Filter bases
A filter basis `B : filter_basis α` on a type `α` is a nonempty collection of sets of `α`
such that the intersection of two elements of this collection contains some element of
the collection. Compared to filters, filter bases do not require that any set containing
an element of `B` belongs to `B`.
A filter basis `B` can be used to construct `B.filter : filter α` such that a set belongs
to `B.filter` if and only if it contains an element of `B`.
Given an indexing type `ι`, a predicate `p : ι → Prop`, and a map `s : ι → set α`,
the proposition `h : filter.is_basis p s` ensures that the range of `s` bounded by `p`
(i.e. `s '' set_of p`) defines a filter basis `h.filter_basis`.
If one already has a filter `l` on `α`, `filter.has_basis l p s` (where `p : ι → Prop`
and `s : ι → set α` as above) means that a set belongs to `l` if and
only if it contains some `s i` with `p i`. It implies `h : filter.is_basis p s`, and
`l = h.filter_basis.filter`. The point of this definition is that checking statements
involving elements of `l` often reduces to checking them on the basis elements.
We define a function `has_basis.index (h : filter.has_basis l p s) (t) (ht : t ∈ l)` that returns
some index `i` such that `p i` and `s i ⊆ t`. This function can be useful to avoid manual
destruction of `h.mem_iff.mpr ht` using `cases` or `let`.
This file also introduces more restricted classes of bases, involving monotonicity or
countability. In particular, for `l : filter α`, `l.is_countably_generated` means
there is a countable set of sets which generates `l`. This is reformulated in terms of bases,
and consequences are derived.
## Main statements
* `has_basis.mem_iff`, `has_basis.mem_of_superset`, `has_basis.mem_of_mem` : restate `t ∈ f`
in terms of a basis;
* `basis_sets` : all sets of a filter form a basis;
* `has_basis.inf`, `has_basis.inf_principal`, `has_basis.prod`, `has_basis.prod_self`,
`has_basis.map`, `has_basis.comap` : combinators to construct filters of `l ⊓ l'`,
`l ⊓ 𝓟 t`, `l ×ᶠ l'`, `l ×ᶠ l`, `l.map f`, `l.comap f` respectively;
* `has_basis.le_iff`, `has_basis.ge_iff`, `has_basis.le_basis_iff` : restate `l ≤ l'` in terms
of bases.
* `has_basis.tendsto_right_iff`, `has_basis.tendsto_left_iff`, `has_basis.tendsto_iff` : restate
`tendsto f l l'` in terms of bases.
* `is_countably_generated_iff_exists_antitone_basis` : proves a filter is
countably generated if and only if it admits a basis parametrized by a
decreasing sequence of sets indexed by `ℕ`.
* `tendsto_iff_seq_tendsto` : an abstract version of "sequentially continuous implies continuous".
## Implementation notes
As with `Union`/`bUnion`/`sUnion`, there are three different approaches to filter bases:
* `has_basis l s`, `s : set (set α)`;
* `has_basis l s`, `s : ι → set α`;
* `has_basis l p s`, `p : ι → Prop`, `s : ι → set α`.
We use the latter one because, e.g., `𝓝 x` in an `emetric_space` or in a `metric_space` has a basis
of this form. The other two can be emulated using `s = id` or `p = λ _, true`.
With this approach sometimes one needs to `simp` the statement provided by the `has_basis`
machinery, e.g., `simp only [exists_prop, true_and]` or `simp only [forall_const]` can help
with the case `p = λ _, true`.
-/
open set filter
open_locale filter classical
section sort
variables {α β γ : Type*} {ι ι' : Sort*}
/-- A filter basis `B` on a type `α` is a nonempty collection of sets of `α`
such that the intersection of two elements of this collection contains some element
of the collection. -/
structure filter_basis (α : Type*) :=
(sets : set (set α))
(nonempty : sets.nonempty)
(inter_sets {x y} : x ∈ sets → y ∈ sets → ∃ z ∈ sets, z ⊆ x ∩ y)
instance filter_basis.nonempty_sets (B : filter_basis α) : nonempty B.sets := B.nonempty.to_subtype
/-- If `B` is a filter basis on `α`, and `U` a subset of `α` then we can write `U ∈ B` as
on paper. -/
@[reducible]
instance {α : Type*}: has_mem (set α) (filter_basis α) := ⟨λ U B, U ∈ B.sets⟩
-- For illustration purposes, the filter basis defining (at_top : filter ℕ)
instance : inhabited (filter_basis ℕ) :=
⟨{ sets := range Ici,
nonempty := ⟨Ici 0, mem_range_self 0⟩,
inter_sets := begin
rintros _ _ ⟨n, rfl⟩ ⟨m, rfl⟩,
refine ⟨Ici (max n m), mem_range_self _, _⟩,
rintros p p_in,
split ; rw mem_Ici at *,
exact le_of_max_le_left p_in,
exact le_of_max_le_right p_in,
end }⟩
/-- View a filter as a filter basis. -/
def filter.as_basis (f : filter α) : filter_basis α :=
⟨f.sets, ⟨univ, univ_mem⟩, λ x y hx hy, ⟨x ∩ y, inter_mem hx hy, subset_rfl⟩⟩
/-- `is_basis p s` means the image of `s` bounded by `p` is a filter basis. -/
protected structure filter.is_basis (p : ι → Prop) (s : ι → set α) : Prop :=
(nonempty : ∃ i, p i)
(inter : ∀ {i j}, p i → p j → ∃ k, p k ∧ s k ⊆ s i ∩ s j)
namespace filter
namespace is_basis
/-- Constructs a filter basis from an indexed family of sets satisfying `is_basis`. -/
protected def filter_basis {p : ι → Prop} {s : ι → set α} (h : is_basis p s) : filter_basis α :=
{ sets := {t | ∃ i, p i ∧ s i = t},
nonempty := let ⟨i, hi⟩ := h.nonempty in ⟨s i, ⟨i, hi, rfl⟩⟩,
inter_sets := by { rintros _ _ ⟨i, hi, rfl⟩ ⟨j, hj, rfl⟩,
rcases h.inter hi hj with ⟨k, hk, hk'⟩,
exact ⟨_, ⟨k, hk, rfl⟩, hk'⟩ } }
variables {p : ι → Prop} {s : ι → set α} (h : is_basis p s)
lemma mem_filter_basis_iff {U : set α} : U ∈ h.filter_basis ↔ ∃ i, p i ∧ s i = U :=
iff.rfl
end is_basis
end filter
namespace filter_basis
/-- The filter associated to a filter basis. -/
protected def filter (B : filter_basis α) : filter α :=
{ sets := {s | ∃ t ∈ B, t ⊆ s},
univ_sets := let ⟨s, s_in⟩ := B.nonempty in ⟨s, s_in, s.subset_univ⟩,
sets_of_superset := λ x y ⟨s, s_in, h⟩ hxy, ⟨s, s_in, set.subset.trans h hxy⟩,
inter_sets := λ x y ⟨s, s_in, hs⟩ ⟨t, t_in, ht⟩,
let ⟨u, u_in, u_sub⟩ := B.inter_sets s_in t_in in
⟨u, u_in, set.subset.trans u_sub $ set.inter_subset_inter hs ht⟩ }
lemma mem_filter_iff (B : filter_basis α) {U : set α} : U ∈ B.filter ↔ ∃ s ∈ B, s ⊆ U :=
iff.rfl
lemma mem_filter_of_mem (B : filter_basis α) {U : set α} : U ∈ B → U ∈ B.filter:=
λ U_in, ⟨U, U_in, subset.refl _⟩
lemma eq_infi_principal (B : filter_basis α) : B.filter = ⨅ s : B.sets, 𝓟 s :=
begin
have : directed (≥) (λ (s : B.sets), 𝓟 (s : set α)),
{ rintros ⟨U, U_in⟩ ⟨V, V_in⟩,
rcases B.inter_sets U_in V_in with ⟨W, W_in, W_sub⟩,
use [W, W_in],
simp only [ge_iff_le, le_principal_iff, mem_principal, subtype.coe_mk],
exact subset_inter_iff.mp W_sub },
ext U,
simp [mem_filter_iff, mem_infi_of_directed this]
end
protected lemma generate (B : filter_basis α) : generate B.sets = B.filter :=
begin
apply le_antisymm,
{ intros U U_in,
rcases B.mem_filter_iff.mp U_in with ⟨V, V_in, h⟩,
exact generate_sets.superset (generate_sets.basic V_in) h },
{ rw sets_iff_generate,
apply mem_filter_of_mem }
end
end filter_basis
namespace filter
namespace is_basis
variables {p : ι → Prop} {s : ι → set α}
/-- Constructs a filter from an indexed family of sets satisfying `is_basis`. -/
protected def filter (h : is_basis p s) : filter α := h.filter_basis.filter
protected lemma mem_filter_iff (h : is_basis p s) {U : set α} :
U ∈ h.filter ↔ ∃ i, p i ∧ s i ⊆ U :=
begin
erw [h.filter_basis.mem_filter_iff],
simp only [mem_filter_basis_iff h, exists_prop],
split,
{ rintros ⟨_, ⟨i, pi, rfl⟩, h⟩,
tauto },
{ tauto }
end
lemma filter_eq_generate (h : is_basis p s) : h.filter = generate {U | ∃ i, p i ∧ s i = U} :=
by erw h.filter_basis.generate ; refl
end is_basis
/-- We say that a filter `l` has a basis `s : ι → set α` bounded by `p : ι → Prop`,
if `t ∈ l` if and only if `t` includes `s i` for some `i` such that `p i`. -/
protected structure has_basis (l : filter α) (p : ι → Prop) (s : ι → set α) : Prop :=
(mem_iff' : ∀ (t : set α), t ∈ l ↔ ∃ i (hi : p i), s i ⊆ t)
section same_type
variables {l l' : filter α} {p : ι → Prop} {s : ι → set α} {t : set α} {i : ι}
{p' : ι' → Prop} {s' : ι' → set α} {i' : ι'}
lemma has_basis_generate (s : set (set α)) :
(generate s).has_basis (λ t, set.finite t ∧ t ⊆ s) (λ t, ⋂₀ t) :=
⟨begin
intro U,
rw mem_generate_iff,
apply exists_congr,
tauto
end⟩
/-- The smallest filter basis containing a given collection of sets. -/
def filter_basis.of_sets (s : set (set α)) : filter_basis α :=
{ sets := sInter '' { t | set.finite t ∧ t ⊆ s},
nonempty := ⟨univ, ∅, ⟨⟨finite_empty, empty_subset s⟩, sInter_empty⟩⟩,
inter_sets := begin
rintros _ _ ⟨a, ⟨fina, suba⟩, rfl⟩ ⟨b, ⟨finb, subb⟩, rfl⟩,
exact ⟨⋂₀ (a ∪ b), mem_image_of_mem _ ⟨fina.union finb, union_subset suba subb⟩,
by rw sInter_union⟩,
end }
/-- Definition of `has_basis` unfolded with implicit set argument. -/
lemma has_basis.mem_iff (hl : l.has_basis p s) : t ∈ l ↔ ∃ i (hi : p i), s i ⊆ t :=
hl.mem_iff' t
lemma has_basis.eq_of_same_basis (hl : l.has_basis p s) (hl' : l'.has_basis p s) : l = l' :=
begin
ext t,
rw [hl.mem_iff, hl'.mem_iff]
end
lemma has_basis_iff : l.has_basis p s ↔ ∀ t, t ∈ l ↔ ∃ i (hi : p i), s i ⊆ t :=
⟨λ ⟨h⟩, h, λ h, ⟨h⟩⟩
lemma has_basis.ex_mem (h : l.has_basis p s) : ∃ i, p i :=
let ⟨i, pi, h⟩ := h.mem_iff.mp univ_mem in ⟨i, pi⟩
protected lemma has_basis.nonempty (h : l.has_basis p s) : nonempty ι :=
nonempty_of_exists h.ex_mem
protected lemma is_basis.has_basis (h : is_basis p s) : has_basis h.filter p s :=
⟨λ t, by simp only [h.mem_filter_iff, exists_prop]⟩
lemma has_basis.mem_of_superset (hl : l.has_basis p s) (hi : p i) (ht : s i ⊆ t) : t ∈ l :=
(hl.mem_iff).2 ⟨i, hi, ht⟩
lemma has_basis.mem_of_mem (hl : l.has_basis p s) (hi : p i) : s i ∈ l :=
hl.mem_of_superset hi $ subset.refl _
/-- Index of a basis set such that `s i ⊆ t` as an element of `subtype p`. -/
noncomputable def has_basis.index (h : l.has_basis p s) (t : set α) (ht : t ∈ l) :
{i : ι // p i} :=
⟨(h.mem_iff.1 ht).some, (h.mem_iff.1 ht).some_spec.fst⟩
lemma has_basis.property_index (h : l.has_basis p s) (ht : t ∈ l) : p (h.index t ht) :=
(h.index t ht).2
lemma has_basis.set_index_mem (h : l.has_basis p s) (ht : t ∈ l) : s (h.index t ht) ∈ l :=
h.mem_of_mem $ h.property_index _
lemma has_basis.set_index_subset (h : l.has_basis p s) (ht : t ∈ l) : s (h.index t ht) ⊆ t :=
(h.mem_iff.1 ht).some_spec.snd
lemma has_basis.is_basis (h : l.has_basis p s) : is_basis p s :=
{ nonempty := let ⟨i, hi, H⟩ := h.mem_iff.mp univ_mem in ⟨i, hi⟩,
inter := λ i j hi hj, by simpa [h.mem_iff]
using l.inter_sets (h.mem_of_mem hi) (h.mem_of_mem hj) }
lemma has_basis.filter_eq (h : l.has_basis p s) : h.is_basis.filter = l :=
by { ext U, simp [h.mem_iff, is_basis.mem_filter_iff] }
lemma has_basis.eq_generate (h : l.has_basis p s) : l = generate { U | ∃ i, p i ∧ s i = U } :=
by rw [← h.is_basis.filter_eq_generate, h.filter_eq]
lemma generate_eq_generate_inter (s : set (set α)) :
generate s = generate (sInter '' { t | set.finite t ∧ t ⊆ s}) :=
by erw [(filter_basis.of_sets s).generate, ← (has_basis_generate s).filter_eq] ; refl
lemma of_sets_filter_eq_generate (s : set (set α)) : (filter_basis.of_sets s).filter = generate s :=
by rw [← (filter_basis.of_sets s).generate, generate_eq_generate_inter s] ; refl
protected lemma _root_.filter_basis.has_basis {α : Type*} (B : filter_basis α) :
has_basis (B.filter) (λ s : set α, s ∈ B) id :=
⟨λ t, B.mem_filter_iff⟩
lemma has_basis.to_has_basis' (hl : l.has_basis p s) (h : ∀ i, p i → ∃ i', p' i' ∧ s' i' ⊆ s i)
(h' : ∀ i', p' i' → s' i' ∈ l) : l.has_basis p' s' :=
begin
refine ⟨λ t, ⟨λ ht, _, λ ⟨i', hi', ht⟩, mem_of_superset (h' i' hi') ht⟩⟩,
rcases hl.mem_iff.1 ht with ⟨i, hi, ht⟩,
rcases h i hi with ⟨i', hi', hs's⟩,
exact ⟨i', hi', subset.trans hs's ht⟩
end
lemma has_basis.to_has_basis (hl : l.has_basis p s) (h : ∀ i, p i → ∃ i', p' i' ∧ s' i' ⊆ s i)
(h' : ∀ i', p' i' → ∃ i, p i ∧ s i ⊆ s' i') : l.has_basis p' s' :=
hl.to_has_basis' h $ λ i' hi', let ⟨i, hi, hss'⟩ := h' i' hi' in hl.mem_iff.2 ⟨i, hi, hss'⟩
lemma has_basis.to_subset (hl : l.has_basis p s) {t : ι → set α} (h : ∀ i, p i → t i ⊆ s i)
(ht : ∀ i, p i → t i ∈ l) : l.has_basis p t :=
hl.to_has_basis' (λ i hi, ⟨i, hi, h i hi⟩) ht
lemma has_basis.eventually_iff (hl : l.has_basis p s) {q : α → Prop} :
(∀ᶠ x in l, q x) ↔ ∃ i, p i ∧ ∀ ⦃x⦄, x ∈ s i → q x :=
by simpa using hl.mem_iff
lemma has_basis.frequently_iff (hl : l.has_basis p s) {q : α → Prop} :
(∃ᶠ x in l, q x) ↔ ∀ i, p i → ∃ x ∈ s i, q x :=
by simp [filter.frequently, hl.eventually_iff]
lemma has_basis.exists_iff (hl : l.has_basis p s) {P : set α → Prop}
(mono : ∀ ⦃s t⦄, s ⊆ t → P t → P s) :
(∃ s ∈ l, P s) ↔ ∃ (i) (hi : p i), P (s i) :=
⟨λ ⟨s, hs, hP⟩, let ⟨i, hi, his⟩ := hl.mem_iff.1 hs in ⟨i, hi, mono his hP⟩,
λ ⟨i, hi, hP⟩, ⟨s i, hl.mem_of_mem hi, hP⟩⟩
lemma has_basis.forall_iff (hl : l.has_basis p s) {P : set α → Prop}
(mono : ∀ ⦃s t⦄, s ⊆ t → P s → P t) :
(∀ s ∈ l, P s) ↔ ∀ i, p i → P (s i) :=
⟨λ H i hi, H (s i) $ hl.mem_of_mem hi,
λ H s hs, let ⟨i, hi, his⟩ := hl.mem_iff.1 hs in mono his (H i hi)⟩
lemma has_basis.ne_bot_iff (hl : l.has_basis p s) :
ne_bot l ↔ (∀ {i}, p i → (s i).nonempty) :=
forall_mem_nonempty_iff_ne_bot.symm.trans $ hl.forall_iff $ λ _ _, nonempty.mono
lemma has_basis.eq_bot_iff (hl : l.has_basis p s) :
l = ⊥ ↔ ∃ i, p i ∧ s i = ∅ :=
not_iff_not.1 $ ne_bot_iff.symm.trans $ hl.ne_bot_iff.trans $
by simp only [not_exists, not_and, ← ne_empty_iff_nonempty]
lemma basis_sets (l : filter α) : l.has_basis (λ s : set α, s ∈ l) id :=
⟨λ t, exists_mem_subset_iff.symm⟩
lemma as_basis_filter (f : filter α) : f.as_basis.filter = f :=
by ext t; exact exists_mem_subset_iff
lemma has_basis_self {l : filter α} {P : set α → Prop} :
has_basis l (λ s, s ∈ l ∧ P s) id ↔ ∀ t ∈ l, ∃ r ∈ l, P r ∧ r ⊆ t :=
begin
simp only [has_basis_iff, exists_prop, id, and_assoc],
exact forall_congr (λ s, ⟨λ h, h.1, λ h, ⟨h, λ ⟨t, hl, hP, hts⟩, mem_of_superset hl hts⟩⟩)
end
lemma has_basis.comp_of_surjective (h : l.has_basis p s) {g : ι' → ι} (hg : function.surjective g) :
l.has_basis (p ∘ g) (s ∘ g) :=
⟨λ t, h.mem_iff.trans hg.exists⟩
lemma has_basis.comp_equiv (h : l.has_basis p s) (e : ι' ≃ ι) : l.has_basis (p ∘ e) (s ∘ e) :=
h.comp_of_surjective e.surjective
/-- If `{s i | p i}` is a basis of a filter `l` and each `s i` includes `s j` such that
`p j ∧ q j`, then `{s j | p j ∧ q j}` is a basis of `l`. -/
lemma has_basis.restrict (h : l.has_basis p s) {q : ι → Prop}
(hq : ∀ i, p i → ∃ j, p j ∧ q j ∧ s j ⊆ s i) :
l.has_basis (λ i, p i ∧ q i) s :=
begin
refine ⟨λ t, ⟨λ ht, _, λ ⟨i, hpi, hti⟩, h.mem_iff.2 ⟨i, hpi.1, hti⟩⟩⟩,
rcases h.mem_iff.1 ht with ⟨i, hpi, hti⟩,
rcases hq i hpi with ⟨j, hpj, hqj, hji⟩,
exact ⟨j, ⟨hpj, hqj⟩, subset.trans hji hti⟩
end
/-- If `{s i | p i}` is a basis of a filter `l` and `V ∈ l`, then `{s i | p i ∧ s i ⊆ V}`
is a basis of `l`. -/
lemma has_basis.restrict_subset (h : l.has_basis p s) {V : set α} (hV : V ∈ l) :
l.has_basis (λ i, p i ∧ s i ⊆ V) s :=
h.restrict $ λ i hi, (h.mem_iff.1 (inter_mem hV (h.mem_of_mem hi))).imp $
λ j hj, ⟨hj.fst, subset_inter_iff.1 hj.snd⟩
lemma has_basis.has_basis_self_subset {p : set α → Prop} (h : l.has_basis (λ s, s ∈ l ∧ p s) id)
{V : set α} (hV : V ∈ l) : l.has_basis (λ s, s ∈ l ∧ p s ∧ s ⊆ V) id :=
by simpa only [and_assoc] using h.restrict_subset hV
theorem has_basis.ge_iff (hl' : l'.has_basis p' s') : l ≤ l' ↔ ∀ i', p' i' → s' i' ∈ l :=
⟨λ h i' hi', h $ hl'.mem_of_mem hi',
λ h s hs, let ⟨i', hi', hs⟩ := hl'.mem_iff.1 hs in mem_of_superset (h _ hi') hs⟩
theorem has_basis.le_iff (hl : l.has_basis p s) : l ≤ l' ↔ ∀ t ∈ l', ∃ i (hi : p i), s i ⊆ t :=
by simp only [le_def, hl.mem_iff]
theorem has_basis.le_basis_iff (hl : l.has_basis p s) (hl' : l'.has_basis p' s') :
l ≤ l' ↔ ∀ i', p' i' → ∃ i (hi : p i), s i ⊆ s' i' :=
by simp only [hl'.ge_iff, hl.mem_iff]
lemma has_basis.ext (hl : l.has_basis p s) (hl' : l'.has_basis p' s')
(h : ∀ i, p i → ∃ i', p' i' ∧ s' i' ⊆ s i)
(h' : ∀ i', p' i' → ∃ i, p i ∧ s i ⊆ s' i') : l = l' :=
begin
apply le_antisymm,
{ rw hl.le_basis_iff hl',
simpa using h' },
{ rw hl'.le_basis_iff hl,
simpa using h },
end
lemma has_basis.inf' (hl : l.has_basis p s) (hl' : l'.has_basis p' s') :
(l ⊓ l').has_basis (λ i : pprod ι ι', p i.1 ∧ p' i.2) (λ i, s i.1 ∩ s' i.2) :=
⟨begin
intro t,
split,
{ simp only [mem_inf_iff, exists_prop, hl.mem_iff, hl'.mem_iff],
rintros ⟨t, ⟨i, hi, ht⟩, t', ⟨i', hi', ht'⟩, rfl⟩,
use [⟨i, i'⟩, ⟨hi, hi'⟩, inter_subset_inter ht ht'] },
{ rintros ⟨⟨i, i'⟩, ⟨hi, hi'⟩, H⟩,
exact mem_inf_of_inter (hl.mem_of_mem hi) (hl'.mem_of_mem hi') H }
end⟩
lemma has_basis.inf {ι ι' : Type*} {p : ι → Prop} {s : ι → set α} {p' : ι' → Prop}
{s' : ι' → set α} (hl : l.has_basis p s) (hl' : l'.has_basis p' s') :
(l ⊓ l').has_basis (λ i : ι × ι', p i.1 ∧ p' i.2) (λ i, s i.1 ∩ s' i.2) :=
(hl.inf' hl').to_has_basis (λ i hi, ⟨⟨i.1, i.2⟩, hi, subset.rfl⟩)
(λ i hi, ⟨⟨i.1, i.2⟩, hi, subset.rfl⟩)
lemma has_basis_infi {ι : Type*} {ι' : ι → Type*} {l : ι → filter α}
{p : Π i, ι' i → Prop} {s : Π i, ι' i → set α} (hl : ∀ i, (l i).has_basis (p i) (s i)) :
(⨅ i, l i).has_basis (λ If : set ι × Π i, ι' i, If.1.finite ∧ ∀ i ∈ If.1, p i (If.2 i))
(λ If : set ι × Π i, ι' i, ⋂ i ∈ If.1, s i (If.2 i)) :=
⟨begin
intro t,
split,
{ simp only [mem_infi', (hl _).mem_iff],
rintros ⟨I, hI, V, hV, -, hVt, -⟩,
choose u hu using hV,
refine ⟨⟨I, u⟩, ⟨hI, λ i _, (hu i).1⟩, _⟩,
rw hVt,
exact Inter_mono (λ i, Inter_mono $ λ hi, (hu i).2) },
{ rintros ⟨⟨I, f⟩, ⟨hI₁, hI₂⟩, hsub⟩,
refine mem_of_superset _ hsub,
exact (bInter_mem hI₁).mpr (λ i hi, mem_infi_of_mem i $ (hl i).mem_of_mem $ hI₂ _ hi) }
end⟩
lemma has_basis_infi_of_directed' {ι : Type*} {ι' : ι → Sort*}
[nonempty ι]
{l : ι → filter α} (s : Π i, (ι' i) → set α) (p : Π i, (ι' i) → Prop)
(hl : ∀ i, (l i).has_basis (p i) (s i)) (h : directed (≥) l) :
(⨅ i, l i).has_basis (λ (ii' : Σ i, ι' i), p ii'.1 ii'.2) (λ ii', s ii'.1 ii'.2) :=
begin
refine ⟨λ t, _⟩,
rw [mem_infi_of_directed h, sigma.exists],
exact exists_congr (λ i, (hl i).mem_iff)
end
lemma has_basis_infi_of_directed {ι : Type*} {ι' : Sort*}
[nonempty ι]
{l : ι → filter α} (s : ι → ι' → set α) (p : ι → ι' → Prop)
(hl : ∀ i, (l i).has_basis (p i) (s i)) (h : directed (≥) l) :
(⨅ i, l i).has_basis (λ (ii' : ι × ι'), p ii'.1 ii'.2) (λ ii', s ii'.1 ii'.2) :=
begin
refine ⟨λ t, _⟩,
rw [mem_infi_of_directed h, prod.exists],
exact exists_congr (λ i, (hl i).mem_iff)
end
lemma has_basis_binfi_of_directed' {ι : Type*} {ι' : ι → Sort*}
{dom : set ι} (hdom : dom.nonempty)
{l : ι → filter α} (s : Π i, (ι' i) → set α) (p : Π i, (ι' i) → Prop)
(hl : ∀ i ∈ dom, (l i).has_basis (p i) (s i)) (h : directed_on (l ⁻¹'o ge) dom) :
(⨅ i ∈ dom, l i).has_basis (λ (ii' : Σ i, ι' i), ii'.1 ∈ dom ∧ p ii'.1 ii'.2)
(λ ii', s ii'.1 ii'.2) :=
begin
refine ⟨λ t, _⟩,
rw [mem_binfi_of_directed h hdom, sigma.exists],
refine exists_congr (λ i, ⟨_, _⟩),
{ rintros ⟨hi, hti⟩,
rcases (hl i hi).mem_iff.mp hti with ⟨b, hb, hbt⟩,
exact ⟨b, ⟨hi, hb⟩, hbt⟩ },
{ rintros ⟨b, ⟨hi, hb⟩, hibt⟩,
exact ⟨hi, (hl i hi).mem_iff.mpr ⟨b, hb, hibt⟩⟩ }
end
lemma has_basis_binfi_of_directed {ι : Type*} {ι' : Sort*}
{dom : set ι} (hdom : dom.nonempty)
{l : ι → filter α} (s : ι → ι' → set α) (p : ι → ι' → Prop)
(hl : ∀ i ∈ dom, (l i).has_basis (p i) (s i)) (h : directed_on (l ⁻¹'o ge) dom) :
(⨅ i ∈ dom, l i).has_basis (λ (ii' : ι × ι'), ii'.1 ∈ dom ∧ p ii'.1 ii'.2)
(λ ii', s ii'.1 ii'.2) :=
begin
refine ⟨λ t, _⟩,
rw [mem_binfi_of_directed h hdom, prod.exists],
refine exists_congr (λ i, ⟨_, _⟩),
{ rintros ⟨hi, hti⟩,
rcases (hl i hi).mem_iff.mp hti with ⟨b, hb, hbt⟩,
exact ⟨b, ⟨hi, hb⟩, hbt⟩ },
{ rintros ⟨b, ⟨hi, hb⟩, hibt⟩,
exact ⟨hi, (hl i hi).mem_iff.mpr ⟨b, hb, hibt⟩⟩ }
end
lemma has_basis_principal (t : set α) : (𝓟 t).has_basis (λ i : unit, true) (λ i, t) :=
⟨λ U, by simp⟩
lemma has_basis_pure (x : α) : (pure x : filter α).has_basis (λ i : unit, true) (λ i, {x}) :=
by simp only [← principal_singleton, has_basis_principal]
lemma has_basis.sup' (hl : l.has_basis p s) (hl' : l'.has_basis p' s') :
(l ⊔ l').has_basis (λ i : pprod ι ι', p i.1 ∧ p' i.2) (λ i, s i.1 ∪ s' i.2) :=
⟨begin
intros t,
simp only [mem_sup, hl.mem_iff, hl'.mem_iff, pprod.exists, union_subset_iff, exists_prop,
and_assoc, exists_and_distrib_left],
simp only [← and_assoc, exists_and_distrib_right, and_comm]
end⟩
lemma has_basis.sup {ι ι' : Type*} {p : ι → Prop} {s : ι → set α} {p' : ι' → Prop}
{s' : ι' → set α} (hl : l.has_basis p s) (hl' : l'.has_basis p' s') :
(l ⊔ l').has_basis (λ i : ι × ι', p i.1 ∧ p' i.2) (λ i, s i.1 ∪ s' i.2) :=
(hl.sup' hl').to_has_basis (λ i hi, ⟨⟨i.1, i.2⟩, hi, subset.rfl⟩)
(λ i hi, ⟨⟨i.1, i.2⟩, hi, subset.rfl⟩)
lemma has_basis_supr {ι : Sort*} {ι' : ι → Type*} {l : ι → filter α}
{p : Π i, ι' i → Prop} {s : Π i, ι' i → set α} (hl : ∀ i, (l i).has_basis (p i) (s i)) :
(⨆ i, l i).has_basis (λ f : Π i, ι' i, ∀ i, p i (f i)) (λ f : Π i, ι' i, ⋃ i, s i (f i)) :=
has_basis_iff.mpr $ λ t, by simp only [has_basis_iff, (hl _).mem_iff, classical.skolem,
forall_and_distrib, Union_subset_iff, mem_supr]
lemma has_basis.sup_principal (hl : l.has_basis p s) (t : set α) :
(l ⊔ 𝓟 t).has_basis p (λ i, s i ∪ t) :=
⟨λ u, by simp only [(hl.sup' (has_basis_principal t)).mem_iff, pprod.exists, exists_prop, and_true,
unique.exists_iff]⟩
lemma has_basis.sup_pure (hl : l.has_basis p s) (x : α) :
(l ⊔ pure x).has_basis p (λ i, s i ∪ {x}) :=
by simp only [← principal_singleton, hl.sup_principal]
lemma has_basis.inf_principal (hl : l.has_basis p s) (s' : set α) :
(l ⊓ 𝓟 s').has_basis p (λ i, s i ∩ s') :=
⟨λ t, by simp only [mem_inf_principal, hl.mem_iff, subset_def, mem_set_of_eq,
mem_inter_iff, and_imp]⟩
lemma has_basis.inf_basis_ne_bot_iff (hl : l.has_basis p s) (hl' : l'.has_basis p' s') :
ne_bot (l ⊓ l') ↔ ∀ ⦃i⦄ (hi : p i) ⦃i'⦄ (hi' : p' i'), (s i ∩ s' i').nonempty :=
(hl.inf' hl').ne_bot_iff.trans $ by simp [@forall_swap _ ι']
lemma has_basis.inf_ne_bot_iff (hl : l.has_basis p s) :
ne_bot (l ⊓ l') ↔ ∀ ⦃i⦄ (hi : p i) ⦃s'⦄ (hs' : s' ∈ l'), (s i ∩ s').nonempty :=
hl.inf_basis_ne_bot_iff l'.basis_sets
lemma has_basis.inf_principal_ne_bot_iff (hl : l.has_basis p s) {t : set α} :
ne_bot (l ⊓ 𝓟 t) ↔ ∀ ⦃i⦄ (hi : p i), (s i ∩ t).nonempty :=
(hl.inf_principal t).ne_bot_iff
lemma has_basis.disjoint_basis_iff (hl : l.has_basis p s) (hl' : l'.has_basis p' s') :
disjoint l l' ↔ ∃ i (hi : p i) i' (hi' : p' i'), disjoint (s i) (s' i') :=
not_iff_not.mp $ by simp only [disjoint_iff, ← ne.def, ← ne_bot_iff, hl.inf_basis_ne_bot_iff hl',
not_exists, bot_eq_empty, ne_empty_iff_nonempty, inf_eq_inter]
lemma inf_ne_bot_iff :
ne_bot (l ⊓ l') ↔ ∀ ⦃s : set α⦄ (hs : s ∈ l) ⦃s'⦄ (hs' : s' ∈ l'), (s ∩ s').nonempty :=
l.basis_sets.inf_ne_bot_iff
lemma inf_principal_ne_bot_iff {s : set α} :
ne_bot (l ⊓ 𝓟 s) ↔ ∀ U ∈ l, (U ∩ s).nonempty :=
l.basis_sets.inf_principal_ne_bot_iff
lemma mem_iff_inf_principal_compl {f : filter α} {s : set α} :
s ∈ f ↔ f ⊓ 𝓟 sᶜ = ⊥ :=
begin
refine not_iff_not.1 ((inf_principal_ne_bot_iff.trans _).symm.trans ne_bot_iff),
exact ⟨λ h hs, by simpa [empty_not_nonempty] using h s hs,
λ hs t ht, inter_compl_nonempty_iff.2 $ λ hts, hs $ mem_of_superset ht hts⟩,
end
lemma not_mem_iff_inf_principal_compl {f : filter α} {s : set α} :
s ∉ f ↔ ne_bot (f ⊓ 𝓟 sᶜ) :=
(not_congr mem_iff_inf_principal_compl).trans ne_bot_iff.symm
@[simp] lemma disjoint_principal_right {f : filter α} {s : set α} :
disjoint f (𝓟 s) ↔ sᶜ ∈ f :=
by rw [mem_iff_inf_principal_compl, compl_compl, disjoint_iff]
@[simp] lemma disjoint_principal_left {f : filter α} {s : set α} :
disjoint (𝓟 s) f ↔ sᶜ ∈ f :=
by rw [disjoint.comm, disjoint_principal_right]
@[simp] lemma disjoint_principal_principal {s t : set α} :
disjoint (𝓟 s) (𝓟 t) ↔ disjoint s t :=
by simp [disjoint_iff_subset_compl_left]
alias disjoint_principal_principal ↔ _ disjoint.filter_principal
@[simp] lemma disjoint_pure_pure {x y : α} :
disjoint (pure x : filter α) (pure y) ↔ x ≠ y :=
by simp only [← principal_singleton, disjoint_principal_principal, disjoint_singleton]
lemma le_iff_forall_inf_principal_compl {f g : filter α} :
f ≤ g ↔ ∀ V ∈ g, f ⊓ 𝓟 Vᶜ = ⊥ :=
forall₂_congr $ λ _ _, mem_iff_inf_principal_compl
lemma inf_ne_bot_iff_frequently_left {f g : filter α} :
ne_bot (f ⊓ g) ↔ ∀ {p : α → Prop}, (∀ᶠ x in f, p x) → ∃ᶠ x in g, p x :=
by simpa only [inf_ne_bot_iff, frequently_iff, exists_prop, and_comm]
lemma inf_ne_bot_iff_frequently_right {f g : filter α} :
ne_bot (f ⊓ g) ↔ ∀ {p : α → Prop}, (∀ᶠ x in g, p x) → ∃ᶠ x in f, p x :=
by { rw inf_comm, exact inf_ne_bot_iff_frequently_left }
lemma has_basis.eq_binfi (h : l.has_basis p s) :
l = ⨅ i (_ : p i), 𝓟 (s i) :=
eq_binfi_of_mem_iff_exists_mem $ λ t, by simp only [h.mem_iff, mem_principal]
lemma has_basis.eq_infi (h : l.has_basis (λ _, true) s) :
l = ⨅ i, 𝓟 (s i) :=
by simpa only [infi_true] using h.eq_binfi
lemma has_basis_infi_principal {s : ι → set α} (h : directed (≥) s) [nonempty ι] :
(⨅ i, 𝓟 (s i)).has_basis (λ _, true) s :=
⟨begin
refine λ t, (mem_infi_of_directed (h.mono_comp _ _) t).trans $
by simp only [exists_prop, true_and, mem_principal],
exact λ _ _, principal_mono.2
end⟩
/-- If `s : ι → set α` is an indexed family of sets, then finite intersections of `s i` form a basis
of `⨅ i, 𝓟 (s i)`. -/
lemma has_basis_infi_principal_finite {ι : Type*} (s : ι → set α) :
(⨅ i, 𝓟 (s i)).has_basis (λ t : set ι, t.finite) (λ t, ⋂ i ∈ t, s i) :=
begin
refine ⟨λ U, (mem_infi_finite _).trans _⟩,
simp only [infi_principal_finset, mem_Union, mem_principal, exists_prop,
exists_finite_iff_finset, finset.set_bInter_coe]
end
lemma has_basis_binfi_principal {s : β → set α} {S : set β} (h : directed_on (s ⁻¹'o (≥)) S)
(ne : S.nonempty) :
(⨅ i ∈ S, 𝓟 (s i)).has_basis (λ i, i ∈ S) s :=
⟨begin
refine λ t, (mem_binfi_of_directed _ ne).trans $ by simp only [mem_principal],
rw [directed_on_iff_directed, ← directed_comp, (∘)] at h ⊢,
apply h.mono_comp _ _,
exact λ _ _, principal_mono.2
end⟩
lemma has_basis_binfi_principal' {ι : Type*} {p : ι → Prop} {s : ι → set α}
(h : ∀ i, p i → ∀ j, p j → ∃ k (h : p k), s k ⊆ s i ∧ s k ⊆ s j) (ne : ∃ i, p i) :
(⨅ i (h : p i), 𝓟 (s i)).has_basis p s :=
filter.has_basis_binfi_principal h ne
lemma has_basis.map (f : α → β) (hl : l.has_basis p s) :
(l.map f).has_basis p (λ i, f '' (s i)) :=
⟨λ t, by simp only [mem_map, image_subset_iff, hl.mem_iff, preimage]⟩
lemma has_basis.comap (f : β → α) (hl : l.has_basis p s) :
(l.comap f).has_basis p (λ i, f ⁻¹' (s i)) :=
⟨begin
intro t,
simp only [mem_comap, exists_prop, hl.mem_iff],
split,
{ rintros ⟨t', ⟨i, hi, ht'⟩, H⟩,
exact ⟨i, hi, subset.trans (preimage_mono ht') H⟩ },
{ rintros ⟨i, hi, H⟩,
exact ⟨s i, ⟨i, hi, subset.refl _⟩, H⟩ }
end⟩
lemma comap_has_basis (f : α → β) (l : filter β) :
has_basis (comap f l) (λ s : set β, s ∈ l) (λ s, f ⁻¹' s) :=
⟨λ t, mem_comap⟩
lemma has_basis.prod_self (hl : l.has_basis p s) :
(l ×ᶠ l).has_basis p (λ i, s i ×ˢ s i) :=
⟨begin
intro t,
apply mem_prod_iff.trans,
split,
{ rintros ⟨t₁, ht₁, t₂, ht₂, H⟩,
rcases hl.mem_iff.1 (inter_mem ht₁ ht₂) with ⟨i, hi, ht⟩,
exact ⟨i, hi, λ p ⟨hp₁, hp₂⟩, H ⟨(ht hp₁).1, (ht hp₂).2⟩⟩ },
{ rintros ⟨i, hi, H⟩,
exact ⟨s i, hl.mem_of_mem hi, s i, hl.mem_of_mem hi, H⟩ }
end⟩
lemma mem_prod_self_iff {s} : s ∈ l ×ᶠ l ↔ ∃ t ∈ l, t ×ˢ t ⊆ s :=
l.basis_sets.prod_self.mem_iff
lemma has_basis.sInter_sets (h : has_basis l p s) :
⋂₀ l.sets = ⋂ i (hi : p i), s i :=
begin
ext x,
simp only [mem_Inter, mem_sInter, filter.mem_sets, h.forall_mem_mem],
end
variables {ι'' : Type*} [preorder ι''] (l) (s'' : ι'' → set α)
/-- `is_antitone_basis s` means the image of `s` is a filter basis such that `s` is decreasing. -/
@[protect_proj] structure is_antitone_basis extends is_basis (λ _, true) s'' : Prop :=
(antitone : antitone s'')
/-- We say that a filter `l` has an antitone basis `s : ι → set α`, if `t ∈ l` if and only if `t`
includes `s i` for some `i`, and `s` is decreasing. -/
@[protect_proj] structure has_antitone_basis (l : filter α) (s : ι'' → set α)
extends has_basis l (λ _, true) s : Prop :=
(antitone : antitone s)
end same_type
section two_types
variables {la : filter α} {pa : ι → Prop} {sa : ι → set α}
{lb : filter β} {pb : ι' → Prop} {sb : ι' → set β} {f : α → β}
lemma has_basis.tendsto_left_iff (hla : la.has_basis pa sa) :
tendsto f la lb ↔ ∀ t ∈ lb, ∃ i (hi : pa i), maps_to f (sa i) t :=
by { simp only [tendsto, (hla.map f).le_iff, image_subset_iff], refl }
lemma has_basis.tendsto_right_iff (hlb : lb.has_basis pb sb) :
tendsto f la lb ↔ ∀ i (hi : pb i), ∀ᶠ x in la, f x ∈ sb i :=
by simpa only [tendsto, hlb.ge_iff, mem_map, filter.eventually]
lemma has_basis.tendsto_iff (hla : la.has_basis pa sa) (hlb : lb.has_basis pb sb) :
tendsto f la lb ↔ ∀ ib (hib : pb ib), ∃ ia (hia : pa ia), ∀ x ∈ sa ia, f x ∈ sb ib :=
by simp [hlb.tendsto_right_iff, hla.eventually_iff]
lemma tendsto.basis_left (H : tendsto f la lb) (hla : la.has_basis pa sa) :
∀ t ∈ lb, ∃ i (hi : pa i), maps_to f (sa i) t :=
hla.tendsto_left_iff.1 H
lemma tendsto.basis_right (H : tendsto f la lb) (hlb : lb.has_basis pb sb) :
∀ i (hi : pb i), ∀ᶠ x in la, f x ∈ sb i :=
hlb.tendsto_right_iff.1 H
lemma tendsto.basis_both (H : tendsto f la lb) (hla : la.has_basis pa sa)
(hlb : lb.has_basis pb sb) :
∀ ib (hib : pb ib), ∃ ia (hia : pa ia), ∀ x ∈ sa ia, f x ∈ sb ib :=
(hla.tendsto_iff hlb).1 H
lemma has_basis.prod'' (hla : la.has_basis pa sa) (hlb : lb.has_basis pb sb) :
(la ×ᶠ lb).has_basis (λ i : pprod ι ι', pa i.1 ∧ pb i.2) (λ i, sa i.1 ×ˢ sb i.2) :=
(hla.comap prod.fst).inf' (hlb.comap prod.snd)
lemma has_basis.prod {ι ι' : Type*} {pa : ι → Prop} {sa : ι → set α} {pb : ι' → Prop}
{sb : ι' → set β} (hla : la.has_basis pa sa) (hlb : lb.has_basis pb sb) :
(la ×ᶠ lb).has_basis (λ i : ι × ι', pa i.1 ∧ pb i.2) (λ i, sa i.1 ×ˢ sb i.2) :=
(hla.comap prod.fst).inf (hlb.comap prod.snd)
lemma has_basis.prod' {la : filter α} {lb : filter β} {ι : Type*} {p : ι → Prop}
{sa : ι → set α} {sb : ι → set β}
(hla : la.has_basis p sa) (hlb : lb.has_basis p sb)
(h_dir : ∀ {i j}, p i → p j → ∃ k, p k ∧ sa k ⊆ sa i ∧ sb k ⊆ sb j) :
(la ×ᶠ lb).has_basis p (λ i, sa i ×ˢ sb i) :=
begin
simp only [has_basis_iff, (hla.prod hlb).mem_iff],
refine λ t, ⟨_, _⟩,
{ rintros ⟨⟨i, j⟩, ⟨hi, hj⟩, hsub : sa i ×ˢ sb j ⊆ t⟩,
rcases h_dir hi hj with ⟨k, hk, ki, kj⟩,
exact ⟨k, hk, (set.prod_mono ki kj).trans hsub⟩ },
{ rintro ⟨i, hi, h⟩,
exact ⟨⟨i, i⟩, ⟨hi, hi⟩, h⟩ },
end
lemma has_antitone_basis.prod {f : filter α} {g : filter β}
{s : ℕ → set α} {t : ℕ → set β} (hf : has_antitone_basis f s) (hg : has_antitone_basis g t) :
has_antitone_basis (f ×ᶠ g) (λ n, s n ×ˢ t n) :=
begin
have h : has_basis (f ×ᶠ g) _ _ := has_basis.prod' hf.to_has_basis hg.to_has_basis _,
swap,
{ intros i j,
simp only [true_and, forall_true_left],
exact ⟨max i j, hf.antitone (le_max_left _ _), hg.antitone (le_max_right _ _)⟩, },
refine ⟨h, λ n m hn_le_m, set.prod_mono _ _⟩,
exacts [hf.antitone hn_le_m, hg.antitone hn_le_m]
end
lemma has_basis.coprod {ι ι' : Type*} {pa : ι → Prop} {sa : ι → set α} {pb : ι' → Prop}
{sb : ι' → set β} (hla : la.has_basis pa sa) (hlb : lb.has_basis pb sb) :
(la.coprod lb).has_basis (λ i : ι × ι', pa i.1 ∧ pb i.2)
(λ i, prod.fst ⁻¹' sa i.1 ∪ prod.snd ⁻¹' sb i.2) :=
(hla.comap prod.fst).sup (hlb.comap prod.snd)
end two_types
end filter
end sort
namespace filter
variables {α β γ ι : Type*} {ι' : Sort*}
/-- `is_countably_generated f` means `f = generate s` for some countable `s`. -/
class is_countably_generated (f : filter α) : Prop :=
(out [] : ∃ s : set (set α), s.countable ∧ f = generate s)
/-- `is_countable_basis p s` means the image of `s` bounded by `p` is a countable filter basis. -/
structure is_countable_basis (p : ι → Prop) (s : ι → set α) extends is_basis p s : Prop :=
(countable : (set_of p).countable)
/-- We say that a filter `l` has a countable basis `s : ι → set α` bounded by `p : ι → Prop`,
if `t ∈ l` if and only if `t` includes `s i` for some `i` such that `p i`, and the set
defined by `p` is countable. -/
structure has_countable_basis (l : filter α) (p : ι → Prop) (s : ι → set α)
extends has_basis l p s : Prop :=
(countable : (set_of p).countable)
/-- A countable filter basis `B` on a type `α` is a nonempty countable collection of sets of `α`
such that the intersection of two elements of this collection contains some element
of the collection. -/
structure countable_filter_basis (α : Type*) extends filter_basis α :=
(countable : sets.countable)
-- For illustration purposes, the countable filter basis defining (at_top : filter ℕ)
instance nat.inhabited_countable_filter_basis : inhabited (countable_filter_basis ℕ) :=
⟨{ countable := countable_range (λ n, Ici n),
..(default : filter_basis ℕ) }⟩
lemma has_countable_basis.is_countably_generated {f : filter α} {p : ι → Prop} {s : ι → set α}
(h : f.has_countable_basis p s) :
f.is_countably_generated :=
⟨⟨{t | ∃ i, p i ∧ s i = t}, h.countable.image s, h.to_has_basis.eq_generate⟩⟩
lemma antitone_seq_of_seq (s : ℕ → set α) :
∃ t : ℕ → set α, antitone t ∧ (⨅ i, 𝓟 $ s i) = ⨅ i, 𝓟 (t i) :=
begin
use λ n, ⋂ m ≤ n, s m, split,
{ exact λ i j hij, bInter_mono (Iic_subset_Iic.2 hij) (λ n hn, subset.refl _) },
apply le_antisymm; rw le_infi_iff; intro i,
{ rw le_principal_iff, refine (bInter_mem (finite_le_nat _)).2 (λ j hji, _),
rw ← le_principal_iff, apply infi_le_of_le j _, exact le_rfl },
{ apply infi_le_of_le i _, rw principal_mono, intro a, simp, intro h, apply h, refl },
end
lemma countable_binfi_eq_infi_seq [complete_lattice α] {B : set ι} (Bcbl : B.countable)
(Bne : B.nonempty) (f : ι → α) :
∃ (x : ℕ → ι), (⨅ t ∈ B, f t) = ⨅ i, f (x i) :=
begin
rw countable_iff_exists_surjective_to_subtype Bne at Bcbl,
rcases Bcbl with ⟨g, gsurj⟩,
rw infi_subtype',
use (λ n, g n), apply le_antisymm; rw le_infi_iff,
{ intro i, apply infi_le_of_le (g i) _, apply le_rfl },
{ intros a, rcases gsurj a with ⟨i, rfl⟩, apply infi_le }
end
lemma countable_binfi_eq_infi_seq' [complete_lattice α] {B : set ι} (Bcbl : B.countable) (f : ι → α)
{i₀ : ι} (h : f i₀ = ⊤) :
∃ (x : ℕ → ι), (⨅ t ∈ B, f t) = ⨅ i, f (x i) :=
begin
cases B.eq_empty_or_nonempty with hB Bnonempty,
{ rw [hB, infi_emptyset],
use λ n, i₀,
simp [h] },
{ exact countable_binfi_eq_infi_seq Bcbl Bnonempty f }
end
lemma countable_binfi_principal_eq_seq_infi {B : set (set α)} (Bcbl : B.countable) :
∃ (x : ℕ → set α), (⨅ t ∈ B, 𝓟 t) = ⨅ i, 𝓟 (x i) :=
countable_binfi_eq_infi_seq' Bcbl 𝓟 principal_univ
section is_countably_generated
protected lemma has_antitone_basis.mem [preorder ι] {l : filter α} {s : ι → set α}
(hs : l.has_antitone_basis s) (i : ι) : s i ∈ l :=
hs.to_has_basis.mem_of_mem trivial
/-- If `f` is countably generated and `f.has_basis p s`, then `f` admits a decreasing basis
enumerated by natural numbers such that all sets have the form `s i`. More precisely, there is a
sequence `i n` such that `p (i n)` for all `n` and `s (i n)` is a decreasing sequence of sets which
forms a basis of `f`-/
lemma has_basis.exists_antitone_subbasis {f : filter α} [h : f.is_countably_generated]
{p : ι' → Prop} {s : ι' → set α} (hs : f.has_basis p s) :
∃ x : ℕ → ι', (∀ i, p (x i)) ∧ f.has_antitone_basis (λ i, s (x i)) :=
begin
obtain ⟨x', hx'⟩ : ∃ x : ℕ → set α, f = ⨅ i, 𝓟 (x i),
{ unfreezingI { rcases h with ⟨s, hsc, rfl⟩ },
rw generate_eq_binfi,
exact countable_binfi_principal_eq_seq_infi hsc },
have : ∀ i, x' i ∈ f := λ i, hx'.symm ▸ (infi_le (λ i, 𝓟 (x' i)) i) (mem_principal_self _),
let x : ℕ → {i : ι' // p i} := λ n, nat.rec_on n (hs.index _ $ this 0)
(λ n xn, (hs.index _ $ inter_mem (this $ n + 1) (hs.mem_of_mem xn.2))),
have x_mono : antitone (λ i, s (x i)),
{ refine antitone_nat_of_succ_le (λ i, _),
exact (hs.set_index_subset _).trans (inter_subset_right _ _) },
have x_subset : ∀ i, s (x i) ⊆ x' i,
{ rintro (_|i),
exacts [hs.set_index_subset _, subset.trans (hs.set_index_subset _) (inter_subset_left _ _)] },
refine ⟨λ i, x i, λ i, (x i).2, _⟩,
have : (⨅ i, 𝓟 (s (x i))).has_antitone_basis (λ i, s (x i)) :=
⟨has_basis_infi_principal (directed_of_sup x_mono), x_mono⟩,
convert this,
exact le_antisymm (le_infi $ λ i, le_principal_iff.2 $ by cases i; apply hs.set_index_mem)
(hx'.symm ▸ le_infi (λ i, le_principal_iff.2 $
this.to_has_basis.mem_iff.2 ⟨i, trivial, x_subset i⟩))
end
/-- A countably generated filter admits a basis formed by an antitone sequence of sets. -/
lemma exists_antitone_basis (f : filter α) [f.is_countably_generated] :
∃ x : ℕ → set α, f.has_antitone_basis x :=
let ⟨x, hxf, hx⟩ := f.basis_sets.exists_antitone_subbasis in ⟨x, hx⟩
lemma exists_antitone_seq (f : filter α) [f.is_countably_generated] :
∃ x : ℕ → set α, antitone x ∧ ∀ {s}, (s ∈ f ↔ ∃ i, x i ⊆ s) :=
let ⟨x, hx⟩ := f.exists_antitone_basis in
⟨x, hx.antitone, λ s, by simp [hx.to_has_basis.mem_iff]⟩
instance inf.is_countably_generated (f g : filter α) [is_countably_generated f]
[is_countably_generated g] :
is_countably_generated (f ⊓ g) :=
begin
rcases f.exists_antitone_basis with ⟨s, hs⟩,
rcases g.exists_antitone_basis with ⟨t, ht⟩,
exact has_countable_basis.is_countably_generated
⟨hs.to_has_basis.inf ht.to_has_basis, set.countable_encodable _⟩
end
instance comap.is_countably_generated (l : filter β) [l.is_countably_generated] (f : α → β) :
(comap f l).is_countably_generated :=
let ⟨x, hxl⟩ := l.exists_antitone_basis in
has_countable_basis.is_countably_generated ⟨hxl.to_has_basis.comap _, countable_encodable _⟩
instance sup.is_countably_generated (f g : filter α) [is_countably_generated f]
[is_countably_generated g] :
is_countably_generated (f ⊔ g) :=
begin
rcases f.exists_antitone_basis with ⟨s, hs⟩,
rcases g.exists_antitone_basis with ⟨t, ht⟩,
exact has_countable_basis.is_countably_generated
⟨hs.to_has_basis.sup ht.to_has_basis, set.countable_encodable _⟩
end
end is_countably_generated
@[instance] lemma is_countably_generated_seq [encodable β] (x : β → set α) :
is_countably_generated (⨅ i, 𝓟 $ x i) :=
begin
use [range x, countable_range x],
rw [generate_eq_binfi, infi_range]
end
lemma is_countably_generated_of_seq {f : filter α} (h : ∃ x : ℕ → set α, f = ⨅ i, 𝓟 $ x i) :
f.is_countably_generated :=
let ⟨x, h⟩ := h in by rw h ; apply is_countably_generated_seq
lemma is_countably_generated_binfi_principal {B : set $ set α} (h : B.countable) :
is_countably_generated (⨅ (s ∈ B), 𝓟 s) :=
is_countably_generated_of_seq (countable_binfi_principal_eq_seq_infi h)
lemma is_countably_generated_iff_exists_antitone_basis {f : filter α} :
is_countably_generated f ↔ ∃ x : ℕ → set α, f.has_antitone_basis x :=
begin
split,
{ introI h, exact f.exists_antitone_basis },
{ rintros ⟨x, h⟩,
rw h.to_has_basis.eq_infi,
exact is_countably_generated_seq x },
end
@[instance] lemma is_countably_generated_principal (s : set α) : is_countably_generated (𝓟 s) :=
is_countably_generated_of_seq ⟨λ _, s, infi_const.symm⟩
@[instance] lemma is_countably_generated_pure (a : α) : is_countably_generated (pure a) :=
by { rw ← principal_singleton, exact is_countably_generated_principal _, }
@[instance] lemma is_countably_generated_bot : is_countably_generated (⊥ : filter α) :=
@principal_empty α ▸ is_countably_generated_principal _
@[instance] lemma is_countably_generated_top : is_countably_generated (⊤ : filter α) :=
@principal_univ α ▸ is_countably_generated_principal _
instance is_countably_generated.prod {f : filter α} {g : filter β}
[hf : f.is_countably_generated] [hg : g.is_countably_generated] :
is_countably_generated (f ×ᶠ g) :=
begin
simp_rw is_countably_generated_iff_exists_antitone_basis at hf hg ⊢,
rcases hf with ⟨s, hs⟩,
rcases hg with ⟨t, ht⟩,
refine ⟨_, hs.prod ht⟩,
end
end filter
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ># IOOS System Test: [Baseline Assessment Theme:](https://github.com/ioos/system-test/wiki/Development-of-Test-Themes) Water Temperature
# <markdowncell>
# ### Can we find high resolution water temperature data?
#
# #### Questions
# 1. Is it possible to discover and access water temperature information from sensors or satellites (obs)?
# 2. Is it possible to discover and access water temperature information from models?
# 3. Can obs and model data be compared?
# 4. Is the data of high enough resolution (spatial and temporal) to support recreational activities?
# 5. Can we see effects of upwelling/downwelling in the temperature data?
#
# #### Methodology
# * Define temporal and spatial bounds of interest, as well as parameters of interest
# * Search for available service endpoints in the NGDC CSW catalog meeting search criteria
# * Extract OPeNDAP data endpoints from model datasets and SOS endpoints from observational datasets
# * Obtain observation data sets from stations within the spatial boundaries
# * Plot observation stations on a map (red marker if not enough data)
# * Using DAP (model) endpoints, find all available model data sets that fall in the area of interest for the specified time range, and extract the model grid cell closest to each of the given station locations
# * Plot modelled and observed time series temperature data on same axes for comparison
#
# <headingcell level=4>
# import required libraries
# <codecell>
import datetime as dt
import numpy as np
from warnings import warn
from io import BytesIO
import folium
import netCDF4
from IPython.display import HTML
import iris
iris.FUTURE.netcdf_promote = True
from iris.exceptions import CoordinateNotFoundError, ConstraintMismatchError
import matplotlib.pyplot as plt
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import pandas as pd
from pyoos.collectors.coops.coops_sos import CoopsSos
import requests
from operator import itemgetter
from utilities import (fes_date_filter, collector2df, find_timevar, find_ij, nearxy, service_urls, mod_df,
get_coordinates, get_station_longName, inline_map)
# <headingcell level=4>
# don't assume that the notebook server was started in pylab mode
# <codecell>
%matplotlib inline
# <headingcell level=4>
# Specify Temporal and Spatial conditions
# <codecell>
bounding_box_type = "box"
# Bounding Box [lon_min, lat_min, lon_max, lat_max]
area = {'Hawaii': [-160.0, 18.0, -154., 23.0],
'Gulf of Maine': [-72.0, 41.5, -67.0, 46.0],
'New York harbor region': [-75., 39., -71., 41.5],
'Puerto Rico': [-75, 12, -55, 26],
'East Coast': [-77, 34, -70, 40],
'North West': [-130, 38, -121, 50],
'Gulf of Mexico': [-92, 28, -84, 31],
'Arctic': [-179, 63, -140, 80],
'North East': [-74, 40, -69, 42],
'Virginia Beach': [-78, 33, -74, 38]}
bounding_box = area['Gulf of Maine']
#temporal range - last 7 days and next 2 days (forecast data)
jd_now = dt.datetime.utcnow()
jd_start, jd_stop = jd_now - dt.timedelta(days=7), jd_now + dt.timedelta(days=2)
start_date = jd_start.strftime('%Y-%m-%d %H:00')
end_date = jd_stop.strftime('%Y-%m-%d %H:00')
print start_date,'to',end_date
# <headingcell level=4>
# Specify data names of interest
# <codecell>
# put the names in a dict for ease of access
data_dict = {}
sos_name = 'sea_water_temperature'
data_dict["temp"] = {"names": ['sea_water_temperature',
'water_temperature',
'sea_water_potential_temperature',
'water temperature',
'potential temperature',
'*sea_water_temperature',
'Sea-Surface Temperature',
'sea_surface_temperature',
'SST'],
"sos_name":["sea_water_temperature"]}
# <headingcell level=3>
# Search CSW for datasets of interest
# <codecell>
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw' # NGDC Geoportal
csw = CatalogueServiceWeb(endpoint,timeout=60)
# <codecell>
# convert User Input into FES filters
start, stop = fes_date_filter(start_date,end_date)
bbox = fes.BBox(bounding_box)
#use the search name to create search filter
or_filt = fes.Or([fes.PropertyIsLike(propertyname='apiso:AnyText',
literal='*%s*' % val,
escapeChar='\\',
wildCard='*',
singleChar='?') for val in data_dict["temp"]["names"]])
# try request using multiple filters "and" syntax: [[filter1,filter2]]
filter_list = [fes.And([ bbox, start, stop, or_filt]) ]
csw.getrecords2(constraints=filter_list,maxrecords=1000,esn='full')
print str(len(csw.records)) + " csw records found"
# <markdowncell>
# #### Dap URLs
# <codecell>
dap_urls = service_urls(csw.records)
#remove duplicates and organize
dap_urls = sorted(set(dap_urls))
print "Total DAP:",len(dap_urls)
print "\n".join(dap_urls[0:5])
# <markdowncell>
# #### SOS URLs
# <codecell>
sos_urls = service_urls(csw.records,service='sos:url')
#remove duplicates and organize
sos_urls = sorted(set(sos_urls))
print "Total SOS:",len(sos_urls)
print "\n".join(sos_urls)
# <markdowncell>
# ### Get most recent observations from NOAA-COOPS stations in bounding box
# <codecell>
start_time = dt.datetime.strptime(start_date,'%Y-%m-%d %H:%M')
end_time = dt.datetime.strptime(end_date,'%Y-%m-%d %H:%M')
iso_start = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
iso_end = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')
# Define the Coops collector
collector = CoopsSos()
print collector.server.identification.title
collector.variables = data_dict["temp"]["sos_name"]
collector.server.identification.title
# Don't specify start and end date in the filter and the most recent observation will be returned
collector.filter(bbox=bounding_box,
variables=data_dict["temp"]["sos_name"])
response = collector.raw(responseFormat="text/csv")
obs_loc_df = pd.read_csv(BytesIO(response.encode('utf-8')),
parse_dates=True,
index_col='date_time')
# Now let's specify start and end times
collector.start_time = start_time
collector.end_time = end_time
ofrs = collector.server.offerings
# <codecell>
obs_loc_df.head()
# <codecell>
stations = [sta.split(':')[-1] for sta in obs_loc_df['station_id']]
obs_lon = [sta for sta in obs_loc_df['longitude (degree)']]
obs_lat = [sta for sta in obs_loc_df['latitude (degree)']]
# <headingcell level=3>
# Request CSV response from SOS and convert to Pandas DataFrames
# <codecell>
ts_rng = pd.date_range(start=start_date, end=end_date)
ts = pd.DataFrame(index=ts_rng)
# Save all of the observation data into a list of dataframes
obs_df = []
for sta in stations:
raw_df = collector2df(collector, sta, sos_name)
col = 'Observed Data'
concatenated = pd.concat([raw_df, ts], axis=1)[col]
obs_df.append(pd.DataFrame(concatenated))
obs_df[-1].name = raw_df.name
# <markdowncell>
# ### Plot the Observation Stations on Map
# <codecell>
min_data_pts = 20
# Find center of bounding box
lat_center = abs(bounding_box[3]-bounding_box[1])/2 + bounding_box[1]
lon_center = abs(bounding_box[0]-bounding_box[2])/2 + bounding_box[0]
m = folium.Map(location=[lat_center, lon_center], zoom_start=6)
n = 0
for df in obs_df:
#get the station data from the sos end point
longname = df.name
lat = obs_loc_df['latitude (degree)'][n]
lon = obs_loc_df['longitude (degree)'][n]
popup_string = ('<b>Station:</b><br>'+ longname)
if len(df) > min_data_pts:
m.simple_marker([lat, lon], popup=popup_string)
else:
#popup_string += '<br>No Data Available'
        popup_string += '<br>Not enough data available<br>requested pts: ' + str(min_data_pts) + '<br>Available pts: ' + str(len(df))
m.circle_marker([lat, lon], popup=popup_string, fill_color='#ff0000', radius=10000, line_color='#ff0000')
n += 1
m.line(get_coordinates(bounding_box,bounding_box_type), line_color='#FF0000', line_weight=5)
inline_map(m)
# <markdowncell>
# ### Plot water temperature for each station
# <codecell>
for df in obs_df:
if len(df) > min_data_pts:
fig, axes = plt.subplots(figsize=(20,5))
df['Observed Data'].plot()
axes.set_title(df.name)
axes.set_ylabel('Temperature (C)')
# <markdowncell>
# ###Get model output from OPeNDAP URLS
# Try to open all the OPeNDAP URLS using Iris from the British Met Office. If we can open in Iris, we know it's a model result.
# <codecell>
name_in_list = lambda cube: cube.standard_name in data_dict['temp']['names']
constraint = iris.Constraint(cube_func=name_in_list)
# <codecell>
# Create list of model DataFrames for each station
model_df = []
for df in obs_df:
model_df.append(pd.DataFrame(index=ts.index))
model_df[-1].name = df.name
# Use only data within 0.10 degrees (about 10 km)
max_dist = 0.10
# Use only data where the standard deviation of the time series exceeds 0.01 (in the units of the variable, here degrees C).
# This eliminates flat line model time series that come from land points that should have had missing values.
min_var = 0.01
for url in dap_urls:
try:
print url
a = iris.load_cube(url, constraint)
# take first 30 chars for model name
mod_name = a.attributes['title'][0:30]
r = a.shape
timevar = find_timevar(a)
lat = a.coord(axis='Y').points
lon = a.coord(axis='X').points
jd = timevar.units.num2date(timevar.points)
start = timevar.units.date2num(jd_start)
istart = timevar.nearest_neighbour_index(start)
stop = timevar.units.date2num(jd_stop)
istop = timevar.nearest_neighbour_index(stop)
# Only proceed if we have data in the range requested.
if istart != istop:
nsta = len(stations)
if len(r) == 4:
print('[Structured grid model]:', url)
zc = a.coord(axis='Z').points
zlev = max(enumerate(zc),key=itemgetter(1))[0]
d = a[0, 0, :, :].data
# Find the closest non-land point from a structured grid model.
if len(lon.shape) == 1:
lon, lat = np.meshgrid(lon, lat)
j, i, dd = find_ij(lon, lat, d, obs_lon, obs_lat)
for n in range(nsta):
# Only use if model cell is within 0.1 degree of requested
# location.
if dd[n] <= max_dist:
arr = a[istart:istop, zlev, j[n], i[n]].data
if arr.std() >= min_var:
c = mod_df(arr, timevar, istart, istop,
mod_name, ts)
name = obs_df[n].name
model_df[n] = pd.concat([model_df[n], c], axis=1)
model_df[n].name = name
elif len(r) == 3:
zc = a.coord(axis='Z').points
zlev = max(enumerate(zc),key=itemgetter(1))[0]
print('[Unstructured grid model]:', url)
# Find the closest point from an unstructured grid model.
index, dd = nearxy(lon.flatten(), lat.flatten(),
obs_lon, obs_lat)
for n in range(nsta):
# Only use if model cell is within 0.1 degree of requested
# location.
if dd[n] <= max_dist:
arr = a[istart:istop, zlev, index[n]].data
if arr.std() >= min_var:
c = mod_df(arr, timevar, istart, istop,
mod_name, ts)
name = obs_df[n].name
model_df[n] = pd.concat([model_df[n], c], axis=1)
model_df[n].name = name
elif len(r) == 1:
print('[Data]:', url)
except (ValueError, RuntimeError, CoordinateNotFoundError,
ConstraintMismatchError) as e:
warn("\n%s\n" % e)
pass
# <markdowncell>
# ### Plot Modeled vs Obs Water Temperature
# <codecell>
%matplotlib inline
count = 0
for df in obs_df:
if not model_df[count].empty and not df.empty:
fig, ax = plt.subplots(figsize=(20,5))
# Plot the model data
model_df[count].plot(ax=ax, title=model_df[count].name)
# Overlay the obs data (resample to hourly instead of 6 mins!)
df['Observed Data'].resample('H', how='mean').plot(ax=ax, title=df.name, color='k', linewidth=2)
ax.set_ylabel('Sea Water Temperature (C)')
ax.legend(loc='right')
plt.show()
count += 1
# <markdowncell>
# ###Conclusions
#
# * Observed water temperature data can be obtained through CO-OPS stations
# * It is possible to obtain modeled forecast water temperature data, just not in all locations.
# * It was not possible to compare the obs and model data because the model data was only available as a forecast.
# * The observed data was available in high resolution (6 mins), making it useful to support recreational activities like surfing.
# * When combined with wind direction and speed, it may be possible to see the effects of upwelling/downwelling on water temperature.
# <codecell>
|
Formal statement is: lemma continuous_complex_iff: "continuous F f \<longleftrightarrow> continuous F (\<lambda>x. Re (f x)) \<and> continuous F (\<lambda>x. Im (f x))" Informal statement is: A complex-valued function is continuous if and only if its real and imaginary parts are continuous. |
Third Division 1 : 1971 – 72
|
program plothalo
external halodens,diskdensf,bulgedens
character*60 filename
character ans
integer*4 ibuf1(15)
filename='dbh.dat'
c call readdiskdf(filename,ibuf1)
call readharmfile(filename,ibuf1)
open(20,file='scales',status='old',err=5)
read(20,*) rscale
close(20)
goto 6
    5 write(*,*) 'No file "scales" found.'
write(*,*) 'Will use default length and speed scales instead.'
rscale=1.
6 continue
write(*,*) 'Scale for distances: ',rscale,' kpc.'
write(*,*) 'Will make panels for inner and outer halo density.'
write(*,*) 'Inner panel: bin size [kpc], no. of bins (max 100)?'
read(*,*) din,nin
write(*,*) 'Outer panel: bin size [kpc], no. of bins (max 100)?'
read(*,*) dout,nout
write(*,*) 'Will also make a panel for bulge density.'
write(*,*) 'Bulge panel: bin size [kpc], no. of bins (max 100)?'
read(*,*) dbu,nbu
1 call pgbeg(0,'?',1,1)
write(*,*)
call pgvport(0.05,0.32,0.6,0.93)
call contourden(nin,din,halodens,1.,rscale)
call pglabel('R [kpc]','z [kpc]','Inner Halo Density')
call pgvport(0.37,0.64,0.6,0.93)
call contourden(nbu,dbu,bulgedens,1.,rscale)
call pglabel('R [kpc]','z [kpc]','Bulge Density')
call pgvport(0.69,0.96,0.6,0.93)
call contourden(nout,dout,halodens,1.,rscale)
call pglabel('R [kpc]','z [kpc]','Outer Halo Density')
call pgvport(0.05,0.96,0.1,0.42)
call contourden(nin,din,diskdensf,0.3,rscale)
call pglabel('R [kpc]','z [kpc]','Disk Density')
call modstamp
call pgend
write(*,*) 'Density contour levels jump by factor 2.'
write(*,*) 'Same plot, different device?'
read(*,'(a)') ans
if (ans.ne.'n' .and. ans.ne.'N') goto 1
end
|
<a href="https://colab.research.google.com/github/johnlex07/Datascience300/blob/main/ejercicio1(24_11_2021).ipynb" target="_parent"></a>
Exercise 1.
Create a vector a between 5 and 6 with a step of 0.2, save it to the file datos.npy, then load it with the name b and display it.
```python
from google.colab import drive
import os
drive.mount('/content/gdrive')
```
Mounted at /content/gdrive
```python
%cd '/content/gdrive/MyDrive/bootcamp/Semana 1-20211126T000634Z-001'
```
/content/gdrive/MyDrive/bootcamp/Semana 1-20211126T000634Z-001
```python
import numpy as np
a=np.arange(5,6,0.2)
print(a.tolist())
a.tofile('datos.npy')
b=np.fromfile('datos.npy')
print(b)
```
[5.0, 5.2, 5.4, 5.6000000000000005, 5.800000000000001]
[5. 5.2 5.4 5.6 5.8]
Exercise 2
Create a matrix A with np.array
\begin{equation}
\begin{bmatrix}
1 & 3 & 5\\
11 & 9 & 7\\
13 & 15 & 17
\end{bmatrix}
\end{equation}
Then use the np.sort method with the argument axis=1 or axis=0.
What conclusions do you reach?
```python
A=np.array([[1,3,5],[11,9,7],[13,15,17]])
print(A)
B=np.sort(A,axis=0)
print(B)
C=np.sort(A,axis=1)
print(C)
```
[[ 1 3 5]
[11 9 7]
[13 15 17]]
[[ 1 3 5]
[11 9 7]
[13 15 17]]
[[ 1 3 5]
[ 7 9 11]
[13 15 17]]
Exercise 3
Create a 3x3 matrix B, like the following:
\begin{equation}
\begin{bmatrix}
9 & 1 & 2\\
1 & 9 & 5\\
2 & 5 & 9
\end{bmatrix}
\end{equation}
1. Convert the matrix to a 1x9 vector
2. Sort the vector from smallest to largest
3. Apply a filter to extract only the numbers less than 9
```python
A=np.array([[9,1,2],[1,9,5],[2,5,9]])
print(A)
print('parte 1')
print(A.flatten())
print('parte 2')
b=np.sort(A.flatten())
print(b)
print('parte 3')
c=np.where(b<9)
print(c)
```
[[9 1 2]
[1 9 5]
[2 5 9]]
parte 1
[9 1 2 1 9 5 2 5 9]
parte 2
[1 1 2 2 5 5 9 9 9]
parte 3
(array([0, 1, 2, 3, 4, 5]),)
Exercise 4
Create a 3x3 matrix C, like the following:
\begin{equation}
\begin{bmatrix}
1 & 4 & 7\\
2 & 5 & 8\\
3 & 6 & 9
\end{bmatrix}
\end{equation}
1. Compute the mean of each column
2. Compute the mean of each row
3. Obtain the mean by columns, giving double weight to the column with the largest values of the variable
4. Compute the 25th percentile of each column
```python
C=np.array([[1,4,7],[2,5,8],[3,6,9]])
print(C)
print('parte 1')
print(np.mean(C,axis=1))
print('parte 2')
print(np.mean(C,axis=0))
print('parte 3')
print(np.average(C,axis=1, weights=(1,1,2)))
print('parte 4')
print(np.percentile(C,25,axis=1))
```
[[1 4 7]
[2 5 8]
[3 6 9]]
parte 1
[4. 5. 6.]
parte 2
[2. 5. 8.]
parte 3
[4.75 5.75 6.75]
parte 4
[2.5 3.5 4.5]
|
module Mimi
using Classes
using DataFrames
using DataStructures
using Distributions
using Electron
using JSON
using NamedArrays
using StringBuilders
export
@defcomp,
@defsim,
@defcomposite,
MarginalModel,
Model,
add_comp!,
# components,
connect_param!,
create_marginal_model,
dim_count,
dim_keys,
dim_key_dict,
disconnect_param!,
explore,
getdataframe,
gettime,
get_param_value,
get_var_value,
hasvalue,
is_first,
is_last,
is_time,
is_timestep,
modeldef,
name,
# parameters,
parameter_dimensions,
parameter_names,
plot_comp_graph,
replace_comp!,
set_dimension!,
set_leftover_params!,
set_param!,
TimestepIndex,
TimestepValue,
update_param!,
update_params!,
# variables,
variable_dimensions,
variable_names
include("core/delegate.jl")
include("core/types/includes.jl")
#
# After loading types and delegation macro, the rest can be loaded in any order.
#
include("core/build.jl")
include("core/connections.jl")
include("core/defs.jl")
include("core/defcomp.jl")
include("core/defmodel.jl")
include("core/defcomposite.jl")
include("core/dimensions.jl")
include("core/instances.jl")
include("core/references.jl")
include("core/time.jl")
include("core/time_arrays.jl")
include("core/model.jl")
include("core/order.jl")
include("core/paths.jl")
include("core/show.jl")
include("mcs/mcs.jl") # need mcs types for explorer and utils
include("explorer/explore.jl")
include("utils/getdataframe.jl")
include("utils/graph.jl")
include("utils/lint_helper.jl")
include("utils/misc.jl")
include("utils/plotting.jl")
# Load built-in components
include("components/adder.jl")
include("components/connector.jl")
end # module
|
[STATEMENT]
lemma fac_ext_hd:"u \<le>f w \<Longrightarrow> u \<le>f a#w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u \<le>f w \<Longrightarrow> u \<le>f a # w
[PROOF STEP]
by (metis sublist_Cons_right) |
% Author: Hans Skaug
% Copyright (c) 2008-2017
% ADMB Foundation and Regents of the University of California
\documentclass{admbmanual}
\makeatletter\@twosidefalse\makeatother
\hypersetup{urlcolor=black}
\newcommand{\citeasnoun}{\cite}
\newcommand{\scREML}{\textsc{reml}}
\newcommand{\scMCMC}{\textsc{mcmc}}
\newcommand{\scNLME}{\textsc{nlme}}
\newcommand{\scBUGS}{\textsc{bugs}}
\newcommand{\scWinBUGS}{Win\textsc{bugs}}
\newcommand{\scGAM}{\textsc{gam}}
\newcommand{\scGLM}{\textsc{glm}}
\newcommand{\scGLMM}{\textsc{glmm}}
\newcommand{\scLIDAR}{\textsc{lidar}}
\newcommand\admbversion{12.0}
\newcommand\admbyear{2017}
\newcommand\admbdate{2017-12-20}
\makeindex
\begin{document}
\title{%
\largetitlepart{Random Effects in\\\ADM}
\smalltitlepart{ADMB-RE User Guide}
\vspace{4.5ex}\textsf{\textit{Version \admbversion~~(\admbdate)\\[3pt]
~% Revised manual~~(yyyy-mm-dd)
}}\vspace{3ex}
}
\author{\textsf{\textit{Hans Skaug \& David Fournier}}}
\manualname{Admb-Re}
\maketitle
~\vfill
\noindent ADMB Foundation, Honolulu.\\\\
\noindent This is the manual for AD Model Builder with Random Effects (ADMB-RE)
version \admbversion.\\\\
\noindent Copyright \copyright\ 2004--\admbyear\ Hans Skaug \& David~Fournier\\\\
\noindent The latest edition of the manual is available at:\\
\url{http://www.admb-project.org/docs/manuals/}
\tableofcontents
\chapter*{Preface}
A comment about notation:
Important points are emphasized with a star
\begin{itemize}
\item[$\bigstar$] like this.
\end{itemize}
Please submit all comments and complaints by email to
\href{mailto:[email protected]}{[email protected]}.
\chapter{Introduction}
This document is a user's guide to random-effects modelling in \ADM\ (\scAB).
Random effects are a feature of \scAB, in the same way as profile likelihoods
are, but are sufficiently complex to merit a separate user manual. The work on
the random-effects ``module'' (\scAR) started around 2003. The pre-existing part
of \scAB\ (and its evolution) is referred to as ``ordinary'' \scAB\ in the
following.
Before you start with random effects, it is recommended that you have some
experience with ordinary \scAB. This manual tries to be self-contained, but it
is clearly an advantage if you have written (and successfully run) a few
\textsc{tpl} files. Ordinary \scAB\ is described in the \scAB\
manual~\cite{admb_manual}, which is available from
\href{mailto:admb-project.org}{admb-project.org}. If you are new to \scAB, but
have experience with \cplus\ (or a similar programming language), you may
benefit from taking a look at the quick references in Appendix~\ref{sec:quick}.
\scAR\ is very flexible. The term ``random effect'' seems to indicate that it
only can handle mixed-effect regression type models, but this is very
misleading. ``Latent variable'' would have been a more precise term. It can be
argued that \scAR\ is the most flexible latent variable framework around. The
mixed-model facilities in software packages such as R, Stata, and \textsc{spss}
allow only very specific models to be fit, and do not, for example, let you
change the distribution of the random effects. The \textsc{nlmixed} macro in
\textsc{sas} is more flexible, but
cannot handle state-space models or models with crossed random effects.
\scWinBUGS\ is the only exception, and its ability to handle discrete latent
variables is a bit more flexible than is \scAR's. However, \scWinBUGS\ does all
its computations using \scMCMC\ exclusively, while \scAR\ lets the user choose
between maximum likelihood estimation (which, in general, is much faster) and
\scMCMC.
An important part of the \scAR\ documentation is the example collection, which
is described in Appendix~\ref{sec:example_collection}. As with the example
collections for \scAB\ and \scAD, you will find fully worked examples, including
data and code for the model. The examples have been selected to illustrate the
various aspects of \scAR, and are frequently referred to throughout this manual.
\section{Summary of features}
Why use \ADM\ for creating nonlinear random-effects models? The answer consists
of three words: ``flexibility,'' ``speed,'' and ``accuracy.'' To illustrate
these points, a number of examples compare \scAR\ with two existing packages:
\textsc{nlme}, which runs on R and S-Plus, and \scWinBUGS. In general, \scNLME\
is rather fast and it is good for the problems for which it was designed, but it
is quite inflexible. What is needed is a tool with at least the computational
power of \textsc{nlme} yet the flexibility to deal with arbitrary nonlinear
random-effects models. In Section~\ref{lognormal}, we consider a thread from the
R user list, where a discussion took place about extending a model to use random
effects with a log-normal, rather than normal, distribution. This appeared to be
quite difficult. With \scAR, this change takes one line of code. \scWinBUGS, on
the other hand, is very flexible, and many random-effects models can be easily
formulated in it. However, it can be very slow. Furthermore, it is necessary to
adopt a Bayesian perspective, which may be a problem for some applications. A
model which runs 25~times faster under \scAB\ than under \scWinBUGS\ may be
found in Section~\ref{sec:logistic_example}.
\subsection{Model formulation}
With \scAB, you can formulate and fit a large class of nonlinear statistical
models. With \scAR, you can include random effects in your model. Examples of
such models include:
\begin{itemize}
\item Generalized linear mixed models (logistic and Poisson regression).
\item Nonlinear mixed models (growth curve models, pharmacokinetics).
\item State space models (nonlinear Kalman filters).
\item Frailty models in survival analysis.
\item Nonparametric smoothing.
\item Semiparametric modelling.
\item Bayesian hierarchical models.
\item General nonlinear random-effects models (fisheries catch-at-age models).
\end{itemize}
You formulate the likelihood function in a template file, using a language that
resembles \cplus. The file is compiled into an executable program (on Linux or
Windows). The whole \cplus\ language is at your disposal, giving you great
flexibility with respect to model formulation.
\subsection{Computational basis of \scAR}
\begin{itemize}
\item Hyper-parameters (variance components, etc.) estimated by maximum
likelihood.
\item Marginal likelihood evaluated by the Laplace approximation, (adaptive)
importance sampling or Gauss-Hermite integration.
\item Exact derivatives calculated using Automatic Differentiation.
\item Sampling from the Bayesian posterior using \scMCMC\ (Metropolis-Hastings
algorithm).
\item Most of the features of ordinary \scAB\ (matrix arithmetic and standard
errors, etc.) are available.
\item Sparse matrix libraries, useful for Markov random fields and crossed
random effects, are available.
\end{itemize}
\subsection{The strengths of \scAR}
\begin{itemize}
\item \textit{Flexibility:} You can fit a large variety of models within a
single framework.
\item \textit{Convenience:} Computational details are transparent. Your only
responsibility is to formulate the log-likelihood.
\item \textit{Computational efficiency:} \scAR\ is up to 50 times faster than
\scWinBUGS.
\item \textit{Robustness:} With exact derivatives, you can fit highly
nonlinear models.
\item \textit{Convergence diagnostic:} The gradient of the likelihood function
provides a clear convergence diagnostic.
\end{itemize}
\subsection{Program interface}
\begin{itemize}
\item\textit{Model formulation}: You fill in a \cplus-based template using
your favorite text editor.
\item \textit{Compilation}: You turn your model into an executable program
using a \cplus\ compiler (which you need to install separately).
\item\textit{Platforms}: Windows, Linux and Mac.
\end{itemize}
\subsection{How to obtain \scAR}
\scAR\ is a module for \scAB. Both can be obtained from
\href{admb-project.org}{admb-project.org}.
\chapter{The Language and the Program}
\section{What is ordinary \scAB?}
\scAB\ is a software package for doing parameter estimation in nonlinear models.
It combines a flexible mathematical modelling language (built on \cplus) with a
powerful function minimizer (based on Automatic Differentiation). The following
features of \scAB\ make it very useful for building and fitting nonlinear models
to data:
\begin{itemize}
\item Vector-matrix arithmetic and vectorized operations for common
mathematical functions.
\item Reading and writing vector and matrix objects to a file.
 \item Fitting the model in a stepwise manner (with ``phases''), where more and
more parameters become active in the minimization.
\item Calculating standard deviations of arbitrary functions of the model
parameters by the ``delta method.''
\item \scMCMC\ sampling around the posterior mode.
\end{itemize}
To use random effects in \scAB, it is recommended that you have some experience
in writing ordinary \scAB\ programs. In this section, we review, for the benefit
of the reader without this experience, the basic constructs of \scAB.
Model fitting with \scAB\ has three stages: 1) model formulation, 2) compilation
and 3) program execution. The model fitting process is typically iterative:
after having looked at the output from stage~3, one goes back to stage~1 and
modifies some aspect of the program.
\subsection{Writing an \scAB\ program}
% \XX{\fontindexentry{sc}{tpl} file}{writing}
\index{TPL@\textsc{tpl} file!writing}
To fit a statistical model to data, we must carry out certain fundamental tasks,
such as reading data from file, declaring the set of parameters that should be
estimated, and finally giving a mathematical description of the model. In \scAB,
you do all of this by filling in a template, which is an ordinary text file with
the file-name extension \texttt{.tpl} (and hence the template file is known as
the \textsc{tpl}~file). You therefore need a text editor---such as \textit{vi}
under Linux, or Notepad under Windows---to write the \textsc{tpl}~file. The
first \textsc{tpl}~file to which the reader of the ordinary \scAB\ manual is
exposed is \texttt{simple.tpl} (listed in Section~\ref{sec:code example} below).
We shall use \texttt{simple.tpl} as our generic \textsc{tpl}~file, and we shall
see that introduction of random effects only requires small changes to the
program.
A \textsc{tpl}~file is divided into a number of ``sections,'' each representing
one of the fundamental tasks mentioned above. See
Table~\ref{tab:required-sections} for the required sections.
\begin{table}[htbp]
\begin{center}
\begin{tabular}%
{@{\vrule height 12pt depth 6pt width0pt}@{\extracolsep{1em}} ll}
\hline
\textbf{Name}
& \textbf{Purpose} \\
\hline\\[-16pt]
\texttt{DATA\_SECTION}
& Declare ``global'' data objects, initialization from file.\\
\texttt{PARAMETER\_SECTION}
& Declare independent parameters. \\
\texttt{PROCEDURE\_SECTION}
& Specify model and objective function in \cplus. \\[3pt]
\hline
\end{tabular}
\end{center}
\caption{Required sections.}
\label{tab:required-sections}
\end{table}
More details are given when we later look at \texttt{simple.tpl}, and a quick
reference card is available in Appendix~\ref{sec:quick}.
\subsection{Compiling an \scAB\ program}
% \XX{\fontindexentry{sc}{tpl} file}{compiling}
\index{TPL@\textsc{tpl} file!compiling}
After having finished writing \texttt{simple.tpl}, we want to convert it into an
executable program. This is done in a \textsc{DOS}-window under Windows, and in
an ordinary terminal window under Linux. To compile \texttt{simple.tpl}, we
would, under both platforms, give the command:
\begin{lstlisting}
$ admb -r simple
\end{lstlisting}
Here, \texttt{\$} is the command line prompt (which may be a different symbol,
or symbols, on your computer), and \texttt{-r} is an option telling the program
\texttt{admb} that your model contains random effects. The program \texttt{admb}
accepts another option, \texttt{-s}, which produces the ``safe'' (but slower)
version of the executable program. The \texttt{-s} option should be used in a
debugging phase, but it should be skipped when the final production version of
the program is generated.
The compilation process really consists of two steps.
In the first step, \texttt{simple.tpl} is converted to a \cplus\ program by a
translator called \texttt{tpl2rem} in the case of \scAR, and \texttt{tpl2cpp}
in the case of ordinary \scAB\ (see Appendix~\ref{sec:quick}). An error message
from \texttt{tpl2rem} consists of a single line of text, with a reference to the
line in the \textsc{tpl}~file where the error occurs. If successful, the first
compilation step results in the \cplus\ file \texttt{simple.cpp}.
In the second step, \texttt{simple.cpp} is compiled and linked using an ordinary
\cplus\ compiler (which is not part of \scAB). Error messages during this phase
typically consist of long printouts, with references to line numbers in
\texttt{simple.cpp}. To track down syntax errors, it may occasionally be useful
to look at the content of \texttt{simple.cpp}. When you understand what is wrong
in \texttt{simple.cpp}, you should go back and correct \texttt{simple.tpl} and
re-enter the command \texttt{admb -r simple}. When all errors have been removed,
the result will be an executable file, which is called either
\texttt{simple.exe} under Windows or \texttt{simple} under Linux. The
compilation process is illustrated in Section~\ref{sec:compiling}.
\subsection{Running an \scAB-program}
\index{TPL@\textsc{tpl} file!compiling}
The executable program is run in the same window as it was compiled. Note that
data are not usually part of the \scAB\ program (e.g., \texttt{simple.tpl}).
Instead, data are being read from a file with the file name
extension \texttt{.dat} (e.g., \texttt{simple.dat}). This brings us to the
naming convention used by \scAB\ programs for input and output files: the
executable automatically infers file names by adding an extension to its own
name. The most important files are listed in Table~\ref{tab:important-files}.
\begin{table}[htbp]
\begin{center}
\begin{tabular}{@{\vrule height 12pt depth 6pt width0pt} lll}
\hline
~ & \textbf{File name} & \textbf{Contents} \\
\hline\\[-17pt]
Input & \texttt{simple.dat} & Data for the analysis \\
~ & \texttt{simple.pin} & Initial parameter values\\
\hline
Output & \texttt{simple.par} & Parameter estimates \\
~ & \texttt{simple.std} & Standard deviations \\
~ & \texttt{simple.cor} & Parameter correlations \\
\hline
\end{tabular}
\end{center}
\caption{File naming convention.}
\label{tab:important-files}
\end{table}
You can use command line options to modify the behavior of the program at
runtime. The available command line options can be listed by typing:
\begin{lstlisting}
$ simple -?
\end{lstlisting}
(or whatever your executable is called in place of \texttt{simple}). The command
line options that are specific to \scAR\ are listed in
Appendix~\ref{sec:command_line_options}, and are discussed in detail under the
various sections. An option you will probably like to use during an
experimentation phase is \texttt{-est}, which turns off calculation of standard
deviations, and hence reduces the running time of the program.
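For instance, to fit \texttt{simple.tpl} without the standard deviation report,
one could simply type (a usage sketch):
\begin{lstlisting}
$ simple -est
\end{lstlisting}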
\subsection{\scAB-\textsc{ide}: easy and efficient user interface}
The graphical user interface to \scAB\ by Arni Magnusson simplifies the process
of building and running the model, especially for the
beginner~\cite{admb_news_july09}. Among other things, it provides syntax
highlighting and links error messages from the \cplus\ compiler to the
\texttt{.cpp}~ file.
\subsection{Initial values}
The initial values can be provided in different ways (see the ordinary \scAB\
manual). Here, we only describe the \texttt{.pin}~file approach. The
\texttt{.pin}~file should contain legal values (within the bounds) for all the
parameters, including the random effects. The values must be given in the same
order as the parameters are defined in the \texttt{.tpl} file. The easiest way
of generating a \texttt{.pin}~file with the right structure is to first run the
program with a \texttt{-maxfn 0} option (for this, you do not need a
\texttt{.pin} file), then copy the resulting \texttt{.p01} file to a
\texttt{.pin} file, and then edit it to provide the correct numeric values. More
information about what initial values for random effects really mean is given in
Section~\ref{sec:hood}.
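As an illustration, a \texttt{.pin} file for \texttt{simple.tpl} (see
Section~\ref{sec:code example}) might look as follows. This is only a sketch: it
assumes \texttt{nobs} equals 3, the numbers are purely illustrative, and the
comment lines (starting with \texttt{\#}) are optional:
\begin{lstlisting}
# a
1.0
# b
0.0
# mu
0.0
# sigma_Y
1.0
# sigma_x
1.0
# x
0.0 0.0 0.0
\end{lstlisting}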
\section{Why random effects?}
Many people are familiar with the method of least squares for parameter
estimation. Far fewer know about random effects modeling. The use of random
effects requires that we adopt a statistical point of view, where the sum of
squares is interpreted as being part of a likelihood function. When data are
correlated, the method of least squares is sub-optimal, or even biased. But
relax---random effects come to the rescue! \index{random effects}
The classical motivation of random effects is:
\begin{itemize}
\item To create parsimonious and interpretable correlation structures.
\item To account for additional variation or overdispersion.
\end{itemize}
We shall see, however, that random effects are useful in a much wider context,
for instance, in non-parametric smoothing. % (\ref{}).
\subsection{Statistical prerequisites}
To use random effects in \scAB, you must be familiar with the notion of a random
variable and, in particular, with the normal distribution. In case you are not,
please consult a standard textbook in statistics. The notation $u\sim
N(\mu,\sigma^2)$ is used throughout this manual, and means that $u$ has a normal
(Gaussian) distribution with expectation $\mu$ and variance $\sigma^2$. The
distribution placed on the random effects is called the ``prior,'' which is a
term borrowed from Bayesian statistics.
A central concept that originates from generalized linear models is that of a
``linear predictor.'' Let $x_1,\ldots,x_p$ denote observed covariates
(explanatory variables), and let $\beta_1,\ldots,\beta_p$ be the corresponding
regression parameters to be estimated. Many of the examples in this manual
involve a linear predictor $\eta_i=\beta_1x_{1,i}+\cdots+\beta_px_{p,i}$, which
we also will write in vector form as $\mathbf{\eta}=\mathbf{X\beta}$.
\index{linear predictor}
\subsection{Frequentist or Bayesian statistics?}
A pragmatic definition of a ``frequentist'' is a person who prefers to estimate
parameters by the method of maximum likelihood. Similarly, a ``Bayesian'' is a
person who uses \scMCMC\ techniques to generate samples from the posterior
distribution (typically with noninformative priors on hyper-parameters), and
from these samples generates some summary statistic, such as the posterior mean.
With its \texttt{-mcmc} runtime option, \scAB\ lets you switch freely between
the two worlds. The approaches complement each other rather than being
competitors. A maximum likelihood fit ($\textrm{point estimate} +
\textrm{covariance matrix}$) is a step-1 analysis. For some purposes, step-1
analysis is sufficient. In other situations, one may want to see posterior
distributions for the parameters. In such situations, the established covariance
matrix (inverse Hessian of the log-likelihood) is used by \scAB\ to implement an
efficient Metropolis-Hastings algorithm (which you invoke with \texttt{-mcmc}).
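For example, for \texttt{simple.tpl} a posterior sample around the mode could be
generated with a command along the lines of (a sketch; the number following
\texttt{-mcmc} is the number of iterations):
\begin{lstlisting}
$ simple -mcmc 100000
\end{lstlisting}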
\subsection{A simple example}
We use the \texttt{simple.tpl} example from the ordinary \scAB\ manual to
exemplify the use of random effects. The statistical model underlying this
example is the simple linear regression
\[
Y_i=ax_i+b+\varepsilon_i,\qquad i=1,\ldots,n,
\]
where $Y_i$ and $x_i$ are the data, $a$ and $b$ are the unknown parameters to be
estimated, and $\varepsilon_i\sim N(0,\sigma^2)$ is an error term.
Consider now the situation where we do not observe $x_i$ directly, but rather
observe
\[
X_i=x_i+e_i,
\]
where $e_i$ is a measurement error term. This situation frequently occurs in
observational studies, and is known as the ``error-in-variables'' problem.
Assume further that $e_i\sim N(0,\sigma_e^2)$, where $\sigma_e^2$ is the
measurement error variance. For reasons discussed below, we shall assume that we
know the value of $\sigma_e$, so we shall pretend that $\sigma_e=0.5$.
Because $x_i$ is not observed, we model it as a random effect with $x_i\sim
N(\mu,\sigma_x^2)$. In \scAR, you are allowed to make such definitions
through the new parameter type called \texttt{random\_effects\_vector}.
\index{random effects!random effects vector} (There is also a
\texttt{random\_effects\_matrix}, which allows you to define a matrix of random
effects.) \index{random effects!random effects matrix}
\begin{enumerate}
\item Why do we call $x_i$ a ``random effect,'' while we do not use this term
for $X_i$ and $Y_i$ (though they clearly are ``random'')? The point is that
$X_i$ and $Y_i$ are observed directly, while $x_i$ is not. The term ``random
effect'' comes from regression analysis, where it means a random regression
coefficient. In a more general context, ``latent random variable'' is probably
a better term.
\item The unknown parameters in our model are: $a$, $b$, $\mu$, $\sigma$,
$\sigma_x$, and $x_1,\ldots,x_n$. We have agreed to call
$x_1,\ldots,x_n$ ``random effects.'' The rest of the parameters are called
``hyper-parameters.'' Note that we place no prior distribution on the
hyper-parameters.
\item Random effects are integrated out of the likelihood, while
hyper-parameters are estimated by maximum likelihood. \index{hyper-parameter}
This approach is often called ``empirical Bayes,'' and will be considered a
frequentist method by most people. There is however nothing preventing you
from making it ``more Bayesian'' by putting priors (penalties) on the
hyper-parameters.
\item A statistician will say, ``This model is nothing but a bivariate
Gaussian distribution for $(X,Y)$, and we don't need any random effects in
this situation.'' This is formally true, because we could work out the
covariance matrix of $(X,Y)$ by hand and fit the model using ordinary \scAB.
This program would probably run much faster, but it would have taken us longer
to write the code without declaring $x_i$ to be of type
\texttt{random\_effects\_vector}. However, more important is that random
effects can be used also in non-Gaussian (nonlinear) models where we are
unable to derive an analytical expression for the distribution of $(X,Y)$.
\item Why didn't we try to estimate $\sigma_e$? Well, let us count the
parameters in the model: $a$, $b$, $\mu$, $\sigma$, $\sigma_x$, and
$\sigma_e$. There are six parameters total. We know that the bivariate
Gaussian distribution has only five parameters (the means of $X$ and $Y$ and
three free parameters in the covariate matrix). Thus, our model is not
identifiable if we also try to estimate $\sigma_e$. Instead, we pretend that
we have estimated $\sigma_e$ from some external data source. This example
illustrates a general point in random effects modelling: you must be careful
to make sure that the model is identifiable!
\end{enumerate}
\section{A code example\label{sec:code example}}
Here is the random effects version of \texttt{simple.tpl}:
\begin{lstlisting}
DATA_SECTION
init_int nobs
init_vector Y(1,nobs)
init_vector X(1,nobs)
PARAMETER_SECTION
init_number a
init_number b
init_number mu
vector pred_Y(1,nobs)
init_bounded_number sigma_Y(0.000001,10)
init_bounded_number sigma_x(0.000001,10)
random_effects_vector x(1,nobs)
objective_function_value f
PROCEDURE_SECTION // This section is pure C++
f = 0;
pred_Y=a*x+b; // Vectorized operations
// Prior part for random effects x
f += -nobs*log(sigma_x) - 0.5*norm2((x-mu)/sigma_x);
// Likelihood part
f += -nobs*log(sigma_Y) - 0.5*norm2((pred_Y-Y)/sigma_Y);
f += -0.5*norm2((X-x)/0.5);
f *= -1; // ADMB does minimization!
\end{lstlisting}
\paragraph{Comments}
\begin{enumerate}
\item Everything following \texttt{//} is a comment.
 \item In the \texttt{DATA\_SECTION}, variables with an \texttt{init\_} in front
of the data type are read from file.
\item In the \texttt{PARAMETER\_SECTION}
\begin{itemize}
  \item Variables with an \texttt{init\_} in front of the data type are the
hyper-parameters, i.e., the parameters to be estimated by maximum
likelihood.
\item \texttt{random\_effects\_vector} defines the random effect vector.
(There is also a type called \texttt{random\_effects\_matrix}.) There can be
more than one such object, but they must all be defined after the
hyper-parameters are---otherwise, you will get an error message from the
translator \texttt{tpl2rem}.
\item Objects that are neither hyper-parameters nor random effects are
ordinary programming variables that can be used in the
\texttt{PROCEDURE\_SECTION}. For instance, we can assign a value to the
vector \texttt{pred\_Y}.
\item The objective function should be defined as the last variable.
\end{itemize}
\item The \texttt{PROCEDURE\_SECTION} basically consists of standard \cplus\
code, the primary purpose of which is to calculate the value of the objective
function.
\begin{itemize}
\item Variables defined in \texttt{DATA\_SECTION} and
\texttt{PARAMETER\_SECTION} may be used.
\item Standard \cplus\ functions, as well as special \scAB\ functions, such
as \texttt{norm2(x)} (which calculates $\sum x_i^2$), may be used.
\item Often the operations are vectorized, as in the case of
  \texttt{simple.tpl}.
\item \scAB\ does minimization, rather than optimization. Thus, the sign of
the log-likelihood function \texttt{f} is changed in the last line of the
code.
\end{itemize}
\end{enumerate}
\subsection{Parameter estimation}
We learned above that hyper-parameters are estimated by maximum likelihood, but
what if we also are interested in the value of the random effects? For this
purpose, \scAR\ offers an ``empirical Bayes'' approach, which involves fixing
the hyper-parameters at their maximum likelihood estimates, and treating the
random effects as the parameters of the model. \scAR\ automatically calculates
``maximum posterior'' estimates of the random effects for you. Estimates of both
hyper-parameters and random effects are written to \texttt{simple.par}.
\section{The flexibility of \scAR\label{lognormal}}
Say that you doubt the distributional assumption $x_i\sim N(\mu,\sigma_x^2)$
made in \texttt{simple.tpl}, and that you want to check if a skewed
distribution gives a better fit. You could, for instance, take
\[
x_i=\mu +\sigma_x\exp (z_i),\qquad z_i\sim N(0,1).
\]
Under this model, the standard deviation of $x_i$ is proportional to, but not
directly equal to, $\sigma_x$. It is easy to make this modification in
\texttt{simple.tpl}. In the \texttt{PARAMETER\_SECTION}, we replace the
declaration of \texttt{x} by
\begin{lstlisting}
vector x(1,nobs)
random_effects_vector z(1,nobs)
\end{lstlisting}
and in the \texttt{PROCEDURE\_SECTION} we replace the prior on \texttt{x} by
\begin{lstlisting}
f = - 0.5*norm2(z);
x = mu + sigma_x*exp(z);
\end{lstlisting}
This example shows one of the strengths of \scAR: it is very easy to modify
models. In principle, you can implement any random effects model you can think
of, but as we shall discuss later, there are limits to the number of random
effects you can declare.
\chapter{Random Effects Modeling}
\label{ch:random-effects-modeling}
This chapter describes all \scAR\ features except those related to
``separability,'' which are dealt with in Chapter~\ref{separability}.
Separability, or the Markov property, as it is called in statistics, is a
property possessed by many model classes. It allows \scAR\ to generate more
efficient executable programs. However, most \scAR\ concepts and techniques are
better learned and understood without introducing separability. Throughout much
of this chapter, we will refer to the program \texttt{simple.tpl} from
Section~\ref{sec:code example}.
\section{The objective function}
As with ordinary \scAB, the user specifies an objective function in terms of
data and parameters. However, in \scAR, the objective function must have the
interpretation of being a (negative) log-likelihood. One typically has a
hierarchical specification of the model, where at the top layer, data are
assumed to have a certain probability distribution conditionally on the random
effects (and the hyper-parameters), and at the next level, the random effects
are assigned a prior distribution (typically, normal). Because conditional
probabilities are multiplied to yield the joint distribution of data and random
effects, the objective function becomes a sum of (negative) log-likelihood
contributions, and the following rule applies:
\begin{itemize}
\item[$\bigstar$]
The order in which the different log-likelihood contributions are added to the
objective function does not matter.
\end{itemize}
An addition to this rule is that all programming variables must have their
values assigned before they enter a prior or a likelihood expression. \scWinBUGS\
users must take care when porting their programs to \scAB, because this is not
required in \scWinBUGS.
The reason why the {\it negative} log-likelihood is used is that for historical
reasons, \scAB\ does minimization (as opposed to maximization). In complex
models, with contributions to the log-likelihood coming from a variety of data
sources and random effects priors, it is recommended that you collect the
contributions to the objective function using the \ttminuseq\ operator of
\cplus, i.e.,
\begin{lstlisting}
f -= -nobs*log(sigma_x) - 0.5*norm2((x-mu)/sigma_x);
\end{lstlisting}
By using \ttminuseq\ instead of \ttpluseq, you do not have to change the sign of
every likelihood expression---which would be a likely source of error. When none
of the advanced features of Chapter~\ref{separability} are used, you are allowed
to switch the sign of the objective function at the end of the program:
\begin{lstlisting}
f *= -1; // ADMB does minimization!
\end{lstlisting}
so that in fact, \texttt{f} can hold the value of the log-likelihood until the
last line of the program.
It is OK to ignore constant terms ($0.5\log(2\pi)$, for the normal distribution)
as we did in \texttt{simple.tpl}. This only affects the objective function
value, not any other quantity reported in the \texttt{.par} and \texttt{.std}
files (not even the gradient value).
\section{The random effects distribution (prior)}
In \texttt{simple.tpl}, we declared $x_1,\ldots,x_n$ to be of type
\texttt{random\_effects\_vector}. This statement tells \scAB\ that
$x_1,\ldots,x_n$ should be treated as random effects (i.e., be the targets for
the Laplace approximation), but it does not say anything about what distribution
the random effects should have. We assumed that $x_i\sim N(\mu,\sigma_x^2)$,
and (without saying it explicitly) that the $x_i$s were statistically
independent. We know that the corresponding prior contribution to the
log-likelihood is
\[
-n\log (\sigma_x)-\frac{1}{2\sigma_x^2}\sum_{i=1}^{n}\left(x_i-\mu \right)^2
\]
with \scAB\ implementation
\begin{lstlisting}
f += -nobs*log(sigma_x) - 0.5*norm2((x-mu)/sigma_x);
\end{lstlisting}
Both the assumption about independence and normality can be generalized---as we
shortly will do---but first we introduce a transformation technique that forms
the basis for much of what follows later.
\subsection{Scaling of random effects}
A frequent source of error when writing \scAR\ programs is that priors get
wrongly specified. The following trick can make the code easier to read, and has
the additional advantage of being numerically stable for small values of
$\sigma_x$. From basic probability theory, we know that if $u\sim N(0,1)$,
then $x=\sigma_xu+\mu$ will have a $N(\mu,\sigma_x^2)$ distribution. The
corresponding \scAB\ code would be
\begin{lstlisting}
f += - 0.5*norm2(u);
x = sigma_x*u + mu;
\end{lstlisting}
(This, of course, requires that we change the type of \texttt{x} from
\texttt{random\_effects\_vector} to \texttt{vector}, and that \texttt{u} is
declared as a \texttt{random\_effects\_vector}.)
The trick here was to start with a $N(0,1)$ distributed random effect \texttt{u}
and to generate random effects \texttt{x} with another distribution. This is a
special case of a transformation. Had we used a non-linear transformation, we
would have gotten an \texttt{x} with a non-Gaussian distribution. The way we
obtain correlated random effects is also transformation based. However, as we
shall see in Chapter~\ref{separability}, transformation may ``break'' the
separability of the model, so there are limitations as to what transformations
can do for you.
\section{Correlated random effects\label{sec:correlated}}
\index{random effects!correlated}
In some situations, you will need correlated random effects, and as part of your
problem, you may want to estimate the elements of the covariance matrix. A
typical example is mixed regression, where the intercept random effect ($u_i$)
is correlated with the slope random effect~($v_i$):
\[
y_{ij}=(a+u_i)+\left(b+v_i\right)x_{ij}+\varepsilon_{ij}.
\]
(If you are not familiar with the notation, please consult an introductory book
on mixed regression, such as~\citeasnoun{pinh:bate:2000}.) In this case, we can
define the correlation matrix
\[
C=\left[\begin{array}{cc}
1 & \rho \\
\rho & 1\end{array}\right],
\]
and we want to estimate $\rho$ along with the variances of $u_i$ and $v_i$.
Here, it is trivial to ensure that $C$ is positive-definite, by requiring
$-1<\rho<1$, but in higher dimensions, this issue requires more careful
consideration.
To ensure that $C$ is positive-definite, you can parameterize the problem in
terms of the Cholesky factor $L$, i.e., $C=LL^\prime$, where $L$ is a lower
diagonal matrix with positive diagonal elements. There are $q(q-1)/2$ free
parameters (the non-zero elements of $L$) to be estimated, where $q$ is the
dimension of $C$. Since $C$ is a correlation matrix, we must ensure that its
diagonal elements are unity. An example with $q=4$ is
\begin{lstlisting}
PARAMETER_SECTION
matrix L(1,4,1,4) // Cholesky factor
init_vector a(1,6) // Free parameters in C
  init_bounded_vector sigma(1,4,0,10) // Standard deviations
PROCEDURE_SECTION
int k=1;
L(1,1) = 1.0;
for(i=2;i<=4;i++)
{
L(i,i) = 1.0;
for(j=1;j<=i-1;j++)
L(i,j) = a(k++);
L(i)(1,i) /= norm(L(i)(1,i)); // Ensures that C(i,i) = 1
}
\end{lstlisting}
Given the Cholesky factor $L$, we can proceed in different directions. One
option is to use the same transformation-of-variable technique as above: Start
out with a vector $u$ of independent $N(0,1)$ distributed random effects. Then,
the vector
\begin{lstlisting}
x = L*u;
\end{lstlisting}
has correlation matrix $C=LL^\prime$. Finally, we multiply each component of
\texttt{x} by the appropriate standard deviation:
\begin{lstlisting}
y = elem_prod(x,sigma);
\end{lstlisting}
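Putting the pieces together, a minimal sketch (with hypothetical declarations;
\texttt{u} holds the independent $N(0,1)$ random effects and \texttt{g} is the
objective function) could look like:
\begin{lstlisting}
PARAMETER_SECTION
  matrix L(1,4,1,4)                    // Cholesky factor
  init_vector a(1,6)                   // Free parameters in C
  init_bounded_vector sigma(1,4,0,10)  // Standard deviations
  random_effects_vector u(1,4)         // Independent N(0,1) effects
  vector x(1,4)
  vector y(1,4)                        // Correlated, scaled effects
  objective_function_value g
PROCEDURE_SECTION
  // ... fill in L from a as shown above ...
  g -= -0.5*norm2(u);      // N(0,1) prior on u
  x = L*u;                 // correlation matrix C = LL'
  y = elem_prod(x,sigma);  // scale by the standard deviations
\end{lstlisting}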
\subsection{Large structured covariance matrices}
In some situations, for instance, in spatial models, $q$ will be large ($q=100$,
say). Then it is better to use the approach outlined in
Section~\ref{gaussianprior}.
\section{Non-Gaussian random effects}
Usually, the random effects will have a Gaussian distribution, but technically
speaking, there is nothing preventing you from replacing the normality
assumption, such as
\begin{lstlisting}
f -= -nobs*log(sigma_x) - 0.5*norm2((x-mu)/sigma_x);
\end{lstlisting}
with a log gamma density, say. It can, however, be expected that the Laplace
approximation will be less accurate when you move away from normal priors.
Hence, you should instead use the transformation trick that we learned earlier,
but now with a non-linear transformation. A simple example of this yielding a
log-normal prior was given in Section~\ref{lognormal}.
Say you want $x$ to have cumulative distribution function $F(x)$. It is well
known that you achieve this by taking $x=F^{-1}(\Phi(u))$, where $\Phi$ is the
cumulative distribution function of the $N(0,1)$ distribution. For a few common
distributions, the composite transformation $F^{-1}(\Phi(u))$ has been coded up
for you in \scAR, and all you have to do is:
\begin{enumerate}
\item Define a random effect $u$ with a $N(0,1)$ distribution.
  \item Transform $u$ into a new random effect $x$ using one of the
  \texttt{something\_deviate} functions described below,
\end{enumerate}
where \texttt{something} is the name of the distribution.
As an example, say we want to obtain a vector~\texttt{x} of gamma distributed
random effects (probability density $x^{a-1}\exp(-x)/\Gamma(a)$). We can then
use the code:
\begin{lstlisting}
PARAMETER_SECTION
init_number a // Shape parameter
init_number lambda // Scale parameter
vector x(1,n)
random_effects_vector u(1,n)
objective_function_value g
PROCEDURE_SECTION
g -= -0.5*norm2(u); // N(0,1) likelihood contr.
for (i=1;i<=n;i++)
x(i) = lambda*gamma_deviate(u(i),a);
\end{lstlisting}
See a full example
\href{http://www.otter-rsch.com/admbre/examples/gamma/gamma.html}{here}.
Similarly, to obtain beta$(a,b)$ distributed random effects, with density
$f(x)\propto x^{a-1}(1-x)^{b-1}$, we use:
\begin{lstlisting}
PARAMETER_SECTION
init_number a
init_number b
PROCEDURE_SECTION
g -= -0.5*norm2(u); // N(0,1) likelihood contr.
for (i=1;i<=n;i++)
x(i) = beta_deviate(u(i),a,b);
\end{lstlisting}
The function \texttt{beta\_deviate()} has a fourth (optional) parameter that
controls the accuracy of the calculations. To learn more about this, you will
have to dig into the source code. You find the code for \texttt{beta\_deviate()}
in the file \texttt{df1b2betdev.cpp}. The mechanism for specifying default
parameter values is found in the source file \texttt{df1b2fun.h}.
A third example is provided by the ``robust'' normal distribution with
probability density
\begin{equation*}
f(x) = 0.95\frac{1}{\sqrt{2\pi}}e^{-0.5x^2}
+ 0.05\frac{1}{c\sqrt{2\pi}}e^{-0.5(x/c)^2}
\end{equation*}
where $c$ is a ``robustness'' parameter which by default is set to $c=3$ in
\texttt{df1b2fun.h}. Note that this is a mixture distribution consisting of 95\%
$N(0,1)$ and 5\% $N(0,c^2)$. The corresponding \scAR\ code is
\begin{lstlisting}
PARAMETER_SECTION
init_number sigma // Standard deviations (almost)
number c
PROCEDURE_SECTION
g -= - 0.5*norm2(u); // N(0,1) likelihood contribution from u's
for (i=1;i<=n;i++)
{
x(i) = sigma*robust_normal_mixture_deviate(u(i),c);
}
\end{lstlisting}
\subsection{Can $a$, $b$, and $c$ be estimated?}
As indicated by the data types used above,
\begin{itemize}
\item[$\bigstar$]
$a$ and $b$ are among the parameters that are being estimated.
\item[$\bigstar$]
$c$ cannot be estimated.
\end{itemize}
It would, however, be possible to write a version of
\texttt{robust\_normal\_mixture\_deviate} where also $c$ and the mixing
proportion (fixed at $0.95$ here) can be estimated. For this, you need to look
into the file \texttt{df1b2norlogmix.cpp}. The list of distributions that can be
used is likely to be expanded in the future.
\section{Built-in data likelihoods}
In \texttt{simple.tpl}, the mathematical expressions for all
log-likelihood contributions were written out in full detail. You may have
hoped that for the most common probability distributions, there were functions
written so that you would not have to remember or look up their log-likelihood
expressions. If your density is among those given in
Table~\ref{tab:distributions}, you are lucky. More functions are likely to be
implemented over time, and user contributions are welcomed!
We stress that these functions should only be used for data likelihoods, and
in fact, they will not compile if you try to let $X$ be a random effect. So, for
instance, if you have observations $x_i$ that are Poisson distributed with
expectation $\mu_i$, you would write
\begin{lstlisting}
for (i=1;i<=n;i++)
f -= log_density_poisson(x(i),mu(i));
\end{lstlisting}
Note that these functions do not accept vector arguments.
\begin{table}[htbp]
\begin{tabular}%
{@{\vrule height 12pt depth 6pt width0pt} @{\extracolsep{1em}} cccc}
\hline
\textbf{Density}
& \textbf{Expression}
& \textbf{Parameters}
& \textbf{Name} \\
\hline\\[-16pt]
Poisson
& $\frac{\mu^x}{\Gamma(x+1)}e^{-\mu}$
& $\mu>0$
& \texttt{log\_density\_poisson} \\[6pt]
Neg.\ binomial
& $\mu=E(X)$, $\tau=\frac{Var(X)}{E(X)}$
& $\mu,\tau>0$
& \texttt{log\_negbinomial\_density} \\[6pt]
\hline
\end{tabular}
\caption{Distributions that currently can be used as high-level data
distributions (for data $X$) in \scAR. The expression for the negative
binomial distribution is omitted, due to its somewhat complicated form.
Instead, the parameterization, via the overdispersion coefficient, is given.
The interested reader can look at the actual implementation in the source
  file \texttt{df1b2negb.cpp}.}
\label{tab:distributions}
\end{table}
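As a further illustration, for overdispersed count data $x_i$ with mean $\mu_i$
and overdispersion coefficient $\tau$, the corresponding loop would be expected
to look like this (a sketch, assuming the parameterization given in
Table~\ref{tab:distributions}):
\begin{lstlisting}
for (i=1;i<=n;i++)
  f -= log_negbinomial_density(x(i),mu(i),tau);
\end{lstlisting}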
\section{Phases}
\index{phases}
A very useful feature of \scAB\ is that it allows the model to be fit in
different phases. In the first phase, you estimate only a subset of the
parameters, with the remaining parameters being fixed at their initial values.
In the second phase, more parameters are turned on, and so it goes. The phase in
which a parameter becomes active is specified in the declaration of the
parameter. By default, a parameter has phase~1. A simple example would be:
\begin{lstlisting}
PARAMETER_SECTION
init_number a(1)
random_effects_vector b(1,10,2)
\end{lstlisting}
where \texttt{a} becomes active in phase~1, while \texttt{b}\ is a vector of
length~10 that becomes active in phase~2. With random effects, we have the
following rule-of-thumb (which may not always apply):
\begin{description}
\item[Phase 1] Activate all parameters in the data likelihood, except those
related to random effects.
\item[Phase 2] Activate random effects and their standard deviations.
\item[Phase 3] Activate correlation parameters (of random effects).
\end{description}
In complicated models, it may be useful to break Phase~1 into several
sub-phases.
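As a sketch of how this rule-of-thumb might translate into declarations (the
names and bounds are purely illustrative):
\begin{lstlisting}
PARAMETER_SECTION
  init_number b0(1)                          // regression coefficient: phase 1
  init_bounded_number sigma_u(0.000001,10,2) // random effect std. dev.: phase 2
  init_bounded_number rho(-0.99,0.99,3)      // correlation parameter: phase 3
  random_effects_vector u(1,n,2)             // random effects: phase 2
\end{lstlisting}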
During program development, it is often useful to be able to completely switch
off a parameter. A parameter is inactivated when given phase $-1$, as in
\begin{lstlisting}
PARAMETER_SECTION
init_number c(-1)
\end{lstlisting}
The parameter is still part of the program, and its value will still be read
from the \texttt{.pin}~file, but it does not take part in the optimization (in
any phase).
For further details about phases, please consult the section ``Carrying out the
minimization in a number of phases'' in the \scAB\ manual~\cite{admb_manual}.
\section{Penalized likelihood and empirical Bayes}
\label{sec:hood}
The main question we answer in this section is how are random effects estimated,
i.e., how are the values that enter the \texttt{.par} and \texttt{.std} files
calculated? Along the way, we will learn a little about how \scAR\ works
internally.
By now, you should be familiar with the statistical interpretation of random
effects. Nevertheless, how are they treated internally in \scAR? Since random
effects are not observed data, then they have parameter status---but we
distinguish them from hyper-parameters. In the marginal likelihood function used
internally by \scAR\ to estimate hyper-parameters, the random effects are
``integrated out.'' The purpose of the integration is to generate the marginal
probability distribution for the observed quantities, which are $X$ and $Y$ in
\texttt{simple.tpl}. In that example, we could have found an analytical
expression for the marginal distribution of $(X,Y)$, because only normal
distributions were involved. For other distributions, such as the binomial, no
simple expression for the marginal distribution exists, and hence we must rely
on \scAB\ to do the integration. In fact, the core of what \scAR\ does for you
is automatically calculate the marginal likelihood during its effort to estimate
the hyper-parameters. \index{random effects!Laplace approximation}
The integration technique used by \scAR\ is the so-called Laplace
approximation~\cite{skaug_fournier1996aam}. Somewhat simplified, the algorithm
involves iterating between the following two steps:
\begin{enumerate}
\item The ``penalized likelihood'' step: maximizing the likelihood with
respect to the random effects, while holding the value of the hyper-parameters
fixed. In \texttt{simple.tpl}, this means doing the maximization
w.r.t.~\texttt{x} only.
\item Updating the value of the hyper-parameters, using the estimates of the
random effects obtained in item~1.
\end{enumerate}
The reason for calling the objective function in step~1 a penalized likelihood
is that the prior on the random effects acts as a penalty function.
We can now return to the role of the initial values specified for the random
effects in the \texttt{.pin} file. Unless you use the command line option
\texttt{-noinit}, these values are not used (but dummy values must nevertheless
be provided in the \texttt{.pin} file), and each time step~1 above is performed,
the maximization of the penalized likelihood is initiated at $x=0$. If the
command line option \texttt{-noinit} is used, the values from the \texttt{.pin}
file are used the first time step~1 is performed; on subsequent occasions, the
previous optimum is used as the starting value.
\paragraph{Empirical Bayes} is commonly used to refer to Bayesian estimates of
the random effects, with the hyper-parameters fixed at their maximum likelihood
estimates. \scAR\ uses maximum \textit{aposteriori} Bayesian estimates, as
evaluated in step~1 above. Posterior expectation is a more commonly used as
Bayesian estimator, but it requires additional calculations, and is currently
not implemented in \scAR. For more details,
see~\citeasnoun{skaug_fournier1996aam}.
The classical criticism of empirical Bayes is that the uncertainty about the
hyper-parameters is ignored, and hence that the total uncertainty about the
random effects is underestimated. \scAR\ does, however, take this into account
and uses the following formula:
\begin{equation}
  \textrm{cov}(u) =
  -\left[\frac{\partial^2\log p(u\mid\hbox{data};\theta)}
  {\partial u\,\partial u^{\prime}}\right]^{-1}
  + \frac{\partial u}{\partial\theta}\,
  \hbox{cov}(\theta)\,
  \left(\frac{\partial u}{\partial\theta}\right)^{\prime}
  \label{eq:EB_variance}
\end{equation}
where $u$ is the vector of random effects, $\theta$ is the vector of
hyper-parameters, and $\partial u/\partial\theta$ is the sensitivity of the
penalized likelihood estimate of $u$ to the value of $\theta$. The first term on the
r.h.s.~is the ordinary Fisher information based variance of $u$, while the
second term accounts for the uncertainty in $\theta$.
\section{Building a random effects model that works}
In all nonlinear parameter estimation problems, there are two possible
explanations when your program does not produce meaningful results:
\begin{enumerate}
\item The underlying mathematical model is not well-defined, e.g., it may be
over-parameterized.
\item You have implemented the model incorrectly, e.g., you have forgotten a
minus sign somewhere.
\end{enumerate}
In an early phase of the code development, it may not be clear which of these is
causing the problem. With random effects, the two-step iteration scheme
described above makes it even more difficult to find the error. We therefore
advise you always to check the program on simulated data before you apply it to
your real data set. This section gives you a recipe for how to do this.
\index{penalized likelihood}
The first thing you should do after having finished the \textsc{tpl}~file is to
check that the penalized likelihood step is working correctly. In \scAB, it is
very easy to switch from a random-effects version of the program to a
penalized-likelihood version. In \texttt{simple.tpl}, we would simply redefine
the random effects vector \texttt{x} to be of type \texttt{init\_vector}. The
parameters would then be $a$, $b$, $\mu $, $\sigma $, $\sigma_x$, and
$x_1$,\ldots, $x_n$. It is not recommended, or even possible, to estimate
all of these simultaneously, so you should fix $\sigma_x$ (by giving it a
phase $-1$) at some reasonable value. The actual value at which you
fix $\sigma_x$ is not critically important, and you could even try a range of
$\sigma_x$ values. In larger models, there will be more than one parameter
that needs to be fixed. We recommend the following scheme:
\begin{enumerate}
\item Write a simulation program (in R, S-Plus, Matlab, or some other
program) that generates data from the random effects model (using some
reasonable values for the parameters) and writes to \texttt{simple.dat}.
\item Fit the penalized likelihood program with $\sigma_x$ (or the
equivalent parameters) fixed at the value used to simulate data.
\item Compare the estimated parameters with the parameter values used to
simulate data. In particular, you should plot the estimated $x_1,\ldots,x_n$
against the simulated random effects. The plotted points should center
around a straight line. If they do (to some degree of approximation), you
most likely have got a correct formulation of the penalized likelihood.
\end{enumerate}
If your program passes this test, you are ready to test the random-effects
version of the program. You redefine \texttt{x} to be of type
\texttt{random\_effects\_vector}, free up $\sigma_x$, and apply your program
again to the same simulated data set. If the program produces meaningful
estimates of the hyper-parameters, you most likely have implemented your model
correctly, and you are ready to move on to your real data!
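As a minimal sketch of the penalized-likelihood check (declarations only; the
names follow \texttt{simple.tpl}, while the bounds are merely illustrative):
\begin{lstlisting}
PARAMETER_SECTION
  // Random effects temporarily treated as ordinary parameters:
  init_vector x(1,n)                         // was: random_effects_vector x(1,n)
  // Variance of the random effects held fixed (phase -1) at the .pin value:
  init_bounded_number sigma_x(0.001,10.0,-1)
\end{lstlisting}
Once the check has passed, \texttt{x} is changed back to a
\texttt{random\_effects\_vector} and the phase of \texttt{sigma\_x} is made
positive again, so that it is estimated.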
With random effects, it often happens that the maximum likelihood estimate of a
variance component is zero ($\sigma_x=0$). Parameters bouncing against the
boundaries usually make one feel uncomfortable, but with random effects, the
interpretation of $\sigma_x=0$ is clear and unproblematic. All it really means
is that data do not support a random effect, and the natural consequence is to
remove (or inactivate) $x_1,\ldots,x_n$, together with the corresponding
prior (and hence $\sigma_x$), from the model.
\section{\scMCMC}
\index{MCMC@\textsc{mcmc}}
There are two different \scMCMC\ methods built into \scAR: \texttt{-mcmc} and
\texttt{-mcmc2}. Both are based on the Metropolis-Hastings algorithm. The former
generates a Markov chain on the hyper-parameters only, while the latter
generates a chain on the joint vector of hyper-parameters and random effects.
(Some sort of rejection sampling could be used with \texttt{-mcmc} to generate
values also for the random effects, but this is currently not implemented.) The
advantages of~\texttt{-mcmc} are:
\begin{itemize}
\item Because there typically is a small number of hyper-parameters, but a
large number of random effects, it is much easier to judge convergence of the
chain generated by~\texttt{-mcmc} than that generated by~\texttt{-mcmc2}.
\item The \texttt{-mcmc}~chain mixes faster than the \texttt{-mcmc2}~chain.
\end{itemize}
The disadvantage of the \texttt{-mcmc} option is that it is slow, because it
relies on evaluation of the marginal likelihood by the Laplace approximation. It
is recommended that you run both \texttt{-mcmc} and~\texttt{-mcmc2}
(separately), to verify that they yield the same posterior for the
hyper-parameters.
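For example (the chain length of $100000$ is only illustrative, and
\texttt{simple} stands in for your own executable):
\begin{lstlisting}
$ ./simple -mcmc 100000
$ ./simple -mcmc2 100000
\end{lstlisting}
The first line generates a chain on the hyper-parameters only, and the second a
joint chain on hyper-parameters and random effects.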
\section{Importance sampling}
The Laplace approximation may be inaccurate in some situations.
\index{importance sampling} The accuracy may be improved by adding an importance
sampling step. This is done in \scAR\ by using the command line argument
\texttt{-is N seed}, where \texttt{N} is the sample size in the importance
sampling and \texttt{seed} (optional) is used to initialize the random number
generator. Increasing \texttt{N} will give better accuracy, at the cost of a
longer run time. As a rule-of-thumb, you should start with $\texttt{N}=100$, and
increase~\texttt{N} stepwise by a factor of 2 until the parameter estimates
stabilize.
By running the model with different seeds, you can check the Monte Carlo error
in your estimates, and possibly average across the different runs to decrease
the Monte Carlo error. Replacing the \texttt{-is N seed} option with
\texttt{-isb N seed} gives you a ``balanced'' sample, which, in general,
should reduce the Monte Carlo error.
For large values of \texttt{N}, the option \texttt{-is N seed} will require a
lot of memory, and you will see that huge temporary files are produced during
the execution of the program. The option \texttt{-isf 5} will split the
calculations relating to importance sampling into 5 (you can replace the 5 with
any number you like) batches. In combination with the techniques discussed in
Section~\ref{Memory_management}, this should reduce the storage requirements. An
example of a command line is:
\begin{lstlisting}
lessafre -isb 1000 9811 -isf 20 -cbs 50000000 -gbs 50000000
\end{lstlisting}
The \texttt{-is} option can also be used as a diagnostic tool for checking the
accuracy of the Laplace approximation. If you add the \texttt{-isdiag} option,
the importance sampling weights will be printed at the end
of the optimization process. If these weights do not vary much, the Laplace
approximation is probably doing well. On the other hand, if a single weight
dominates the others by several orders of magnitude, you are in trouble, and it
is likely that even \texttt{-is N} with a large value of \texttt{N} is not going
to help you out. In such situations, reformulating the model, with the aim of
making the log-likelihood closer to a quadratic function in the random effects,
is the way to go. See also the following section.
\section{\scREML\ (Restricted maximum likelihood)}
\label{sec:reml}
\index{REML@\textsc{reml}}
It is well known that maximum likelihood estimators of variance parameters can
be downwards biased. The biases arise from estimation of one or more
mean-related parameters. The simplest example of a \scREML\ estimator is the
ordinary sample variance
$$
s^2 = \frac{1}{n-1}\sum_{i=1}^n(x_i-\bar x)^2,
$$
where the divisor $(n-1)$, rather than the $n$ that occurs for the maximum
likelihood estimator, accounts for the fact that we have estimated a single mean
parameter.
There are many ways of deriving the \scREML\ correction, but in the current
context, the most natural explanation is that we integrate the likelihood
function (\textit{note:} not the log-likelihood) with respect to the mean
parameters---$\beta$, say. This is achieved in \scAR\ by defining $\beta$ as
being of type \texttt{random\_effects\_vector}, but without specifying a
distribution/prior for the parameters. It should be noted that the only thing
that the \texttt{random\_effects\_vector} statement tells \scAR\ is that the
likelihood function should be integrated with respect to $\beta$. In
linear-Gaussian models, the Laplace approximation is exact, and hence this
approach yields exact \scREML\ estimates. In nonlinear models, the notion of
\scREML\ is more difficult, but \scREML-like corrections are still being used.
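A minimal sketch of the declarations involved (the names \texttt{log\_sigma},
\texttt{beta}, and \texttt{p} are hypothetical):
\begin{lstlisting}
PARAMETER_SECTION
  init_number log_sigma           // variance parameter: ordinary hyper-parameter
  random_effects_vector beta(1,p) // mean parameters: integrated out (REML);
                                  // note that no prior/penalty is added for beta
\end{lstlisting}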
For linear-Gaussian models, the \scREML\ likelihood is available in closed form,
and many linear models can be fitted with standard software packages. It is,
however, typically much simpler to formulate a hierarchical model with explicit
latent variables. As mentioned, the Laplace approximation is exact for Gaussian
models, so it does not matter which way you do it.
An example of such a model is found
\href{http://otter-rsch.com/admbre/examples/bcb/bcb.html}{here}. To make the
executable program run efficiently, the command line options
\texttt{-nr 1 -sparse} should be used for linear models. Also, note that
\scREML\ estimates can be obtained, as explained in Section~\ref{sec:reml}.
\section{Improving performance}
\label{sec:improving}
In this section, we discuss certain mechanisms you can use to make an \scAR\
program run faster and more smoothly.
\subsection{Reducing the size of temporary files}
\label{Memory_management}
\index{temporary files!reducing the size}
When \scAB\ needs more temporary storage than is available in the allocated
memory buffers, it starts producing temporary files. Since writing to disk is
much slower than accessing memory, it is important to reduce the size of
temporary files as much as possible. There are several parameters (such as
\texttt{arrmblsize}) built into \scAB\ that regulate how large of memory buffers
an \scAB\ program allocates at startup. With random effects, the memory
requirements increase dramatically, and \scAR\ deals with this by producing
(when needed) six temporary files; see Table~\ref{tab:temporary-files}.
\index{temporary files!f1b2list1@\texttt{f1b2list1}}
\begin{table}[htbp]
\begin{center}
\begin{tabular}{@{\vrule height 12pt depth 6pt width0pt} lc}
\hline
\textbf{File name} & \textbf{Command line option}\\
\hline
\texttt{f1b2list1} & \texttt{-l1 N} \\
\texttt{f1b2list12} & \texttt{-l2 N} \\
\texttt{f1b2list13} & \texttt{-l3 N} \\
\texttt{nf1b2list1} & \texttt{-nl1 N} \\
\texttt{nf1b2list12} & \texttt{-nl2 N} \\
\texttt{nf1b2list13} & \texttt{-nl3 N} \\
\hline
\end{tabular}
\end{center}
\caption{Temporary file command line options.}
\label{tab:temporary-files}
\end{table}
The table also shows the command line arguments you can use to manually set the
size (determined by~\texttt{N}) of the different memory buffers.
When you see any of these files starting to grow, you should kill your
application and restart it with the appropriate command line options. In
addition to the options shown above, there is \texttt{-ndb N}, which splits the
computations into $N$~chunks. This effectively reduces the memory requirements
by a factor of $N$---at the cost of a somewhat longer run time. $N$~must be a
divisor of the total number of random effects in the model, so that it is
possible to split the job into $N$ equally large parts. The \texttt{-ndb} option
can be used in combination with the \texttt{-l} and \texttt{-nl} options listed
above. The following rule-of-thumb for setting $N$ in~\texttt{-ndb N} can be
used: if there are a total of $m$ random effects in the model, one should
choose $N$ such that $m/N\approx 50$. For most of the models in the example
collection (Chapter~\ref{ch:random-effects-modeling}),
this choice of $N$ prevents any temporary files being created.
Consider \href{http://otter-rsch.com/admbre/examples/union/union.html}{this}
model as an example. It contains only about 60 random effects, but does rather
heavy computations with these. As a consequence, large temporary files are
generated. The following command line
\begin{lstlisting}
$ ./union -l1 10000000 -l2 100000000 -l3 10000000 -nl1 10000000
\end{lstlisting}
takes away the temporary files, but requires 80Mb of memory. The command line
\begin{lstlisting}
$ ./union -est -ndb 5 -l1 10000000
\end{lstlisting}
also runs without temporary files, requires only 20Mb of memory, but runs three
times slower.
Finally, a warning about the use of these command line options. If you allocate
too much memory, your application will crash, and you will (should) get a
meaningful error message. You should monitor the memory use of your application
(using ``Task Manager'' under Windows, and the command \texttt{top} under Linux)
to ensure that you do not exceed the available memory on your computer.
\subsection{Exploiting special model structure}
If your model has special structure, such as
grouped or nested random effects, state-space structure,
crossed random effects, or a general Markov structure,
you will benefit greatly from using the techniques
described in Section~\ref{separability} below.
In this case the memory options in Table~\ref{tab:temporary-files}
are less relevant (although sometimes useful), and instead the
memory use can be controlled with the classical \ADM\ command line options
\texttt{-cbs}, \texttt{-gbs} etc.
\subsection{Limited memory Newton optimization}
\index{limited memory quasi-Newton}
The penalized likelihood step (Section~\ref{sec:hood}), which forms a crucial
part of the algorithm used by \scAB\ to estimate hyper-parameters, is by default
conducted using a quasi-Newton optimization algorithm. If the number of random
effects is large---as it typically is for separable models---it may be more
efficient to use a ``limited memory quasi-Newton'' optimization algorithm. This
is done using the command line argument \texttt{-ilmn N}, where \texttt{N} is
the number of steps to keep. Typically, $\texttt{N} = 5$ is a good choice.
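For instance (\texttt{simple} again stands in for your own executable):
\begin{lstlisting}
$ ./simple -ilmn 5
\end{lstlisting}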
\chapter{Exploiting special structure (Separability)}
\label{separability}
A model is said to be ``separable'' if the likelihood can be written
as a product of terms, each involving only a small number of random effects.
Not all models are separable, and for small toy examples (fewer than 50~random
effects, say), we do not need to care about separability. For larger models,
however, you need to care about separability, both to reduce memory requirements
and computation time. Examples of separable models are
\begin{itemize}
\item Grouped or nested random effects
\item State-space models
\item Crossed random effects
\item Latent Markov random fields
\end{itemize}
The presence of separability allows \scAB\ to calculate the ``Hessian'' matrix
very efficiently. The Hessian $H$ is defined as the (negative) Fisher information
matrix (inverse covariance matrix) of the posterior distribution of the
random effects, and is a key component of the Laplace approximation.
How do we inform \scAR\ that the model is separable? We define
\texttt{SEPARABLE\_FUNCTION}s in the \texttt{PROCEDURE\_SECTION} to specify the
individual terms in the product that defines the likelihood function. Typically,
a \texttt{SEPARABLE\_FUNCTION} is invoked many times, with a small subset of the
random effects each time.
For separable models the Hessian is a sparse matrix which means that it contains
mostly zeros. Sparsity can be exploited by \scAB\ when manipulating the matrix
$H$, such as calculating its determinant. The actual sparsity pattern depends on
the model type:
\begin{itemize}
\item \textit{Grouped or nested random effects:} $H$ is block diagonal.
\item \textit{State-space models:} $H$ is a banded matrix with a narrow band.
\item \textit{Crossed random effects:} unstructured sparsity pattern.
\item \textit{Latent Markov random fields:} often banded, but with a wide
band.
\end{itemize}
For block diagonal and banded $H$, \scAR\ automatically will detect the
structure from the \texttt{SEPARABLE\_FUNCTION} specification, and will print
out a message such as:
\begin{lstlisting}
Block diagonal Hessian (Block size = 3)
\end{lstlisting}
at the beginning of the phase in which the random effects become active
parameters. For a general sparsity pattern, the command line option \texttt{-shess}
can be used to invoke the sparse matrix libraries for manipulation of the matrix
$H$.
\section{The first example}
A simple example is the one-way variance component model
\[
y_{ij}=\mu +\sigma_u u_i+\varepsilon_{ij},
\qquad i=1,\ldots,q,\quad j=1,\ldots,n_i
\]
where $u_i\sim N(0,1)$ is a random effect and $\varepsilon_{ij}\sim
N(0,\sigma^2)$ is an error term. The straightforward (non-separable)
implementation of this model (shown only in part) is
\begin{lstlisting}
PARAMETER_SECTION
random_effects_vector u(1,q)
PROCEDURE_SECTION
for(i=1;i<=q;i++)
{
g -= -0.5*square(u(i));
for(j=1;j<=n(i);j++)
g -= -log(sigma) - 0.5*square((y(i,j)-mu-sigma_u*u(i))/sigma);
}
\end{lstlisting}
The efficient (separable) implementation of this model is
\begin{lstlisting}
PROCEDURE_SECTION
for(i=1;i<=q;i++)
g_cluster(i,u(i),mu,sigma,sigma_u);
SEPARABLE_FUNCTION void g_cluster(int i, const dvariable& u,...)
g -= -0.5*square(u);
for(int j=1;j<=n(i);j++)
g -= -log(sigma) - 0.5*square((y(i,j)-mu-sigma_u*u)/sigma);
\end{lstlisting}
where (due to lack of space in this document) we've replaced the rest of the
argument list with ellipsis (\texttt{...}).
It is the function call \texttt{g\_cluster(i,u(i),mu,sigma,sigma\_u)} that
enables \scAR\ to identify that the posterior distribution
factors (over $i$):
\[
p(u \mid y) \propto \prod_{i=1}^q
\left\{\prod_{j=1}^{n_i}p \bigl(u_i \mid y_{ij}\bigr)\right\}
\]
and hence that the Hessian is block diagonal (with block size~1). Knowing that
the Hessian is block diagonal enables \scAR\ to do a series of univariate
Laplace approximations, rather than a single Laplace approximation in full
dimension $q$. It should then be possible to fit models where $q$ is on the
order of thousands, but this clearly depends on the complexity of the function
\texttt{g\_cluster}.
The following rules apply:
\begin{itemize}
\item[$\bigstar$] The argument list in the definition of the
\texttt{SEPARABLE\_FUNCTION} \textit{should not} be broken into several lines
of text in the \textsc{tpl}~file. This is often tempting, as the line
typically gets long, but it results in an error message from \texttt{tpl2rem}.
\item[$\bigstar$] Objects defined in the \texttt{PARAMETER\_SECTION}
\textit{must} be passed as arguments to \texttt{g\_cluster}. There is one
exception: the objective function \texttt{g} is a global object, and does not
need to be an argument. Temporary/programming variables should be defined
locally within the \texttt{SEPARABLE\_FUNCTION}.
\item[$\bigstar$] Objects defined in the \texttt{DATA\_SECTION} \textit{should
not} be passed as arguments to \texttt{g\_cluster}, as they are also global
objects.
\end{itemize}
The data types that currently can be passed as arguments to a
\texttt{SEPARABLE\_FUNCTION} are:
\begin{lstlisting}
int
const dvariable&
const dvar_vector&
const dvar_matrix&
\end{lstlisting}
with an example being:
\begin{lstlisting}
SEPARABLE_FUNCTION void f(int i, const dvariable& a, const dvar_vector& beta)
\end{lstlisting}
The qualifier \texttt{const} is required for the latter two data types, and
signals to the \cplus~compiler that the value of the variable is not going to be
changed by the function. You may also come across the type \texttt{const
prevariable\&}, which has the same meaning as \texttt{const dvariable\&}.
There are other rules that have to be obeyed:
\begin{itemize}
\item[$\bigstar$] No calculations of the log-likelihood, except calling
\texttt{SEPARABLE\_FUNCTION}, are allowed in \texttt{PROCEDURE\_SECTION}.
Hence, the only allowed use of parameters defined in
\texttt{PARAMETER\_SECTION} is to pass them as arguments to
\texttt{SEPARABLE\_FUNCTION}s. However, evaluation of \texttt{sdreport}
numbers during the \texttt{sd\_phase}, as well as MCMC calculations, are
allowed.
\end{itemize}
This rule implies that all the action has to take place inside the
\texttt{SEPARABLE\_FUNCTION}s. To minimize the number of parameters that have to be
passed as arguments, the following programming practice is recommended when
using \texttt{SEPARABLE\_FUNCTION}s:
\begin{itemize}
\item[$\bigstar$] The \texttt{PARAMETER\_SECTION} should contain definitions
only of the independent parameters (those variables whose type has a
\texttt{init\_} prefix) and random effects, i.e., no temporary programming
variables.
\end{itemize}
All temporary variables should be defined locally in the
\texttt{SEPARABLE\_FUNCTION}, as shown here:
\begin{lstlisting}
SEPARABLE_FUNCTION void prior(const dvariable& log_s, const dvariable& u)
dvariable sigma_u = exp(log_s);
g -= -log_s - 0.5*square(u/sigma_u);
\end{lstlisting}
See a full example
\href{http://otter-rsch.com/admbre/examples/orange/orange.html}{here}. The
orange model has block size 1.
\section{Nested or clustered random effects:\br block diagonal $H$}
\label{sec:nested}
In the above model, there was no hierarchical structure among the latent random
variables (the \texttt{u}s). A more complicated example is provided by the
following model:
\[
y_{ijk}= \sigma_v v_i + \sigma_u u_{ij}+\varepsilon_{ijk},
\qquad i=1,\ldots,q,\quad j=1,\ldots,m,\quad k=1,\ldots,n_{ij},
\]
where the random effects $v_i$ and $u_{ij}$ are independent
$N(0,1)$ distributed, and $\varepsilon_{ijk}\sim N(0,\sigma^2)$ is still the
error term. One often says that the \texttt{u}s are nested within
the~\texttt{v}s.
Another perspective is that the data can be split into independent clusters. For
$i_1\neq i_2$, $y_{i_1jk}$ and $y_{i_2jk}$ are statistically independent, so
that the likelihood factors at the outer nesting level~($i$).
To exploit this, we use the \texttt{SEPARABLE\_FUNCTION} as follows:
\begin{lstlisting}
PARAMETER_SECTION
random_effects_vector v(1,q)
random_effects_matrix u(1,q,1,m)
PROCEDURE_SECTION
for(i=1;i<=q;i++)
g_cluster(v(i),u(i),sigma,sigma_u,sigma_v,i);
\end{lstlisting}
Each element of \texttt{v} and each row (\texttt{u(i)}) of the matrix \texttt{u}
are passed only once to the separable function \texttt{g\_cluster}. This is the
criterion \scAB\ uses to detect the block diagonal Hessian structure. Note that
\texttt{v(i)} is passed as a single value while \texttt{u(i)} is passed as a
vector to the \texttt{SEPARABLE\_FUNCTION} as follows:
\begin{lstlisting}
SEPARABLE_FUNCTION void g_cluster(const dvariable& v,const dvar_vector& u,...)
g -= -0.5*square(v);
g -= -0.5*norm2(u);
for(int j=1;j<=m;j++)
for(int k=1;k<=n(i,j);k++)
g -= -log(sigma) - 0.5*square((y(i,j,k)
-sigma_v*v - sigma_u*u(j))/sigma);
\end{lstlisting}
\begin{itemize}
\item[$\bigstar$] For a model to be detected as ``Block diagonal Hessian,''
each latent variable should be passed \textit{exactly once} as an argument to
a \texttt{SEPARABLE\_FUNCTION}.
\end{itemize}
To ensure that you have not broken this rule, you should look for a message
like this at run time:
\begin{lstlisting}
Block diagonal Hessian (Block size = 3)
\end{lstlisting}
The ``block size'' is the number of random effects in each call
to the \texttt{SEPARABLE\_FUNCTION}, which in this case is
one \texttt{v(i)} and a vector \texttt{u(i)} of length two.
It is possible that the groups or clusters (as indexed by $i$, in this case) are
of different sizes. In that case, the block size printed in the ``Block diagonal
Hessian'' message is an average.
The program could have improperly been structured as follows:
\begin{lstlisting}
PARAMETER_SECTION
random_effects_vector v(1,q)
random_effects_matrix u(1,q,1,m)
PROCEDURE_SECTION
for(i=1;i<=q;i++)
for(j=1;j<=m;j++)
g_cluster(v(i),u(i,j),sigma,sigma_u,sigma_v,i);
\end{lstlisting}
but this would not be detected by \scAR\ as a clustered model (because
\texttt{v(i)} is passed multiple times), and hence \scAR\ will not be able to
take advantage of the block diagonal Hessian, as indicated by
the absence of the runtime message
\begin{lstlisting}
Block diagonal Hessian (Block size = 3)
\end{lstlisting}
\subsection{Gauss-Hermite quadrature}
\index{Gauss-Hermite quadrature}
In the situation where the model is separable of the ``Block diagonal
Hessian'' type (see Section~\ref{separability}), Gauss-Hermite quadrature is
available as an option to improve upon the Laplace approximation. It is invoked
with the command line option \texttt{-gh N}, where \texttt{N} is the number of
quadrature points determining the accuracy of the integral approximation. For a
block size of 1, the default choice should be $N=10$ or greater, but for larger
block sizes, the computational and memory requirements very quickly limit the
range of feasible $N$. If $N$ is chosen too large, \scAR\ will crash without giving a
meaningful error message. To avoid \scAR\ creating large temporary files, the
command line options \texttt{-cbs} and \texttt{-gbs} can be used.
The \texttt{-gh N} option should be preferred over importance sampling
(\texttt{-is}).
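A typical invocation could look as follows (the buffer sizes are the same
illustrative values as in Section~\ref{Memory_management}):
\begin{lstlisting}
$ ./simple -gh 10 -cbs 50000000 -gbs 50000000
\end{lstlisting}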
\section{State-space models: banded $H$}
\label{sec:state-space}\index{state-space models}
A simple state space model is
\begin{align*}
y_i &= u_i + \epsilon_i,\\
u_i &= \rho u_{i-1} + e_i,
\end{align*}
where $e_i\sim N(0,\sigma^2)$ is an innovation term. The log-likelihood
contribution coming from the state vector $(u_1,\ldots,u_n)$ is
\[
\sum_{i=2}^n \log\left(\frac{1}{\sqrt{2\pi}\sigma}
\exp\left[-\frac{(u_i-\rho u_{i-1})^2}{2\sigma^2}\right]\right),
\]
To make \scAR\ exploit this
special structure, we write a \texttt{SEPARABLE\_FUNCTION} named
\texttt{g\_conditional}, which implements the individual terms in the above sum.
This function would then be invoked as follows
\begin{lstlisting}
for(i=2;i<=n;i++)
g_conditional(u(i),u(i-1),rho,sigma);
\end{lstlisting}
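A sketch of what the corresponding \texttt{SEPARABLE\_FUNCTION} might look like
(the argument names are hypothetical, and the constant $-\tfrac{1}{2}\log(2\pi)$
is dropped, as in the earlier examples):
\begin{lstlisting}
SEPARABLE_FUNCTION void g_conditional(const dvariable& ui, const dvariable& uim1, const dvariable& rho, const dvariable& sigma)
  g -= -log(sigma) - 0.5*square((ui - rho*uim1)/sigma);
\end{lstlisting}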
See a full example
\href{http://www.otter-rsch.com/admbre/examples/polio/polio.html}{here}.
Above, we have looked at a model with a univariate state vector. For
multivariate state vectors, as in
\begin{align*}
y_i &= u_i + v_i +\epsilon_i,\\
u_i &= \rho_1 u_{i-1} + e_i,\\
v_i &= \rho_2 v_{i-1} + d_i,
\end{align*}
we would merge the $u$ and $v$ vectors into a single vector
$(u_1,v_1,u_2,v_2,\ldots,u_n,v_n)$, and define
\begin{lstlisting}
random_effects_vector u(1,m)
\end{lstlisting}
where $m=2n$. The call to the \texttt{SEPARABLE\_FUNCTION} would now look like
\begin{lstlisting}
for(i=2;i<=n;i++)
g_conditional(u(2*(i-2)+1),u(2*(i-2)+2),u(2*(i-2)+3),u(2*(i-2)+4),...);
\end{lstlisting}
where the ellipsis (\texttt{...}) denotes the arguments $\rho_1$, $\rho_2$,
$\sigma_e$, and $\sigma_d$.
\section{Crossed random effects: sparse $H$}
\index{crossed effects}
The simplest instance of a crossed random effects model is
\[
y_k = \sigma_u u_{i(k)} + \sigma_v v_{j(k)}+\varepsilon_k,
\qquad k=1,\ldots,n,
\]
where $u_1,\ldots,u_N$ and $v_1,\ldots,v_M$ are random effects, and
where $i(k)\in\{1,\ldots,N\}$ and $j(k)\in\{1,\ldots,M\}$ are index maps. The $y$s sharing
either a $u$ or a $v$ will be dependent, and in general, no complete factoring
of the likelihood will be possible. However, it is still important to exploit
the fact that the $u$s and $v$s only enter the likelihood through pairs
$(u_{i(k)},v_{j(k)})$. Here is the code for the crossed model:
\begin{lstlisting}
for (k=1;k<=n;k++)
log_lik(k,u(i(k)),v(j(k)),mu,s,s_u,s_v);
SEPARABLE_FUNCTION void log_lik(int k, const dvariable& u,...)
g -= -log(s) - 0.5*square((y(k)-(mu + s_u*u + s_v*v))/s);
\end{lstlisting}
If only a small proportion of all the possible combinations of $u_i$ and $v_j$
actually occurs in the data, then the posterior covariance matrix of
$(u_1,\ldots,u_N,v_1,\ldots,v_M)$ will be sparse. When an executable
program produced by \scAR\ is invoked with the \texttt{-shess} command line
option, sparse matrix calculations are used.
This is useful not only for crossed models. Here are a few other applications:
\begin{itemize}
\item For the nested random effects model, as explained in
Section~\ref{sec:nested}.
\item For \scREML\ estimation. Recall that \scREML\ estimates are obtained by
making a fixed effect random, but with no prior distribution. For the nested
models in Section~\ref{sec:nested}, and the models with state-space structure
of Section~\ref{sec:state-space}, when using \scREML, \scAR\ will not detect the
cluster or time series structure of the likelihood. (This has to do with the
implementation of \scAR, not the model itself.) However, the posterior
covariance will still be sparse, and the use of \texttt{-shess} is
advantageous. \index{REML@\textsc{reml}}
\end{itemize}
\section{Gaussian priors and quadratic penalties\label{gaussianprior}}
\index{prior distributions!Gaussian priors}
In most models, the prior for the random effects will be Gaussian. In some
situations, such as in spatial statistics, all the individual components of the
random effects vector will be jointly correlated. \scAB\ contains a special
feature (the \texttt{normal\_prior} keyword) for dealing efficiently with such
models. The construct used to declare a correlated Gaussian prior is
\begin{lstlisting}
random_effects_vector u(1,n)
normal_prior S(u);
\end{lstlisting}
The first of these lines is an ordinary declaration of a random effects vector.
The second line tells \scAB\ that \texttt{u} has a multivariate Gaussian
distribution with zero expectation and covariance matrix~\texttt{S}, i.e., the
probability density of $\mathbf{u}$ is
\[
h(\mathbf{u})=\left(2\pi \right)^{-\dim(S)/2}\det (S)^{-1/2}\,
\exp \left(-\tfrac{1}{2}\mathbf{u}^{\prime} \, S^{-1}\mathbf{u}\right) .
\]
Here, $S$ is allowed to depend on the hyper-parameters of the model. The part of
the code where \texttt{S} gets assigned its value must be placed in a
\texttt{SEPARABLE\_FUNCTION}.
\begin{itemize}
\item[$\bigstar$] The log-prior $\log \left(h\left(\mathbf{u}\right) \right)
$ is automatically subtracted from the objective function. Therefore, the
objective function must hold the negative log-likelihood when using the
\texttt{normal\_prior}.
% \item[$\bigstar$] To verify that your model really is partially separable,
% you should try replacing the \texttt{SEPARABLE\_FUNCTION} keyword with an
% ordinary \texttt{FUNCTION}. Then verify on a small subset of your data that
% the two versions of the program produce the same results. You should be able
% to observe that the \texttt{SEPARABLE\_FUNCTION} version runs faster.
\end{itemize}
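A hypothetical sketch of how this could look (the function name
\texttt{fill\_S}, the hyper-parameter \texttt{log\_range}, and the distance
matrix \texttt{d} are made up for illustration; consult the linked spatial
example below for the actual code):
\begin{lstlisting}
PROCEDURE_SECTION
  fill_S(log_range);
SEPARABLE_FUNCTION void fill_S(const dvariable& log_range)
  int i,j;
  for (i=1;i<=n;i++)
    for (j=1;j<=n;j++)
      S(i,j) = exp(-d(i,j)/exp(log_range)); // exponential covariance function
\end{lstlisting}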
See a full example
\href{http://otter-rsch.com/admbre/examples/spatial/spatial.html}{here}.
\appendix
\chapter{Example Collection}
\label{sec:example_collection}
This section contains various examples of how to use \scAR. Some of these have
been referred to earlier in the manual. The examples are grouped according to
their ``Hessian type'' (see Section~\ref{separability}). At the end of each
example, you will find a Files section containing links to webpages, where both
program code and data can be downloaded.
\section{Non-separable models}
This section contains models that do not use any of the separability machinery.
Sections~\ref{sec:gam} and~\ref{sec:lidar} illustrate how to use splines as
non-parametric components. This is currently a very popular technique, and fits
very nicely into the random effects framework~\cite{rupp:wand:carr:2003}. All
the models except the first are, in fact, separable, but for illustrative
purposes (the code becomes easier to read), this has been ignored.
\subsection{Mixed-logistic regression: a \scWinBUGS\ comparison}
\label{sec:logistic_example}
Mixed regression models will usually have a block diagonal Hessian due to
grouping/clustering of the data. The present model was deliberately chosen not
to be separable, in order to pose a computational challenge to both \scAR\ and
\scWinBUGS.
\subsubsection{Model description}
Let $\mathbf{y}=(y_1,\ldots,y_n)$ be a vector of dichotomous observations
($y_i\in\{0,1\}$), and let $\mathbf{u}=(u_1,\ldots,u_q)$ be a vector of
independent random effects, each with Gaussian distribution (expectation $0$ and
variance $\sigma^2$). Define the success probability $\pi_i=\Pr(y_i=1)$. The
following relationship between $\pi_i$ and explanatory variables (contained in
matrices $\mathbf{X}$ and $\mathbf{Z}$) is assumed:
\[
\log\left(\frac{\pi_i}{1-\pi_i}\right) = \mathbf{X}_i\mathbf{\beta} +
\mathbf{Z}_i\mathbf{u},
\]
where $\mathbf{X}_i$ and $\mathbf{Z}_i$ are the $i^{\textrm{th}}$ rows of the
known covariates matrices $\mathbf{X}$ ($n\times p$) and $\mathbf{Z}$ ($n\times
q$), respectively, and $\mathbf{\beta}$ is a $p$-vector of regression
parameters. Thus, the vector of fixed-effects is
$\mathbf{\theta}=(\mathbf{\beta},\log\sigma)$.
\subsubsection{Results}
The goal here is to compare computation times with \scWinBUGS\ on a simulated
data set. For this purpose, we use $n=200$, $p=5$, $q=30$, and values of the
hyper-parameters as shown in Table~\ref{tab:true-values}. The matrices
$\mathbf{X}$ and $\mathbf{Z}$ were generated randomly with each element
uniformly distributed on $[-2,2\,]$. As start values for both \ADM\ and \scBUGS,
we used $\beta_{\mathrm{init},j}=-1$ and $\sigma_\mathrm{init}=4.5$. In
\scBUGS, we used a uniform $[-10,10\,]$ prior on $\beta_j$ and a standard (in
the \scBUGS\ literature) noninformative gamma prior on $\tau=\sigma^{-2}$. In
\ADM, the parameter bounds $\beta_j\in[-10,10\,]$ and $\log\sigma\in[-5,3\,]$
were used in the optimization process.
\begin{table}[htbp]
\begin{center}
\begin{tabular}{@{\vrule height 12pt depth 6pt width0pt} lrrrrrr}
\hline
& $\beta_1$ & $\beta_2$ & $\beta_3$ & $\beta_4$ & $\beta_5$ & $\sigma$\\
\hline\\[-16pt]
True values
& $0.0000$ & $ 0.0000$ & $0.0000$ & $0.0000$ & $ 0.0000$ & $0.1000$\\
\scAR
& $0.0300$ & $-0.0700$ & $0.0800$ & $0.0800$ & $-0.1100$ & $0.1700$\\
Std.\ dev.
& $0.1500$ & $ 0.1500$ & $0.1500$ & $0.1400$ & $ 0.1600$ & $0.0500$\\
\scWinBUGS
& $0.0390$ & $-0.0787$ & $0.0773$ & $0.0840$ & $-0.1041$ & $0.1862$\\
\hline
\end{tabular}
\end{center}
\caption{True parameter values, together with estimates and standard deviations from \scAR\ and \scWinBUGS.}
\label{tab:true-values}
\end{table}
%
On the simulated data set, \ADM\ used $27$ seconds to converge to the optimum of
the likelihood surface. On the same data set, we first ran \scWinBUGS\
for $5,000$ iterations. The recommended convergence diagnostic in \scWinBUGS\ is
the Gelman-Rubin plot (see the help files available from the menus in
\scWinBUGS), which requires that two Markov chains are run in parallel. From the
Gelman-Rubin plot, it was clear that convergence was reached after approximately
$2,000$ iterations. The time taken by \scWinBUGS\ to generate the first $2,000$
iterations was approximately $700$~seconds.
See the files
\href{http://otter-rsch.com/admbre/examples/logistic/logistic.html}{here}.
\subsection{Generalized additive models (\scGAM{}s)}
\label{sec:gam}
\subsubsection{Model description}
\index{nonparametric estimation!splines}
A very useful generalization of the ordinary multiple regression
\[
y_i=\mu +\beta_1x_{1,i}+\cdots +\beta_px_{p,i}+\varepsilon_i,
\]%
is the class of additive models
\begin{equation}
y_i=\mu+f_1(x_{1,i})+\cdots +f_p(x_{p,i})+\varepsilon_i.
\label{eqn:gam}
\end{equation}%
\index{GAM@\textsc{gam}}
Here, the $f_j$ are ``nonparametric'' components that can be modelled by
penalized splines. When this generalization is carried over to generalized
linear models, we arrive at the class of \scGAM{}s~\cite{hast:tibs:1990}.
From a computational perspective, penalized splines are equivalent to random
effects, and thus \scGAM{}s fall naturally into the domain of \scAR.
For each component $f_j$ in equation~(\ref{eqn:gam}), we construct a design
matrix $\mathbf{X}$ such that $f_j(x_{i,j})=\mathbf{X}^{(i)}\mathbf{u}$, where
$\mathbf{X}^{(i)}$ is the $i^{\textrm{th}}$ row of $\mathbf{X}$ and
$\mathbf{u}$\ is a coefficient vector. We use the R function
\texttt{splineDesign} (from the \texttt{splines} library) to construct a design
matrix $\mathbf{X}$. To avoid overfitting, we add a first-order difference
penalty~\cite{eile:marx:1996}
\index{splines!difference penalty}
\begin{equation}
-\lambda^2\sum_{k=2}\left(u_k-u_{k-1}\right)^2,
\label{eqn:first_order}
\end{equation}
to the ordinary \scGLM\ log-likelihood, where $\lambda $ is a smoothing
parameter to be estimated. By viewing $\mathbf{u}$ as a random effects vector
with the above Gaussian prior, and by taking $\lambda $ as a hyper-parameter, it
becomes clear that \scGAM{}s are naturally handled in \scAR.
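To make the random-effects interpretation explicit (a short derivation added
here for intuition): the penalty~(\ref{eqn:first_order}) is, up to terms that do
not involve $\mathbf{u}$, the log-density of the random-walk prior
\[
u_k - u_{k-1} \sim N\!\left(0,\tfrac{1}{2\lambda^2}\right), \qquad k=2,3,\ldots,
\]
so viewing $\mathbf{u}$ as a random effects vector with this prior, and
$\lambda$ as a hyper-parameter, reproduces the penalized log-likelihood; a large
$\lambda$ forces neighbouring spline coefficients to be similar, i.e., a
smoother fit.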
\subsubsection{Implementation details}
\begin{itemize}
\item A computationally more efficient implementation is obtained by moving
$\lambda $ from the penalty term to the design matrix, i.e.,
$f_j(x_{i,j})=\lambda^{-1}\mathbf{X}^{(i)}\mathbf{u}$.
\item Since equation~(\ref{eqn:first_order}) does not penalize the mean
of $\mathbf{u}$, we impose the restriction that $\sum_{k=1}u_k=0$ (see
\texttt{union.tpl} for details). Without this restriction, the model would be
over-parameterized, since we already have an overall mean $\mu $ in
equation~(\ref{eqn:gam}).
\item To speed up computations, the parameter $\mu $ (and other regression
parameters) should be given ``phase 1'' in \scAB, while the $\lambda $s and
the $\mathbf{u}$s should be given ``phase 2.''
\end{itemize}
\begin{figure}[htbp]
\centering\hskip1pt
\includegraphics[width=6in]{union_fig.pdf}
\caption{Probability of membership as a function of covariates. In each plot,
the remaining covariates are fixed at their sample means. The effective
degrees of freedom (df) are also given~\protect\cite{hast:tibs:1990}.}
\label{fig:union}
\end{figure}
\subsubsection{The Wage-union data}
The data, which are available from \href{http://lib.stat.cmu.edu/}{Statlib}, contain
information for each of 534 workers about whether they are members ($y_i=1$)
of a workers union or are not ($y_i=0$). We study the probability of
membership as a function of six covariates. Expressed in the notation used by
the R (S-Plus) function \texttt{gam}, the model is:
\begin{verbatim}
union ~ race + sex + south + s(wage) + s(age) + s(ed), family=binomial
\end{verbatim}
Here, \texttt{s()} denotes a spline function with 20 knots each. For
\texttt{wage}, a cubic spline is used, while for \texttt{age} and \texttt{ed},
quadratic splines are used. The total number of random effects that arise from
the three corresponding $\mathbf{u}$ vectors is~64. Figure~\ref{fig:union} shows
the estimated nonparametric components of the model. The time taken to fit the
model was 165~seconds.
\subsubsection{Extensions}
\begin{itemize}
\item The linear predictor may be a mix of ordinary regression terms
($f_j(x)=\beta_jx$) and nonparametric terms. \scAR\ offers a unified
approach to fitting such models, in which the smoothing parameters
$\lambda_j$ and the regression parameters $\beta_j$ are estimated
simultaneously.
\item It is straightforward in \scAR\ to add ``ordinary'' random effects to
the model, for instance, to accommodate for correlation within groups of
observations, as in~\citeasnoun{lin:zhan:1999}.
\end{itemize}
See the files
\href{http://otter-rsch.com/admbre/examples/union/union.html}{here}.
\subsection{Semi-parametric estimation of mean and variance}
\label{sec:lidar}
\index{nonparametric estimation!variance function}
\subsubsection{Model description}
An assumption underlying the ordinary regression
\[
y_i=a+bx_i+\varepsilon_i^{\prime}
\]
is that all observations have the same variance, i.e.,
Var$\left(\varepsilon_i^{\prime}\right) =\sigma^2$. This assumption does
not always hold, e.g., for the data shown in the upper panel of
Figure~\ref{fig:lidar}. This example is taken
from~\citeasnoun{rupp:wand:carr:2003}.
It is clear that the variance increases to the right (for large values of $x$).
It is also clear that the mean of $y$ is not a linear function of $x$. We thus
fit the model
\[
y_i=f(x_i)+\sigma (x_i)\varepsilon_i,
\]
where $\varepsilon_i\sim N(0,1),$ and $f(x)$ and $\sigma (x)$ are modelled
nonparametrically. We take $f$ to be a penalized spline. To ensure that $\sigma
(x)>0$, we model $\log \left[ \sigma (x)\right] $, rather than $\sigma (x)$, as
a spline function. For $f$, we use a cubic spline (20 knots) with a second-order
difference penalty%
\[
-\lambda^2\sum_{k=3}^{20}\left(u_j-2u_{j-1}+u_{j-2}\right)^2,
\]
while we take $\log \left[ \sigma (x)\right]$ to be a linear spline (20 knots)
with the first-order difference penalty (see equation~\ref{eqn:first_order}).
\subsubsection{Implementation details}
Details on how to implement spline components are given in Example
\ref{sec:gam}.
\begin{itemize}
  \item In order to estimate the variance function, one first needs to have
  fitted the mean part. Parameters associated with $f$ should therefore be given
  ``phase~1'' in \scAB, while those associated with $\sigma$ should be given
  ``phase~2.''
\end{itemize}
\begin{figure}[h]
\centering\hskip1pt
\includegraphics[width=4in]{lidar_fig.pdf}
\caption{\scLIDAR\ data (upper panel) used
by~\protect\citeasnoun{rupp:wand:carr:2003}, with fitted mean. Fitted
standard deviation is shown in the lower panel.}
\label{fig:lidar}
\end{figure}
See the files
\href{http://otter-rsch.com/admbre/examples/lidar/lidar.html}{here}.
\subsection{Weibull regression in survival analysis}
\subsubsection{Model description}
\label{sec:kidney_example}
A typical setting in survival analysis is that we observe the time point $t$ at
which the death of a patient occurs. Patients may leave the study (for some
reason) before they die. In this case, the survival time is said to be
``censored,'' and $t$ refers to the time point when the patient left the study.
The indicator variable $\delta$ is used to indicate whether $t$ refers to the
death of the patient ($\delta=1$) or to a censoring event ($\delta=0$). The key
quantity in modelling the probability distribution of $t$ is the hazard
function $h(t)$, which measures the instantaneous death rate at time $t$. We
also define the cumulative hazard function $\Lambda(t)=\int_0^t
h(s)\,ds$, implicitly assuming that the study started at time $t=0$.
The log-likelihood contribution from a single patient is $\delta\log(h(t))-\Lambda(t)$. A
commonly used model for $h(t)$ is Cox's proportional hazard model, in which the
hazard rate for the $i^{\textrm{th}}$ patient is assumed to be of the form
\[
h_i(t) = h_0(t)\exp(\eta_i), \qquad i=1,\ldots,n.
\]
Here, $h_0(t)$ is the ``baseline'' hazard function (common to all patients) and
$\eta_i=\mathbf{X}_i\mathbf{\beta}$, where $\mathbf{X}_i$ is a covariate vector
specific to the $i^{\textrm{th}}$ patient and $\mathbf{\beta}$ is a vector of
regression parameters. In this example, we shall assume that the baseline hazard
belongs to the Weibull family: $h_0(t)=rt^{r-1}$ for $r>0$.
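With this Weibull baseline, the cumulative hazard is available in closed form (a
small derivation added for completeness):
\[
\Lambda_i(t)=\int_0^t r\,s^{r-1}\exp(\eta_i)\,ds = t^r\exp(\eta_i),
\]
so the log-likelihood contribution from a single observation $(t,\delta)$ with
linear predictor $\eta_i$ becomes
$\delta\bigl[\log r+(r-1)\log t+\eta_i\bigr]-t^r\exp(\eta_i)$.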
In the collection of examples following the distribution of \scWinBUGS, this
model is used to analyse a data set on times to kidney infection for a set of
$n=38$ patients (see \textit{Kidney:\ Weibull regression with random effects} in
the Examples list at
\href{http://www.mrc-bsu.cam.ac.uk/bugs/examples/readme.shtml}%
{The Bugs Project}). The data set contains two observations per patient (the
time to first and second recurrence of infection). In addition, there are three
covariates: \emph{age} (continuous), \emph{sex} (dichotomous), and
\emph{type of disease} (categorical, four levels). There is also an
individual-specific random effect $u_i\sim N(0,\sigma^2)$. Thus, the linear
predictor becomes
\[
\eta_i = \beta_0 + \beta_\mathrm{sex} \cdot \mathrm{sex}_i +
\beta_\mathrm{age} \cdot \mathrm{age}_i +
\mathbf{\beta}_\mathrm{D}\,\mathbf{x}_i + u_i,
\]
where $\mathbf{\beta}_\mathrm{D}=(\beta_1,\beta_2,\beta_3)$ and $\mathbf{x}_i$
is a dummy vector coding for the disease type. Parameter estimates are shown in
Table~\ref{kidney-parameter-estimates}.
\begin{table}[htbp]
\begin{center}
\begin{tabular}{@{\vrule height 12pt depth 6pt width0pt} lrrrrrrrr}
\hline
~ & $\beta_0$ & $\beta_\mathrm{age}$ & $\beta_1$
& $\beta_2$ & $\beta_3$ & $\beta_\mathrm{sex}$ & $r$ & $\sigma$\\
\hline\\[-16pt]
\scAR\ & $-4.3440$ & $ 0.0030$ & $0.1208$
& 0.6058 & $-1.1423$ & $-1.8767$ & $1.1624$ & $0.5617$\\
Std.\ dev. & $ 0.8720$ & $ 0.0137$ & $0.5008$
& 0.5011 & $ 0.7729$ & $ 0.4754$ & $0.1626$ & $0.2970$\\
BUGS & $-4.6000$ & $ 0.0030$ & $0.1329$
& 0.6444 & $-1.1680$ & $-1.9380$ & $1.2150$ & $0.6374$\\
Std.\ dev. & $ 0.8962$ & $ 0.0148$ & $0.5393$
& 0.5301 & $ 0.8335$ & $ 0.4854$ & $0.1623$ & $0.3570$\\
\hline
\end{tabular}
\end{center}
\caption{Parameter estimates for Weibull regression with random effects.}
\label{kidney-parameter-estimates}
\end{table}
See the files
\href{http://otter-rsch.com/admbre/examples/kidney/kidney.html}{here}.
\section{Block-diagonal Hessian}
This section contains models with grouped or nested random effects.
\subsection{Nonlinear mixed models: an \scNLME\ comparison}
\label{sec:orange}
\subsubsection{Model description}
The orange tree growth data was used by~\citeasnoun[Ch.~8.2]{pinh:bate:2000} to
illustrate how a logistic growth curve model with random effects can be fit with
the S-Plus function \texttt{nlme}. The data contain measurements made at seven
occasions for each of five orange trees. See Table~\ref{tab:orange-trees}.
\begin{table}[htbp]
\begin{center}
\begin{tabular}{ll}
$t_{ij}$
& Time when the $j^{\textrm{th}}$ measurement was made on tree $i$.\\
$y_{ij}$
& Trunk circumference of tree $i$ when measured at time $t_{ij}$. \\
\end{tabular}
\end{center}
\caption{Orange tree data.}
\label{tab:orange-trees}
\end{table}
The following logistic model is used:
\[
y_{ij}=\frac{\phi_1+u_i}{1+\exp \left[ -\left(t_{ij}-\phi_2\right)
/\phi_3\right]}+\varepsilon_{ij},
\]%
where $(\phi_1,\phi_2,\phi_3)$ are hyper-parameters,
$u_i\sim N(0,\sigma_u^2)$ is a random effect, and
$\varepsilon_{ij}\sim N(0,\sigma^2)$ is the residual noise term.
\subsubsection{Results}
Parameter estimates are shown in Table~\ref{tab:parameter-estimates}.
\begin{table}[htbp]
\begin{center}
\begin{tabular}{@{\vrule height 12pt depth 6pt width0pt} lrrrrr}
\hline
~ & $\phi_1$ & $\phi_2$ & $\phi_3$ & $\sigma$ & $\sigma_u$\\
\hline\\[-16pt]
\scAR & 192.1 & 727.9 & 348.1 & 7.843 & 31.65\\
Std.\ dev. & 15.7 & 35.2 & 27.1 & 1.013 & 10.26\\
\texttt{nlme} & 191.0 & 722.6 & 344.2 & 7.846 & 31.48\\
\hline
\end{tabular}
\end{center}
\caption{Parameter estimates.}
\label{tab:parameter-estimates}
\end{table}
The difference between the estimates obtained with \scAR\ and \texttt{nlme} is
small. The difference is caused by the fact that the two approaches use
different approximations to the likelihood function. (\scAR\ uses the Laplace
approximation, and for \texttt{nlme}, the reader is referred to~\cite[Ch.
7]{pinh:bate:2000}.)
The computation time for \scAB\ was 0.58~seconds, while the computation time for
\texttt{nlme} (running under S-Plus~6.1) was 1.6~seconds.
See the files
\href{http://otter-rsch.com/admbre/examples/orange/orange.html}{here}.
\subsection{Pharmacokinetics: an \scNLME\ comparison}
\label{sec:pheno}
\subsubsection{Model description}
The ``one-compartment open model'' is commonly used in pharmacokinetics. It can
be described as follows. A patient receives a dose $D$ of some substance at
time $t_d$. The concentration $c_t$ at a later time point $t$ is governed by
the equation
\[
c_t=\tfrac{D}{V}\exp \left[ -\tfrac{Cl}{V}(t-t_d)\right]
\]
where $V$ and $Cl$ are parameters (the so-called ``Volume of concentration'' and
the ``Clearance''). Doses given at different time points contribute additively
to $c_t$. This model has been fitted to a data set using the S-Plus routine
\texttt{nlme}, see~\citeasnoun[Ch.~6.4]{pinh:bate:2000}, with the linear
predictor
\begin{align*}
\log\left(V\right) &=\beta_1+\beta_2W+u_V, \\
\log\left(Cl\right) &=\beta_3+\beta_4W+u_{Cl},
\end{align*}
where $W$ is a continuous covariate, while $u_V\sim N(0,\sigma_V^2)$ and
$u_{Cl}\sim N(0,\sigma_{Cl}^2)$ are random effects. The model specification is
completed by the requirement that the observed concentration $y$ in the patient
is related to the true concentration by $y=c_t+\varepsilon $, where $\varepsilon
\sim N(0,\sigma^2)$ is a measurement error term.
\subsubsection{Results}
Estimates of hyper-parameters are shown in Table~\ref{tab:hyper-estimates}.
\begin{table}[htbp]
\begin{center}
\begin{tabular}{@{\vrule height 12pt depth 6pt width0pt} lrrrrrrr}
\hline
~ & $\beta_1$ & $\beta_2$ & $\beta_3$
& $\beta_4$ & $\sigma$ & $\sigma_V$ & $\sigma_{Cl}$\\
\hline\\[-17pt]
\scAR & $-5.99$ & $0.622$ & $-0.471$
& $0.532$ & $ 2.72$ & $0.171$ & $ 0.227$\\
Std.\ dev. & $ 0.13$ & $0.076$ & $ 0.067$
& $0.040$ & $ 0.23$ & $0.024$ & $ 0.054$\\
\texttt{nlme} & $-5.96$ & $0.620$ & $-0.485$
& $0.532$ & $ 2.73$ & $0.173$ & $ 0.216$\\
\hline
\end{tabular}
\end{center}
\caption{Hyper-parameter estimates: pharmacokinetics.}
\label{tab:hyper-estimates}
\end{table}
The differences between the estimates obtained with \scAR\ and \texttt{nlme} are
caused by the fact that the two methods use different approximations of the
likelihood function. \scAR\ uses the Laplace approximation, while the method
used by \texttt{nlme} is described in~\citeasnoun[Ch.~7]{pinh:bate:2000}.
The time taken to fit the model by \scAR\ was 17~seconds, while the computation
time for \texttt{nlme} (under S-Plus~6.1) was 7~seconds.
See the files
\href{http://otter-rsch.com/admbre/examples/pheno/pheno.html}{here}.
\subsection{Frequency weighting in \scAR}
\label{seq:frequency_example}
\subsubsection{Model description}
Let $X_i$ be binomially distributed with parameters $N=2$ and $p_i$, and
further assume that
\begin{equation}
p_i=\frac{\exp (\mu +u_i)}{1+\exp (\mu +u_i)},
\end{equation}%
where $\mu $ is a parameter and $u_i\sim N(0,\sigma^2)$ is a random effect.
Assuming independence, the log-likelihood function for the parameter $\theta
=(\mu,\sigma)$ can be written as
\begin{equation}
l(\theta)=\sum_{i=1}^n\log \bigl[\, p(x_i;\theta)\bigr] .
\end{equation}%
In \scAR, $p(x_i;\theta)$ is approximated using the Laplace approximation.
However, since $x_i$ only can take the values $0$, $1$, and $2$, we can
rewrite the log-likelihood as
\begin{equation}
l(\theta)=\sum_{j=0}^2n_j\log \bigl[\, p(j;\theta)\bigr], \label{l_w}
\end{equation}%
where $n_j$ is the number of $x_i$s equal to $j$. Still, the Laplace
approximation must be used to approximate $p(j;\theta)$, but now only for
$j=0,1,2$, as opposed to $n$ times as above. For large $n$, this can give
large savings.
To implement the log-likelihood (\ref{l_w}) in \scAR, you must organize your
code into a \texttt{SEPARABLE\_FUNCTION} (see Section~\ref{sec:nested}). Then
you should do the following (a minimal sketch is given after the list):
\begin{itemize}
\item Formulate the objective function in the weighted form (\ref{l_w}).
\item Include the statement
\begin{lstlisting}
!! set_multinomial_weights(w);
\end{lstlisting}
in the \texttt{PARAMETER\_SECTION}. The variable \texttt{w} is a vector (with
indexes starting at~1) containing the weights, so in our case,
$w=(n_0,n_1,n_2)$.
\end{itemize}
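A minimal sketch of the two ingredients (only the relevant lines are shown):
\begin{lstlisting}
DATA_SECTION
  init_vector w(1,3)     // w = (n_0, n_1, n_2): frequency of each outcome
PARAMETER_SECTION
  !! set_multinomial_weights(w);
\end{lstlisting}
In addition, each likelihood contribution evaluated in the
\texttt{SEPARABLE\_FUNCTION} must be multiplied explicitly by the corresponding
weight, so that the objective function equals~(\ref{l_w}).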
See the files
\href{http://otter-rsch.com/admbre/examples/weights/weights.html}{here}.
\subsection{Ordinal-logistic regression}
\label{sec:socatt_example}
\subsubsection{Model description}
In this model, the response variable $y$ takes on values from the ordered set
$\{y^{(s)},s=1,\ldots,S\}$, where $y^{(1)}<y^{(2)}<\cdots<y^{(S)}$. For
$s=1,\ldots,S-1$, define $P_s=P(y\leq y^{(s)})$ and $\kappa_s=\log
[P_s/(1-P_s)]$. To allow $\kappa_s$ to depend on covariates specific to the
$i^{\textrm{th}}$ observation ($i=1,\ldots,n$), we introduce a
disturbance $\eta_i$ of $\kappa_s$:
\[
P(y_i\leq y^{(s)}) =
\frac{\exp(\kappa_s-\eta_i)}
{1+\exp(\kappa_s-\eta_i)}, \qquad s=1,\ldots,S-1.
\]
with
\[
\eta_i = \mathbf{X}_i\mathbf{\beta}+u_{j_i},
\]
where $\mathbf{X}_i$ and $\mathbf{\beta}$ play the same role as in earlier
examples. %Example 1-3
The $u_j$ ($j=1,\ldots,q$) are independent $N(0,\sigma^2)$ variables, and $j_i$
is the latent variable class of individual $i$.
See the files
\href{http://otter-rsch.com/admbre/examples/socatt/socatt.html}{here}.
\section{Banded Hessian (state-space)}
Here are some examples of state-space models.
\subsection{Stochastic volatility models in finance}
\subsubsection{Model description}
Stochastic volatility models are used in mathematical finance to describe the
evolution of asset returns, which typically exhibit changing variances over
time. As an illustration, we use a time series of daily pound/dollar exchange
rates $\{z_t\}$ from the period 01/10/81 to 28/6/85, previously analyzed
by~\citeasnoun{harv:ruiz:shep:1994}. The series of interest is the daily
mean-corrected returns $\{y_t\}$, given by the transformation
\[
y_t = \log z_t - \log z_{t-1} - n^{-1}\sum_{i=1}^n(\log z_i-\log z_{i-1}).
\]
The stochastic volatility model allows the variance of $y_t$ to vary smoothly
with time. This is achieved by assuming that $y_t\sim N(\mu,\sigma_t^2)$, where
$\sigma_t^2=\exp(\mu_x+x_t)$. The smoothly varying component $x_t$ follows the
autoregression
\[
x_t = \beta x_{t-1} + \varepsilon_t, \qquad \varepsilon_t \sim N(0,\sigma^2).
\]
The vector of hyper-parameters for this model is thus
$(\beta,\sigma,\mu,\mu_x)$.
See the files \href{http://otter-rsch.com/admbre/examples/sdv/sdv.html}{here}.
\subsection{A discrete valued time series: the polio data set}
\label{sec:sdv_example}
\subsubsection{Model description}
A time series of monthly numbers of poliomyelitis cases during the period
1970--1983 in the U.S.\ was analyzed by~\citeasnoun{zege:1988}. We make a
comparison to the performance of the Monte Carlo Newton-Raphson method, as
reported in~\citeasnoun{kuk:chen:1999}. We adopt their model formulation.
Let $y_i$ denote the number of polio cases in the $i^{\textrm{th}}$ period
$(i=1,\ldots,168)$. It is assumed that the distribution of $y_i$ is governed
by a latent stationary AR(1) process $\{u_i\}$ satisfying
\[
u_i = \rho u_{i-1} + \varepsilon_i,
\]
where the $\varepsilon_i$ are independent $N(0,\sigma^2)$ variables.
To account for trend and seasonality, the following covariate vector is
introduced:
\[
\mathbf{x}_i = \Bigg(
1,
\frac{i}{1000},
\cos\left(\frac{2\pi}{12}i\right),
\sin\left(\frac{2\pi}{12}i\right),
\cos\left(\frac{2\pi}{6}i\right),
\sin\left(\frac{2\pi}{6}i\right)
\Bigg).
\]
Conditionally on the latent process $\{u_i\}$, the counts $y_i$ are
independently Poisson distributed with intensity
\[
\lambda_i=\exp(\mathbf{x}_i{}'\mathbf{\beta}+u_i).
\]
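As in the previous example, the counts are conditionally independent given
$\{u_i\}$, each contributing a Poisson term
\[
P(y_i\mid u_i) = \frac{\lambda_i^{y_i}\exp(-\lambda_i)}{y_i!},
\]
while the AR(1) structure couples each $u_i$ only to $u_{i-1}$ and $u_{i+1}$, so
the Hessian with respect to the random effects is again banded.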
\subsubsection{Results}
Estimates of hyper-parameters are shown in Table~\ref{tab:hyper-estimates-2}.
\begin{table}[htbp]
\begin{center}
\begin{tabular}{@{\vrule height 12pt depth 6pt width0pt} lrrrrrrrr}
\hline
~
& $\beta_1$ & $\beta_2$ & $\beta_3$ & $\beta_4$
& $\beta_5$ & $\beta_6$ & $\rho$ & $\sigma$ \\
\hline\\[-16pt]
\scAR\
& $0.242$ & $-3.81$ & $0.162$ & $-0.482$
& $0.413$ & $-0.0109$ & $0.627$ & $ 0.538$ \\
Std.\ dev.
& $0.270$ & $ 2.76$ & $0.150$ & $ 0.160$
& $0.130$ & $ 0.1300$ & $0.190$ & $ 0.150$ \\
\citeasnoun{kuk:chen:1999}
& $0.244$ & $-3.82$ & $0.162$ & $-0.478$
& $0.413$ & $-0.0109$ & $0.665$ & $ 0.519$ \\
\hline
\end{tabular}
\end{center}
\caption{Hyper-parameter estimates: polio data set.}
\label{tab:hyper-estimates-2}
\end{table}
We note that % not
the standard deviation is large for several regression parameters. The \scAR\
estimates (which are based on the Laplace approximation) are very similar to the
exact maximum likelihood estimates, as obtained with the method
of~\citeasnoun{kuk:chen:1999}.
See the files
\href{http://otter-rsch.com/admbre/examples/polio/polio.html}{here}.
\section{Generally sparse Hessian}
\subsection{Multilevel Rasch model}
The multilevel Rasch model can be implemented using random effects in \scAB. As
an example, we use data on the responses of 2042~soldiers to a total of 19~items
(questions), taken from~\cite{doran2007estimating}.
This illustrates the use of crossed random effects in \scAB. Furthermore, it is
shown how the model can easily be generalized in \scAB. These more general
models cannot be fitted with standard \scGLMM\ software, such as ``lmer'' in~R.
See the files \href{http://admb-project.org/community/tutorials-and-examples/%
random-effects-example-collection/%
item-response-theory-irt-and-the-multilevel-rasch-model-1}{here}.
\chapter{Differences between \scAB\ and \scAR?}
\begin{itemize}
\item Profile likelihoods are now also implemented in random effects models,
but with the limitation that the \texttt{likeprof\_number} can only depend on
parameters, not random effects.
\item Certain functions, especially for matrix operations, have not been
implemented.
\item The assignment operator for \texttt{dvariable} behaves differently. The
code
\begin{lstlisting}
dvariable y = 1;
dvariable x = y;
\end{lstlisting}
will make \texttt{x} and \texttt{y} point to the same memory location (shallow
copy) in \scAR. Hence, changing the value of \texttt{x} automatically changes
\texttt{y}. Under \scAB, on the other hand, \texttt{x} and \texttt{y} will
refer to different memory locations (deep copy). If you want to perform a deep
copy in \scAR\ you should write:
\begin{lstlisting}
dvariable y = 1;
dvariable x;
x = y;
\end{lstlisting}
For vector and matrix objects \scAB\ and \scAR\ behave identically in that
a shallow copy is used.
\end{itemize}
\chapter{Command Line options}
\label{sec:command_line_options}
\index{command line options!\scAR-specific}
A list of command line options accepted by \scAB\ programs can be obtained using
the command line option \texttt{-?}, for instance,
\begin{lstlisting}
$ simple -?
\end{lstlisting}
Those options that are specific to \scAR\ are printed after the line ``Random
effects options if applicable.'' See Table~\ref{tab:command-line-options}.
\begin{table}[htbp]
\begin{center}
\begin{tabular*}{.95\textwidth}%
{@{\vrule height 14pt depth 10pt width0pt}@{\extracolsep{1em}} l
p{.8\textwidth}}
\hline
\textbf{Option}
& \textbf{Explanation}\\[-3pt]
\hline
\texttt{-nr N}
& maximum number of Newton-Raphson steps\\
\texttt{-imaxfn N}
& maximum number of evals in quasi-Newton inner optimization\\
\texttt{-is N}
& set importance sampling size to \texttt{N} for random effects\\
\texttt{-isf N}
& set importance sampling size funnel blocks to \texttt{N} for random
effects\\
\texttt{-isdiag}
& print importance sampling diagnostics\\
\texttt{-hybrid}
& do hybrid Monte Carlo version of \scMCMC\\
\texttt{-hbf}
& set the hybrid bounded flag for bounded parameters\\
\texttt{-hyeps}
& mean step size for hybrid Monte Carlo\\
\texttt{-hynstep}
& number of steps for hybrid Monte Carlo\\
\texttt{-noinit}
  & do not initialize random effects before inner optimization\\
\texttt{-ndi N}
& set maximum number of separable calls\\
\texttt{-ndb N}
& set number of blocks for derivatives for random effects (reduces
temporary file sizes)\\
\texttt{-ddnr}
& use high-precision Newton-Raphson for inner optimization for banded
\mbox{Hessian} case \textit{only}, even if implemented\\
\texttt{-nrdbg}
& verbose reporting for debugging Newton-Raphson\\
\texttt{-mm N}
& do minimax optimization\\
\texttt{-shess}
  & use sparse Hessian structure in the inner optimization\\
\hline
\texttt{-l1 N}
& set size of buffer \texttt{f1b2list1} to~\texttt{N}\\
\texttt{-l2 N}
  & set size of buffer \texttt{f1b2list2} to~\texttt{N}\\
\texttt{-l3 N}
  & set size of buffer \texttt{f1b2list3} to~\texttt{N}\\
\texttt{-nl1 N}
  & set size of buffer \texttt{nf1b2list1} to~\texttt{N}\\
\texttt{-nl2 N}
  & set size of buffer \texttt{nf1b2list2} to~\texttt{N}\\
\texttt{-nl3 N}
  & set size of buffer \texttt{nf1b2list3} to~\texttt{N}\\\hline
\end{tabular*}
\end{center}
\caption{Command line options.}
\label{tab:command-line-options}
\end{table}
The options in the last section (the sections are separated by horizontal bars)
are not printed, but can still be used (see earlier).
\chapter{Quick References}\label{ch:05}
\label{sec:quick}
\section{Compiling \scAB\ programs}
\label{sec:compiling}
To compile \texttt{model.tpl} in a \textsc{DOS}/Linux terminal window, type
\begin{code}
admb [-r] [-s] model
\end{code}
where the options
\par
\begin{tabular}{@{\texttt} l l}
-r & is used to invoke the random effects module \\
-s & yields the ``safe'' version of the executable file\\
\end{tabular}
\medskip
\noindent There are two stages of compilation:
\begin{itemize}
\item Translate TPL to \cplus: \texttt{tpl2cpp} or \texttt{tpl2rem}
\item Build executable from \cplus\ code (using GCC, Visual \cplus, etc.)
\end{itemize}
\begin{center}
\includegraphics[width=11cm]{compiling-diagram}
\end{center}
\hskip-2pc\includegraphics[width=18cm]{ADMBprim.pdf}%17
\bibliographystyle{plain}
\bibliography{admbre}
\printindex
\end{document}
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
$include "op.mpl"
(* Xalpha *)
op_qab := 2.5654:
op_enhancement := xs -> 1:
|
(*
Copyright (C) 2017 M.A.L. Marques
Copyright (C) 2018 Susi Lehtola
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
(* prefix:
gga_x_lspbe_params *params;
assert(p->params != NULL);
params = (gga_x_lspbe_params * )(p->params);
*)
lspbe_f0 := s -> 1 + params_a_kappa*(1 - params_a_kappa/(params_a_kappa + params_a_mu*s^2))
- (params_a_kappa+1)*(1-exp(-params_a_alpha*s^2)):
lspbe_f := x -> lspbe_f0(X2S*x):
f := (rs, z, xt, xs0, xs1) -> gga_exchange(lspbe_f, rs, z, xs0, xs1):
|
using Test
using SparseArrays
using LinearAlgebra
using LinearAlgebraicRepresentation
using LinearAlgebraicRepresentation.Arrangement
Lar = LinearAlgebraicRepresentation
@testset "Edge fragmentation tests" begin
V = [2 2; 4 2; 3 3.5; 1 3; 5 3; 1 2; 5 2]
EV = SparseArrays.sparse(Array{Int8, 2}([
[1 1 0 0 0 0 0] #1->1,2
[0 1 1 0 0 0 0] #2->2,3
[1 0 1 0 0 0 0] #3->1,3
[0 0 0 1 1 0 0] #4->4,5
[0 0 0 0 0 1 1] #5->6,7
]))
@testset "intersect_edges" begin
inters1 = Lar.Arrangement.intersect_edges(V, EV[5, :], EV[1, :])
inters2 = Lar.Arrangement.intersect_edges(V, EV[1, :], EV[4, :])
inters3 = Lar.Arrangement.intersect_edges(V, EV[1, :], EV[2, :])
@test inters1 == [([2. 2.], 1/4),([4. 2.], 3/4)]
@test inters2 == []
@test inters3 == [([4. 2.], 1)]
end
# @testset "frag_edge" begin
# rV, rEV = Lar.Arrangement.frag_edge(V, EV, 5, [1,2,3,4,5])
# @test rV == [1.0 2.0; 5.0 2.0; 2.0 2.0; 4.0 2.0; 4.0 2.0; 2.0 2.0]
# @test Matrix(rEV) == [1 0 0 0 0 1;
# 0 0 0 0 1 1;
# 0 1 0 0 1 0]
# end
end
@testset "merge_vertices test set" begin
n0 = 1e-12
n1l = 1-1e-12
n1u = 1+1e-12
V = [ n0 n0; -n0 n0; n0 -n0; -n0 -n0;
n0 n1u; -n0 n1u; n0 n1l; -n0 n1l;
n1u n1u; n1l n1u; n1u n1l; n1l n1l;
n1u n0; n1l n0; n1u -n0; n1l -n0]
EV = Int8[1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0;
0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0;
0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0;
0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0;
0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0;
0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0;
0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0;
0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0;
0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0;
0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0;
0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0;
0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1;
1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0;
0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0;
0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0;
0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1]
EV = sparse(EV)
V, EV = Lar.Arrangement.merge_vertices!(V, EV, [])
@test V == [n0 n0; n0 n1u; n1u n1u; n1u n0]
@test Matrix(EV) == [1 1 0 0;
0 1 1 0;
0 0 1 1;
1 0 0 1]
end
@testset "biconnected_components test set" begin
EV = Int8[0 0 0 1 0 0 0 0 0 0 1 0; #1
0 0 1 0 0 1 0 0 0 0 0 0; #2
0 0 0 0 0 0 1 0 0 1 0 0; #3
1 0 0 0 1 0 0 0 0 0 0 0; #4
0 0 0 1 0 0 0 1 0 0 0 0; #5
0 0 1 0 0 0 0 0 1 0 0 0; #6
0 1 0 0 0 0 0 0 0 1 0 0; #7
0 0 0 0 1 0 0 0 0 0 0 1; #8
0 0 0 0 0 0 0 1 0 0 1 0; #9
0 0 0 0 0 1 0 0 1 0 0 0; #10
0 1 0 0 0 0 1 0 0 0 0 0; #11
0 0 0 0 1 0 0 0 0 0 1 0; #12
0 0 0 0 1 0 0 0 1 0 0 0] #13
EV = sparse(EV)
bc = Lar.Arrangement.biconnected_components(EV)
bc = Set(map(Set, bc))
@test bc == Set([Set([1,5,9]), Set([2,6,10]), Set([3,7,11])])
end
@testset "Face creation" begin
@testset "External cell individuation" begin
V = [ .5 .5; 1.5 1; 1.5 2;
2.5 2; 2.5 1; 3.5 .5;
3.5 3; 2 2.5; .5 3]
EV = Int8[-1 1 0 0 0 0 0 0 0;
0 -1 1 0 0 0 0 0 0;
0 0 -1 1 0 0 0 0 0;
0 0 0 -1 1 0 0 0 0;
0 0 0 0 -1 1 0 0 0;
0 0 0 0 0 -1 1 0 0;
0 0 0 0 0 0 -1 1 0;
0 0 0 0 0 0 0 -1 1;
-1 0 0 0 0 0 0 0 1;
0 -1 0 0 1 0 0 0 0]
EV = sparse(EV)
FE = Int8[ 0 -1 -1 -1 0 0 0 0 0 1;
1 1 1 1 1 1 1 1 -1 0;
-1 0 0 0 -1 -1 -1 -1 1 -1]
FE = sparse(FE)
@test Lar.Arrangement.get_external_cycle(V, EV, FE) == 3
end
@testset "Containment test" begin
V = [ 0 0; 4 0; 4 2; 2 4; 0 4;
.5 .5; 2.5 .5; 2.5 2.5; .5 2.5;
1 1; 1.5 1; 1 2;
2 1; 2 2; 1.5 2;
3.5 3.5; 3 3.5; 3.5 3]
EV1 = Int8[ 0 0 0 0 0 0 0 0 0 -1 1 0 0 0 0 0 0 0;
0 0 0 0 0 0 0 0 0 0 -1 1 0 0 0 0 0 0;
0 0 0 0 0 0 0 0 0 -1 0 1 0 0 0 0 0 0]
EV2 = Int8[ 0 0 0 0 0 0 0 0 0 0 0 0 -1 1 0 0 0 0;
0 0 0 0 0 0 0 0 0 0 0 0 0 -1 1 0 0 0;
0 0 0 0 0 0 0 0 0 0 0 0 -1 0 1 0 0 0]
EV3 = Int8[ 0 0 0 0 0 -1 1 0 0 0 0 0 0 0 0 0 0 0;
0 0 0 0 0 0 -1 1 0 0 0 0 0 0 0 0 0 0;
0 0 0 0 0 0 0 -1 1 0 0 0 0 0 0 0 0 0;
0 0 0 0 0 -1 0 0 1 0 0 0 0 0 0 0 0 0]
EV4 = Int8[-1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
0 -1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
0 0 -1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0;
0 0 0 -1 1 0 0 0 0 0 0 0 0 0 0 0 0 0;
-1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0]
EV5 = Int8[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -1 1 0;
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -1 1;
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -1 0 1]
EVs = map(sparse, [EV1, EV2, EV3, EV4, EV5])
shell1 = Int8[-1 -1 1];
shell2 = Int8[-1 -1 1];
shell3 = Int8[-1 -1 -1 1];
shell4 = Int8[-1 -1 -1 -1 1];
shell5 = Int8[-1 -1 1];
shells = map(sparsevec, [shell1, shell2, shell3, shell4, shell5])
shell_bboxes = []
n = 5
for i in 1:n
vs_indexes = (abs.(EVs[i]')*abs.(shells[i])).nzind
push!(shell_bboxes, Lar.bbox(V[vs_indexes, :]))
end
graph = Lar.Arrangement.pre_containment_test(shell_bboxes)
@test graph == [0 0 1 1 0; 0 0 1 1 0; 0 0 0 1 0; 0 0 0 0 0; 0 0 0 1 0]
graph = Lar.Arrangement.prune_containment_graph(n, V, EVs, shells, graph)
@test graph == [0 0 1 1 0; 0 0 1 1 0; 0 0 0 1 0; 0 0 0 0 0; 0 0 0 0 0]
end
@testset "Transitive reduction" begin
graph = [0 0 1 1 0; 0 0 1 1 0; 0 0 0 1 0; 0 0 0 0 0; 0 0 0 0 0]
Lar.Arrangement.transitive_reduction!(graph)
@test graph == [0 0 1 0 0; 0 0 1 0 0; 0 0 0 1 0; 0 0 0 0 0; 0 0 0 0 0]
end
@testset "Cell merging" begin
graph = [0 1; 0 0]
V = [.25 .25; .75 .25; .75 .75; .25 .75;
0 0; 1 0; 1 1; 0 1]
EV1 = Int8[-1 1 0 0 0 0 0 0;
0 -1 1 0 0 0 0 0;
0 0 -1 1 0 0 0 0;
-1 0 0 1 0 0 0 0;
-1 0 1 0 0 0 0 0]
EV2 = Int8[ 0 0 0 0 -1 1 0 0;
0 0 0 0 0 -1 1 0;
0 0 0 0 0 0 -1 1;
0 0 0 0 -1 0 0 1]
EVs = map(sparse, [EV1, EV2])
shell1 = Int8[-1 -1 -1 1 0]
shell2 = Int8[-1 -1 -1 1]
shells = map(sparsevec, [shell1, shell2])
boundary1 = Int8[ 1 1 0 0 -1;
0 0 1 -1 1]
boundary2 = Int8[ 1 1 1 -1]
boundaries = map(sparse, [boundary1, boundary2])
shell_bboxes = []
n = 2
for i in 1:n
vs_indexes = (abs.(EVs[i]')*abs.(shells[i])).nzind
push!(shell_bboxes, Lar.bbox(V[vs_indexes, :]))
end
EV, FE = Lar.Arrangement.cell_merging(2, graph, V, EVs, boundaries, shells, shell_bboxes)
selector = sparse(ones(Int8, 1, 3))
@test selector*FE == [0 0 0 0 0 1 1 1 -1]
end
end |
Formal statement is: lemma filterlim_inverse_at_right_top: "LIM x at_top. inverse x :> at_right (0::real)" Informal statement is: The function $1/x$ tends to $0$ from the right as $x$ tends to infinity. |
! Copyright (c) 2006-2011 IDRIS/CNRS
! Author: Philippe Wautelet (IDRIS/CNRS), [email protected]
! Distributed under the CeCILL 2.0 license. For full terms see the file LICENSE.
!TODO: use pack/unpack!
#ifdef IOPROC
#ifndef WITHOUTMPI
subroutine backup_amr_send
use amr_commons
use hydro_commons
use io_parameters
use pm_commons
use timer
use mpi
implicit none
integer::idx,ierr
integer,parameter::tag=TAG_BAK_AMR
integer::ilevel,ibound,ncache,istart,i,igrid,idim,ind,iskip
integer,allocatable,dimension(:)::ind_grid,iig
real(dp),allocatable,dimension(:)::xdp
if(verbose)write(*,*)'Entering backup_amr_send'
!-----------------------------------
! Output restart dump file = amr.bak
!-----------------------------------
call start_timer()
allocate(iig(19+(3*(ncpu+nboundary)+10)*nlevelmax+3*ncoarse))
allocate(xdp(19+2*noutput+ncpu))
iig(1)=ncpu
iig(2)=ndim
iig(3)=nx;iig(4)=ny;iig(5)=nz
iig(6)=nlevelmax
iig(7)=ngridmax
iig(11)=nboundary
iig(14)=ngrid_current
iig(8)=noutput
iig(9)=iout
iig(10)=ifout
iig(12)=nstep;iig(13)=nstep_coarse
iig(15)=headf;iig(16)=tailf;iig(17)=numbf;iig(18)=used_mem;iig(19)=used_mem_tot
iskip=0
do i=1,nlevelmax
iig(iskip+20:iskip+19+ncpu)=headl(:,i)
iig(iskip+20+ ncpu*nlevelmax:iskip+19+ncpu*(nlevelmax+1))=taill(:,i)
iig(iskip+20+2*ncpu*nlevelmax:iskip+19+2*ncpu*nlevelmax+ncpu)=numbl(:,i)
iig((i-1)*10+20+3*ncpu*nlevelmax:i*10+19+3*ncpu*nlevelmax)=numbtot(:,i)
iskip=iskip+ncpu
end do
idx=20+(3*ncpu+10)*nlevelmax
if(simple_boundary)then
iskip=0
do i=1,nlevelmax
idx=20+(3*ncpu+10)*nlevelmax
iig(iskip+idx:iskip+idx-1+nboundary)=headb(:,i);idx=idx+nboundary*nlevelmax
iig(iskip+idx:iskip+idx-1+nboundary)=tailb(:,i);idx=idx+nboundary*nlevelmax
iig(iskip+idx:iskip+idx-1+nboundary)=numbb(:,i);idx=idx+nboundary*nlevelmax
iskip=iskip+nboundary
end do
end if
iig(idx:idx-1+ncoarse)=son(1:ncoarse) ;idx=idx+ncoarse
iig(idx:idx-1+ncoarse)=flag1(1:ncoarse) ;idx=idx+ncoarse
iig(idx:idx-1+ncoarse)=cpu_map(1:ncoarse)
xdp(1)=boxlen
xdp(2)=t
xdp(3)=const;xdp(4)=mass_tot_0;xdp(5)=rho_tot
xdp(6)=omega_m;xdp(7)=omega_l;xdp(8)=omega_k;xdp(9)=omega_b
xdp(10)=h0;xdp(11)=aexp_ini;xdp(12)=boxlen_ini
xdp(13)=aexp;xdp(14)=hexp;xdp(15)=aexp_old;xdp(16)=epot_tot_int;xdp(17)=epot_tot_old
xdp(18)=mass_sph
xdp(19:18+noutput)=tout(1:noutput)
xdp(19+noutput:18+2*noutput)=aout(1:noutput)
if(ordering=='bisection') then
else
xdp(19+2*noutput:19+2*noutput+ncpu)=bound_key(0:ncpu)
endif
idx = 19+(3*(ncpu+nboundary)+10)*nlevelmax+3*ncoarse
call MPI_SEND(iig,size(iig),MPI_INTEGER,0,tag,MPI_COMM_IOGROUP,ierr)
call MPI_SEND(xdp,size(xdp),MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
deallocate(iig,xdp)
call MPI_SEND(dtold,nlevelmax,MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
call MPI_SEND(dtnew,nlevelmax,MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
! Write cpu boundaries
if(ordering=='bisection') then
call MPI_SEND(bisec_wall,nbinodes,MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
call MPI_SEND(bisec_next,nbinodes,MPI_INTEGER, 0,tag,MPI_COMM_IOGROUP,ierr)
call MPI_SEND(bisec_indx,2*nbinodes,MPI_INTEGER, 0,tag,MPI_COMM_IOGROUP,ierr)
call MPI_SEND(bisec_cpubox_min,ncpu*ndim,MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
call MPI_SEND(bisec_cpubox_max,ncpu*ndim,MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
else
!Send already done (bound_key)
endif
! Write fine levels
do ilevel=1,nlevelmax
do ibound=1,nboundary+ncpu
if(ibound<=ncpu)then
ncache=numbl(ibound,ilevel)
istart=headl(ibound,ilevel)
else
ncache=numbb(ibound-ncpu,ilevel)
istart=headb(ibound-ncpu,ilevel)
end if
if(ncache>0)then
allocate(ind_grid(1:ncache),xdp(1:ncache),iig(1:ncache))
! Write grid index
igrid=istart
do i=1,ncache
ind_grid(i)=igrid
igrid=next(igrid)
end do
call MPI_SEND(ind_grid,ncache,MPI_INTEGER,0,tag,MPI_COMM_IOGROUP,ierr)
! Write next index
do i=1,ncache
iig(i)=next(ind_grid(i))
end do
call MPI_SEND(iig,ncache,MPI_INTEGER,0,tag,MPI_COMM_IOGROUP,ierr)
! Write prev index
do i=1,ncache
iig(i)=prev(ind_grid(i))
end do
call MPI_SEND(iig,ncache,MPI_INTEGER,0,tag,MPI_COMM_IOGROUP,ierr)
! Write grid center
do idim=1,ndim
do i=1,ncache
xdp(i)=xg(ind_grid(i),idim)
end do
call MPI_SEND(xdp,ncache,MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
end do
! Write father index
do i=1,ncache
iig(i)=father(ind_grid(i))
end do
call MPI_SEND(iig,ncache,MPI_INTEGER,0,tag,MPI_COMM_IOGROUP,ierr)
! Write nbor index
do ind=1,twondim
do i=1,ncache
iig(i)=nbor(ind_grid(i),ind)
end do
call MPI_SEND(iig,ncache,MPI_INTEGER,0,tag,MPI_COMM_IOGROUP,ierr)
end do
! Write son index
do ind=1,twotondim
iskip=ncoarse+(ind-1)*ngridmax
do i=1,ncache
iig(i)=son(ind_grid(i)+iskip)
end do
call MPI_SEND(iig,ncache,MPI_INTEGER,0,tag,MPI_COMM_IOGROUP,ierr)
end do
! Write cpu map
do ind=1,twotondim
iskip=ncoarse+(ind-1)*ngridmax
do i=1,ncache
iig(i)=cpu_map(ind_grid(i)+iskip)
end do
call MPI_SEND(iig,ncache,MPI_INTEGER,0,tag,MPI_COMM_IOGROUP,ierr)
end do
! Write refinement map
do ind=1,twotondim
iskip=ncoarse+(ind-1)*ngridmax
do i=1,ncache
iig(i)=flag1(ind_grid(i)+iskip)
end do
call MPI_SEND(iig,ncache,MPI_INTEGER,0,tag,MPI_COMM_IOGROUP,ierr)
end do
deallocate(xdp,iig,ind_grid)
end if
end do
end do
call stop_timer('AMR I/O processes backup',writing=.true.)
end subroutine backup_amr_send
subroutine backup_amr_recv
use amr_commons
use hydro_commons
use io_commons
use pm_commons
use mpi
implicit none
integer::count,i,idx,ierr,src
integer,parameter::tag=TAG_BAK_AMR
integer,dimension(MPI_STATUS_SIZE)::status
integer::ilun
integer::ilevel,ibound,ncache,idim,ind,iskip
integer,allocatable,dimension(:)::bisec_int,ind_grid,iig
logical,allocatable,dimension(:)::list_recv
real(dp),allocatable,dimension(:)::bisec_dp,xdp
character(LEN=5)::cpuchar,iochar,nchar
character(LEN=MAXLINE)::filename
if(verbose)write(*,*)'Entering backup_amr_recv'
allocate(list_recv(ncpu_iogroup-1))
list_recv(:)=.false.
count=0
do while(count<ncpu_iogroup-1)
! Allocate receive buffers
idx=19+(3*(ncpu+nboundary)+10)*nlevelmax+3*ncoarse
allocate(iig(idx))
allocate(xdp(19+2*noutput+ncpu))
! Select a source
call MPI_RECV(iig,idx,MPI_INTEGER,MPI_ANY_SOURCE,tag,MPI_COMM_IOGROUP,status,ierr)
src=status(MPI_SOURCE)
if(list_recv(src).EQV..false.)then
list_recv(src)=.true.
else
print *,'Error: unexpected message received by ',myid_world
call MPI_ABORT(MPI_COMM_WORLD,1,ierr)
end if
call MPI_RECV(xdp,19+2*noutput+ncpu,MPI_DOUBLE_PRECISION,src,tag, &
MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
! Generate filename
ilun=myid_world+10
call title(count_bak,nchar)
call title(iogroup2comp(src),cpuchar)
call title(myid_io,iochar)
filename='ionode_'//TRIM(iochar)//'/process_'//TRIM(cpuchar)//'/'
filename=TRIM(filename)//'amr_'//TRIM(nchar)//'.out'
filename=TRIM(filename)//TRIM(cpuchar)
nbfiles=nbfiles+1
filelist(nbfiles)=trim(filename)
open(unit=ilun,file=trim(scratchdir)//trim(filename),status="replace",form="unformatted",action="write",iostat=ierr)
if(ierr/=0)then
print *,'Error: open file failed in backup_amr_recv'
call MPI_ABORT(MPI_COMM_WORLD,1,ierr)
end if
!-----------------------------------
! Output restart dump file = amr.bak
!-----------------------------------
! Write grid variables
write(ilun)iig(1) !ncpu
write(ilun)iig(2) !ndim
write(ilun)iig(3),iig(4),iig(5) !nx,ny,nz
write(ilun)iig(6) !nlevelmax
write(ilun)iig(7) !ngridmax
write(ilun)iig(11) !nboundary
write(ilun)iig(14) !ngrid_current
write(ilun)xdp(1) !boxlen
! Write time variables
write(ilun)iig(8),iig(9),iig(10) !noutput,iout,ifout
write(ilun)xdp(19:18+noutput) !tout
write(ilun)xdp(19+noutput:18+2*noutput) !aout
write(ilun)xdp(2) !t
call MPI_RECV(dtold,iig(6),MPI_DOUBLE_PRECISION,src,tag, &
MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)dtold(1:nlevelmax)
call MPI_RECV(dtnew,iig(6),MPI_DOUBLE_PRECISION,src,tag, &
MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)dtnew(1:nlevelmax)
write(ilun)iig(12),iig(13) !nstep,nstep_coarse
write(ilun)xdp(3),xdp(4),xdp(5) !const,mass_tot_0,rho_tot
!omega_m,omega_l,omega_k,omega_b,h0,aexp_ini,boxlen_ini
write(ilun)xdp(6),xdp(7),xdp(8),xdp(9),xdp(10),xdp(11),xdp(12)
write(ilun)xdp(13),xdp(14),xdp(15),xdp(16),xdp(17) !aexp,hexp,aexp_old,epot_tot_int,epot_tot_old
write(ilun)xdp(18) !mass_sph
! Write levels variables
write(ilun)iig(20:19+ncpu*nlevelmax) !headl
write(ilun)iig(20+ ncpu*nlevelmax:19+2*ncpu*nlevelmax) !taill
write(ilun)iig(20+2*ncpu*nlevelmax:19+3*ncpu*nlevelmax) !numbl
iskip=0
do i=1,nlevelmax
idx=20+2*ncpu*nlevelmax
numblio(1:ncpu,i,src)=iig(iskip+idx:iskip+idx-1+ncpu)
iskip=iskip+ncpu
end do
write(ilun)iig(20+3*ncpu*nlevelmax:19+(3*ncpu+10)*nlevelmax) !numbtot
idx=20+(3*ncpu+10)*nlevelmax
! Write boundary linked list
if(simple_boundary)then
write(ilun)iig(idx:idx-1+nboundary*nlevelmax);idx=idx+nboundary*nlevelmax !headb
write(ilun)iig(idx:idx-1+nboundary*nlevelmax);idx=idx+nboundary*nlevelmax !tailb
write(ilun)iig(idx:idx-1+nboundary*nlevelmax) !numbb
iskip=0
do i=1,nlevelmax
numbb(1:nboundary,i)=iig(iskip+idx:iskip+idx-1+nboundary)
iskip=iskip+nboundary
end do
end if
! Write free memory
write(ilun)iig(15),iig(16),iig(17),iig(18),iig(19) !headf,tailf,numbf,used_mem,used_mem_tot
! Write cpu boundaries
write(ilun)ordering
if(ordering=='bisection') then
allocate(bisec_int(nbinodes*2),bisec_dp(max(nbinodes,ncpu*ndim)))
call MPI_RECV(bisec_dp,nbinodes,MPI_DOUBLE_PRECISION,src,tag, &
MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)bisec_dp(1:nbinodes) !bisec_wall(1:nbinodes)
call MPI_RECV(bisec_int,2*nbinodes,MPI_INTEGER,src,tag, &
MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)bisec_int(1:2*nbinodes) !bisec_next(1:nbinodes,1:2)
call MPI_RECV(bisec_int,nbinodes,MPI_INTEGER,src,tag, &
MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)bisec_int(1:nbinodes) !bisec_indx(1:nbinodes)
call MPI_RECV(bisec_dp,ncpu*ndim,MPI_DOUBLE_PRECISION,src,tag, &
MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)bisec_dp(1:ncpu*ndim) !bisec_cpubox_min(1:ncpu,1:ndim)
call MPI_RECV(bisec_dp,ncpu*ndim,MPI_DOUBLE_PRECISION,src,tag, &
MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)bisec_dp(1:ncpu*ndim) !bisec_cpubox_max(1:ncpu,1:ndim)
deallocate(bisec_int,bisec_dp)
else
write(ilun)xdp(19+2*noutput:19+2*noutput+ncpu) !bound_key
endif
! Write coarse level
write(ilun)iig(idx:idx-1+ncoarse);idx=idx+ncoarse !son
write(ilun)iig(idx:idx-1+ncoarse);idx=idx+ncoarse !flag1
write(ilun)iig(idx:idx-1+ncoarse) !cpu_map
deallocate(iig,xdp)
! Write fine levels
do ilevel=1,nlevelmax
do ibound=1,nboundary+ncpu
if(ibound<=ncpu)then
ncache=numblio(ibound,ilevel,src)
else
ncache=numbb(ibound-ncpu,ilevel)
end if
if(ncache>0)then
allocate(ind_grid(1:ncache),xdp(1:ncache),iig(1:ncache))
! Write grid index
call MPI_RECV(ind_grid,ncache,MPI_INTEGER,src,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)ind_grid
! Write next index
call MPI_RECV(iig,ncache,MPI_INTEGER,src,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)iig
! Write prev index
call MPI_RECV(iig,ncache,MPI_INTEGER,src,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)iig
! Write grid center
do idim=1,ndim
call MPI_RECV(xdp,ncache,MPI_DOUBLE_PRECISION,src,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)xdp
end do
! Write father index
call MPI_RECV(iig,ncache,MPI_INTEGER,src,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)iig
! Write nbor index
do ind=1,twondim
call MPI_RECV(iig,ncache,MPI_INTEGER,src,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)iig
end do
! Write son index
do ind=1,twotondim
call MPI_RECV(iig,ncache,MPI_INTEGER,src,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)iig
end do
! Write cpu map
do ind=1,twotondim
call MPI_RECV(iig,ncache,MPI_INTEGER,src,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)iig
end do
! Write refinement map
do ind=1,twotondim
call MPI_RECV(iig,ncache,MPI_INTEGER,src,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun)iig
end do
deallocate(xdp,iig,ind_grid)
end if
end do
end do
close(ilun)
count=count+1
end do
deallocate(list_recv)
end subroutine backup_amr_recv
subroutine output_info_send
use amr_commons
use hydro_commons
use io_parameters
use pm_commons
use mpi
implicit none
integer,parameter::tag=TAG_OUT_INF
integer::nx_loc,ny_loc,nz_loc,ierr
real(dp)::scale
real(dp)::scale_nH,scale_T2,scale_l,scale_d,scale_t,scale_v
real(kind=8),dimension(13)::msg
if(verbose)write(*,*)'Entering output_info_send'
! Conversion factor from user units to cgs units
call units(scale_l,scale_t,scale_d,scale_v,scale_nH,scale_T2)
! Local constants
nx_loc=nx; ny_loc=ny; nz_loc=nz
if(ndim>0)nx_loc=(icoarse_max-icoarse_min+1)
if(ndim>1)ny_loc=(jcoarse_max-jcoarse_min+1)
if(ndim>2)nz_loc=(kcoarse_max-kcoarse_min+1)
scale=boxlen/dble(nx_loc)
msg(1) = scale
msg(2) = t
msg(3) = aexp
msg(4) = omega_m
msg(5) = omega_l
msg(6) = omega_k
msg(7) = omega_b
msg(8) = scale_l
msg(9) = scale_d
msg(10) = scale_t
msg(11) = h0
msg(12) = nstep_coarse+0.1 !Not very clean but useful (one less message)
msg(13) = ndomain
call MPI_SEND(msg,size(msg),MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
if(ordering=='bisection') then
call MPI_SEND(bisec_cpubox_min,ncpu*ndim,MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
call MPI_SEND(bisec_cpubox_max,ncpu*ndim,MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
call MPI_SEND(bisec_cpu_load,ncpu, MPI_INTEGER, 0,tag,MPI_COMM_IOGROUP,ierr)
else
call MPI_SEND(bound_key(0:ncpu),ncpu+1,MPI_DOUBLE_PRECISION,0,tag,MPI_COMM_IOGROUP,ierr)
endif
end subroutine output_info_send
subroutine output_info_recv
use amr_commons
use hydro_commons
use io_commons
use pm_commons
use mpi
implicit none
integer::ilun,icpu,ierr,idom
integer,parameter::tag=TAG_OUT_INF
character(LEN=MAXLINE)::filename
character(LEN=5)::nchar,iochar
real(kind=8),dimension(13)::msg
if(verbose)write(*,*)'Entering output_info_recv'
call MPI_RECV(msg,size(msg),MPI_DOUBLE_PRECISION,1,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
ndomain = msg(13)
! Generate filename
ilun=100
call title(count_bak,nchar)
call title(myid_io,iochar)
filename='ionode_'//TRIM(iochar)//'/process_00001/'
filename=TRIM(filename)//'info_'//TRIM(nchar)//'.txt'
nbfiles=nbfiles+1
filelist(nbfiles)=trim(filename)
open(unit=ilun,file=trim(scratchdir)//trim(filename),status="replace",form="formatted",action="write",iostat=ierr)
if(ierr/=0)then
print *,'Error: open file failed in output_info_recv'
print *,'filename=',trim(filename)
call MPI_ABORT(MPI_COMM_WORLD,1,ierr)
end if
! Write run parameters
write(ilun,'("ncpu =",I11)')ncpu
write(ilun,'("ndim =",I11)')ndim
write(ilun,'("levelmin =",I11)')levelmin
write(ilun,'("levelmax =",I11)')nlevelmax
write(ilun,'("ngridmax =",I11)')ngridmax
write(ilun,'("nstep_coarse=",I11)')int(msg(12)) !nstep_coarse
write(ilun,*)
! Write physical parameters
write(ilun,'("boxlen =",E23.15)')msg(1) !scale
write(ilun,'("time =",E23.15)')msg(2) !t
write(ilun,'("aexp =",E23.15)')msg(3) !aexp
write(ilun,'("H0 =",E23.15)')msg(11) !h0
write(ilun,'("omega_m =",E23.15)')msg(4) !omega_m
write(ilun,'("omega_l =",E23.15)')msg(5) !omega_l
write(ilun,'("omega_k =",E23.15)')msg(6) !omega_k
write(ilun,'("omega_b =",E23.15)')msg(7) !omega_b
write(ilun,'("unit_l =",E23.15)')msg(8) !scale_l
write(ilun,'("unit_d =",E23.15)')msg(9) !scale_d
write(ilun,'("unit_t =",E23.15)')msg(10) !scale_t
write(ilun,*)
! Write ordering information
write(ilun,'("ordering type=",A80)')ordering
if(ordering=='bisection') then
allocate(bisec_cpubox_min(ncpu,ndim))
allocate(bisec_cpubox_max(ncpu,ndim))
allocate(bisec_cpu_load(ncpu))
call MPI_RECV(bisec_cpubox_min,ncpu*ndim,MPI_DOUBLE_PRECISION,1,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
call MPI_RECV(bisec_cpubox_max,ncpu*ndim,MPI_DOUBLE_PRECISION,1,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
call MPI_RECV(bisec_cpu_load,ncpu,MPI_INTEGER,1,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
do icpu=1,ncpu
! write 2*ndim floats for cpu bound box
write(ilun,'(E23.15)')bisec_cpubox_min(icpu,:),bisec_cpubox_max(icpu,:)
! write 1 float for cpu load
write(ilun,'(E23.15)')dble(bisec_cpu_load(icpu))
end do
deallocate(bisec_cpubox_min,bisec_cpubox_max,bisec_cpu_load)
else
allocate(bound_key(0:ncpu))
call MPI_RECV(bound_key(0:ncpu),ncpu+1,MPI_DOUBLE_PRECISION,1,tag,MPI_COMM_IOGROUP,MPI_STATUS_IGNORE,ierr)
write(ilun,'(" DOMAIN ind_min ind_max")')
do idom=1,ndomain
write(ilun,'(I8,1X,E23.15,1X,E23.15)')idom,bound_key(idom-1),bound_key(idom)
end do
deallocate(bound_key)
endif
close(ilun)
end subroutine output_info_recv
#endif
#endif
|
lemma nonzero_norm_inverse: "a \<noteq> 0 \<Longrightarrow> norm (inverse a) = inverse (norm a)" for a :: "'a::real_normed_div_algebra" |
Require Export Relations Wellfounded.
Require Import Sat.
Require Import ZF ZFcoc ZFuniv_real.
Require Import ZFlambda.
Require Import Models SnModels.
Require GenRealSN.
Set Implicit Arguments.
(** Strong normalization proof of the Calculus of Constructions.
It is based on GenRealSN, so it does support strong eliminations.
Inhabitation of all types is obtained by adding the empty set in every
type (cf ZFuniv_real). The product is interpreted by the set of *partial*
functions.
*)
Module SN := GenRealSN.MakeModel CC_Real.
Export SN.
Hint Unfold inX.
Existing Instance in_ext.
(** Derived properties *)
Notation daimont := Sat.SatSet.daimon.
Lemma val_ok_cons_default e T i j :
val_ok e i j ->
T <> kind ->
val_ok (T::e) (V.cons empty i) (I.cons daimont j).
intros.
apply vcons_add_var; trivial.
split.
red; auto.
apply varSAT.
Qed.
Lemma El_int_prod U V i :
El (int (Prod U V) i) == cc_prod (El (int U i)) (fun x => El (int V (V.cons x i))).
simpl.
apply El_prod.
do 2 red; intros.
rewrite H0; reflexivity.
Qed.
Lemma El_int_arr U V i :
El (int (Prod U (lift 1 V)) i) == cc_arr (El (int U i)) (El (int V i)).
rewrite El_int_prod.
apply cc_prod_morph; auto with *.
red; intros.
rewrite int_cons_lift_eq; reflexivity.
Qed.
Lemma Real_int_prod U V i f :
f ∈ cc_prod (El (int U i)) (fun x => El (int V (V.cons x i))) ->
eqSAT (Real (int (Prod U V) i) f)
(piSAT (int U i) (fun x => int V (V.cons x i)) (cc_app f)).
simpl; intros.
apply Real_prod.
do 2 red; intros.
rewrite H1; reflexivity.
change (f ∈ El (int (Prod U V) i)).
rewrite El_int_prod; trivial.
Qed.
Lemma Real_int_arr U V i f :
f ∈ cc_arr (El (int U i)) (El (int V i)) ->
eqSAT (Real (int (Prod U (lift 1 V)) i) f)
(piSAT (int U i) (fun _ => int V i) (cc_app f)).
intros.
rewrite Real_int_prod.
apply piSAT_morph; auto with *.
red; intros.
apply int_cons_lift_eq.
red; intros; apply cc_app_morph; auto with *.
revert H; apply eq_elim; apply cc_prod_ext; auto with *.
red; intros.
symmetry; apply El_morph; apply int_cons_lift_eq.
Qed.
Lemma kind_ok_trivial T : kind_ok T.
exists nil.
exists T; simpl; auto with *.
exists empty; auto with *.
Qed.
Hint Resolve kind_ok_trivial.
(** ** Extendability *)
Definition cst (x:set) : term.
(* begin show *)
left; exists (fun _ =>x) (fun _ =>Lambda.K).
(* end show *)
do 2 red; reflexivity.
do 2 red; reflexivity.
red; reflexivity.
red; reflexivity.
Defined.
Definition mkSET (x:set) := cst (mkTY x (fun _ => snSAT)).
Lemma mkSET_kind e x :
typ e (mkSET x) kind.
red; intros.
split;[discriminate|].
split; trivial.
apply Lambda.sn_K.
Qed.
Lemma cst_typ e x y :
in_set x y ->
typ e (cst x) (mkSET y).
red; intros.
apply in_int_intro; intros; try discriminate.
apply and_split; intros.
simpl.
red; rewrite El_def.
apply union2_intro2; trivial.
simpl.
rewrite Real_def.
apply Lambda.sn_K.
reflexivity.
apply union2_intro2; trivial.
Qed.
Lemma cst_eq_typ e x y :
x == y ->
eq_typ e (cst x) (cst y).
red; simpl; intros; trivial.
Qed.
Lemma cst_eq_typ_inv x y :
eq_typ nil (cst x) (cst y) ->
x == y.
intros.
assert (val_ok nil (V.nil empty) (I.nil Lambda.K)).
red; intros.
destruct n; inversion H0.
apply H in H0.
simpl in H0; trivial.
Qed.
Lemma mkSET_eq_typ e x y :
x == y ->
eq_typ e (mkSET x) (mkSET y).
red; simpl; intros; trivial.
apply mkTY_ext; auto with *.
Qed.
Lemma mkSET_eq_typ_inv x y :
eq_typ nil (mkSET x) (mkSET y) ->
x == y.
intros.
assert (val_ok nil (V.nil empty) (I.nil Lambda.K)).
red; intros.
destruct n; inversion H0.
apply H in H0.
simpl in H0.
apply couple_injection in H0; destruct H0; trivial.
Qed.
Definition sub_typ_covariant : forall e U1 U2 V1 V2,
U1 <> kind ->
eq_typ e U1 U2 ->
sub_typ (U1::e) V1 V2 ->
sub_typ e (Prod U1 V1) (Prod U2 V2).
intros.
apply sub_typ_covariant; trivial.
unfold eqX, inX; intros.
rewrite El_prod in H3; trivial.
apply cc_eta_eq in H3; trivial.
Qed.
(** ** Choice *)
(*Require Import ZFcoc SATtypes.
Module Lc:=Lambda.
Definition Ch (X:term) : term.
(* begin show *)
left;
exists (fun i => mkTY (trunc (El(int X i)))
(fun _ => depSAT(fun Y=>forall x,x ∈El(int X i)->
inclSAT(cartSAT(Real (int X i) x)unitSAT) Y)
(fun Y=>Y)))
(fun j => tm X j).
(* end show *)
do 2 red; intros.
apply mkTY_ext; intros.
rewrite H; auto with *.
apply interSAT_morph_subset; simpl; intros; auto with *.
apply fa_morph; intros z.
rewrite H; reflexivity.
do 2 red; intros; apply tm_morph; auto with *.
red; intros; apply tm_liftable.
red; intros; apply tm_substitutive.
Defined.
Definition ChI (W:term) : term.
(* begin show *)
left; exists (fun i => empty) (fun j => COUPLE (tm W j) ID).
(* end show *)
do 2 red; intros; reflexivity.
do 2 red; intros.
f_equal; trivial.
apply tm_morph; auto with *.
(**)
red; intros.
unfold COUPLE; simpl.
rewrite tm_liftable.
rewrite Lc.permute_lift; reflexivity.
(**)
red; intros.
unfold COUPLE; simpl.
rewrite tm_substitutive.
rewrite Lc.commut_lift_subst; reflexivity.
Defined.
Lemma ChI_typ e X W :
X <> kind ->
typ e W X ->
typ e (ChI W) (Ch X).
unfold typ.
intros Xnk tyW; intros.
apply in_int_intro; try discriminate.
apply and_split; simpl; intros.
red; auto.
red in H0; rewrite El_def in H0.
specialize tyW with (1:=H).
apply in_int_not_kind in tyW; trivial.
destruct tyW.
rewrite Real_def; intros; auto.
2:apply interSAT_morph_subset; simpl; auto with *.
apply interSAT_intro.
exists snSAT; intros.
red; intros; apply snSAT_intro.
apply sat_sn in H4; trivial.
intros (Y,?); simpl.
apply i0 with (int W i); trivial.
apply cartSAT_intro; trivial.
apply ID_intro.
Qed.
Definition ChE (X C:term) : term.
left; exists (fun i => ZFrepl.uchoice (fun x => x ∈ Elt (int X i)))
(fun j => Lc.App (tm C j) (Lc.Abs (Lc.Abs (Lc.Ref 1)))).
admit.
admit.
admit.
admit.
Defined.
Lemma ChE_typ e W X :
X <> kind ->
typ e W (Ch X) ->
typ e (ChE X W) X.
unfold typ; intros Xnk tyW i j valok.
specialize tyW with (1:=valok).
apply in_int_not_kind in tyW;[|discriminate].
destruct tyW as (tyW,satW).
red in tyW; simpl in tyW; rewrite El_def in tyW.
simpl in satW; rewrite Real_def in satW; trivial.
apply in_int_intro; trivial; try discriminate.
apply and_split; intros.
red; simpl.
apply cc_bot_intro.
apply ZFrepl.uchoice_def.
split.
intros.
rewrite <- H; trivial.
split; intros.
admit.
admit.
simpl.
red in H; simpl in H.
set (w:=ZFrepl.uchoice (fun x => x ∈ Elt(int X i))) in *.
clearbody w.
apply depSAT_elim' in satW.
red in satW.
eapply cartSAT_case with (X:=Real(int X i) w) (Y:=unitSAT).
apply satW; intros.
intros ? h; apply h.
reflexivity.
eexact (fun _ h => h).
assert (inSAT (tm W j) ; rewrite El_def in H.
split.
(** ** Unique choice *)
Definition Tr (X:term) : term.
(* begin show *)
left;
exists (fun i => mkTY (ZFcoc.trunc (El(int X i)))
(fun _ => interSAT(fun Y:{Y|forall x,x ∈El(int X i)->
inclSAT(Real (int X i) x) Y}=>proj1_sig Y)))
(fun j => tm X j).
(* end show *)
do 2 red; intros.
apply mkTY_ext; intros.
rewrite H; auto with *.
apply interSAT_morph_subset; simpl; intros; auto with *.
apply fa_morph; intros z.
rewrite H; reflexivity.
do 2 red; intros; apply tm_morph; auto with *.
red; intros; apply tm_liftable.
red; intros; apply tm_substitutive.
Defined.
Definition TrI (W:term) : term.
(* begin show *)
left; exists (fun i => empty) (fun j => tm W j).
(* end show *)
do 2 red; intros; reflexivity.
do 2 red; intros; apply tm_morph; auto with *.
red; intros; apply tm_liftable.
red; intros; apply tm_substitutive.
Defined.
Lemma TrI_typ e X W :
X <> kind ->
typ e W X ->
typ e (TrI W) (Tr X).
unfold typ.
intros Xnk tyW; intros.
apply in_int_intro; try discriminate.
apply and_split; simpl; intros.
red; auto.
red in H0; rewrite El_def in H0.
specialize tyW with (1:=H).
apply in_int_not_kind in tyW; trivial.
destruct tyW.
rewrite Real_def; intros; auto.
apply interSAT_intro' with (F:=fun X=>X); intros.
apply sat_sn in H2; trivial.
apply (H3 (int W i)); trivial.
apply interSAT_morph_subset; simpl; auto with *.
Qed.
Definition TrE (X P F W:term) : term.
(* begin show *)
left; exists (fun i => cond_set (int W i ∈ Elt (int (Tr X) i))
(trunc_descr (El(int P i))))
(fun j => Lambda.App (tm F j) (tm W j)).
(* end show *)
do 2 red; intros; rewrite H; reflexivity.
do 2 red; intros; rewrite H; reflexivity.
red; intros.
do 2 rewrite tm_liftable.
reflexivity.
red; intros.
do 2 rewrite tm_substitutive.
reflexivity.
Defined.
Definition EQ A t1 t2 :=
Prod (Prod A prop) (Prod (App (Ref 0) (lift 1 t1)) (App (Ref 1) (lift 2 t2))).
Definition IsProp X :=
Prod X (Prod (lift 1 X) (EQ (lift 2 X) (Ref 1) (Ref 0))).
Lemma TrE_typ e X P Pp F W :
P <> kind ->
typ e Pp (IsProp P) ->
typ e F (Prod X (lift 1 P)) ->
typ e W (Tr X) ->
typ e (TrE X P F W) P.
unfold typ; intros Pnk tyPp tyF tyW i j valok.
specialize tyPp with (1:=valok); apply in_int_not_kind in tyPp;[|discriminate].
specialize tyF with (1:=valok); apply in_int_not_kind in tyF;[|discriminate].
specialize tyW with (1:=valok); apply in_int_not_kind in tyW;[|discriminate].
clear valok.
destruct tyPp as (isPp,_).
destruct tyF as (tyF,satF).
destruct tyW as (tyW,satW).
apply in_int_intro; trivial; try discriminate.
split; simpl.
red.
rewrite Elt_def.
red in tyW; simpl in tyW; rewrite El_def in tyW.
apply cc_bot_ax in tyW; destruct tyW.
admit.
rewrite cond_set_ok; trivial.
apply trunc_ind with (El(int X i)) (fun x => cc_app (int F i) x) (int W i); trivial.
admit. (*!*)
red; intros.
admit.
rewrite Elt_def.
red; intros.
*)
(***********************************************************************************************)
(** * Consistency out of the strong normalization model *)
(** Another consistency proof. *)
Theorem consistency : forall M, ~ typ List.nil M (Prod prop (Ref 0)).
red; intros.
apply model_consistency with (FF:=mkTY (singl prf_trm) (fun _ => neuSAT)) in H;
trivial.
apply sn_sort_intro.
reflexivity.
apply one_in_props.
intros.
red in H0; rewrite El_def in H0.
rewrite Real_def; auto with *.
Qed.
Print Assumptions consistency.
|
import numpy as np
import scipy.stats as stats
from .unit_conversions import lin_to_db
from itertools import permutations
import os
def init_output_dir(subdir=''):
"""
Create the output directory for figures, if needed, and return address as a prefix string
:return: path to output directory
"""
# Set up directory and filename for figures
dir_nm = 'figures'
if not os.path.exists(dir_nm):
os.makedirs(dir_nm)
# Make the requested subfolder
dir_nm = os.path.join(dir_nm, subdir)
if not os.path.exists(dir_nm):
os.makedirs(dir_nm)
return dir_nm + os.sep
def sinc_derivative(x):
"""
    Returns the derivative of sinc(x), which is given by
            y = (x * cos(x) - sin(x)) / x^2
for x ~= 0. When x=0, y=0. The input is in radians.
NOTE: The MATLAB sinc function is defined sin(pi*x)/(pi*x). Its usage
will be different. For example, if calling
y = sinc(x)
then the corresponding derivative will be
z = sinc_derivative(pi*x);
Ported from MATLAB code.
Nicholas O'Donoughue
9 January 2021
:param x: input, radians
:return x_dot: derivative of sinc(x), in radians
"""
# Apply the sinc derivative where the mask is valid, and a zero where it is not
return np.piecewise(x,
[x == 0],
[0, lambda z: (z * np.cos(z) - np.sin(z)) / (z ** 2)])
def make_taper(taper_len: int, taper_type: str):
"""
Generate an amplitude taper of length N, according to the desired taperType, and optional set of parameters
For discussion of these, and many other windows, see the Wikipedia page:
https://en.wikipedia.org/wiki/Window_function/
Ported from MATLAB Code.
Nicholas O'Donoughue
16 January 2021
:param taper_len: Length of the taper
    :param taper_type: String describing the type of taper desired. Supported options are: "uniform", "cosine",
                       "hann", "hamming", and "blackman-harris"
:return w: Set of amplitude weights [0-1]
:return snr_loss: SNR Loss of peak return, w.r.t. uniform taper
"""
# Some constants/utilities for the window functions
def idx_centered(x):
return np.arange(x) - (x - 1) / 2
switcher = {'uniform': lambda x: np.ones(shape=(x,)),
'cosine': lambda x: np.sin(np.pi / (2 * x)) * np.cos(np.pi * (np.arange(x) - (x - 1) / 2) / x),
'hann': lambda x: np.cos(np.pi * idx_centered(x) / x) ** 2,
'hamming': lambda x: .54 + .46 * np.cos(2 * np.pi * idx_centered(x) / x),
'blackman-harris': lambda x: .42 + .5 * np.cos(2 * np.pi * idx_centered(x) / x)
+ .08 * np.cos(4 * np.pi * idx_centered(x) / x)
}
# Generate the window
taper_type = taper_type.lower()
if taper_type in switcher:
w = switcher[taper_type](taper_len)
else:
raise KeyError('Unrecognized taper type ''{}''.'.format(taper_type))
# Set peak to 1
w = w / np.max(np.fabs(w))
# Compute SNR Loss, rounded to the nearest hundredth of a dB
snr_loss = np.around(lin_to_db(np.sum(np.fabs(w) / taper_len)), decimals=2)
return w, snr_loss
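# Illustrative usage of make_taper (a minimal sketch; the exact weight values depend on the
# window formulas above):
#
#   w, snr_loss = make_taper(64, 'hamming')
#   # w is a length-64 array of amplitude weights normalized to a peak of 1.0, and snr_loss is
#   # the SNR loss (in dB, rounded to 0.01 dB) of the tapered response relative to a uniform taper.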
def parse_reference_sensor(ref_idx, num_sensors):
"""
Accepts a reference index setting (either None, a scalar integer, or a 2 x N array of sensor pairs),
and returns matching vectors for test and reference indices.
:param ref_idx: reference index setting
:param num_sensors: Number of available sensors
:return test_idx_vec:
:return ref_idx_vec:
"""
if ref_idx is None:
# Default behavior is to use the last sensor as a common reference
test_idx_vec = np.asarray([i for i in np.arange(num_sensors - 1)])
ref_idx_vec = np.array([num_sensors - 1])
elif ref_idx == 'full':
# Generate all possible sensor pairs
        # Materialize the permutation generator so it can be traversed twice below
        perm = list(permutations(np.arange(num_sensors), 2))
test_idx_vec = np.asarray([x[0] for x in perm])
ref_idx_vec = np.asarray([x[1] for x in perm])
elif np.isscalar(ref_idx):
# Scalar reference index, use all other sensors as test sensors
test_idx_vec = np.asarray([i for i in np.arange(num_sensors) if i != ref_idx])
ref_idx_vec = ref_idx
else:
# Pair of vectors; first row is test sensors, second is reference
test_idx_vec = ref_idx[0, :]
ref_idx_vec = ref_idx[1, :]
return test_idx_vec, ref_idx_vec
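# Example behavior of parse_reference_sensor (a small illustrative sketch, hypothetical sensor counts):
#
#   parse_reference_sensor(None, 4)    ->  test = [0, 1, 2],          ref = [3]
#   parse_reference_sensor('full', 3)  ->  test = [0, 0, 1, 1, 2, 2], ref = [1, 2, 0, 2, 0, 1]
#   parse_reference_sensor(1, 3)       ->  test = [0, 2],             ref = 1
#
# Callers (e.g. resample_covariance_matrix below) broadcast the two vectors against each other,
# so a scalar or length-1 reference index pairs with every test index.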
def resample_covariance_matrix(cov, test_idx_vec, ref_idx_vec=None, test_weights=None, ref_weights=None):
"""
Resample a covariance matrix based on a set of reference and test indices. This assumes a linear combination
of the test and reference vectors. The output is an n_pair x n_pair covariance matrix for the n_pair linear
combinations.
In the resampled covariance matrix, the i,j-th entry is given
[cov_out]_ij = [cov]_bi,bj + [cov]_ai,aj - [cov]_ai,bj - [cov]_bi,aj
where: a_i, a_j are the i-th and j-th reference indices
b_i, b_j are the i-th and j-th test indices
C is the input covariance matrix
If any elements of the test_idx_vec or ref_idx_vec are set to nan, then those elements will be ignored for
covariance matrix resampling. This is used to correspond either to perfect (noise-free) measurements, or to single
sensor measurements, such as AoA, that do not require comparison with a second sensor measurement.
Nicholas O'Donoughue
21 February 2021
:param cov: n_sensor x n_sensor array representing the covariance matrix for input data. Optional: if input is a
1D array, it is assumed to be a diagonal matrix.
:param test_idx_vec: n_pair x 1 array of indices for the 'test' sensor in each pair -- or -- a valid reference
index input to parse_reference_sensor.
:param ref_idx_vec: n_pair x 1 array of indices for the 'reference' sensor in each pair. Set to None (or do not
provide) if test_idx_vec is to be passed to parse_reference_sensor.
:param test_weights: Optional, applies a scale factor to the test measurements
:param ref_weights: Optional, applies a scale factor to the reference measurements
:return:
"""
    # If the covariance matrix was supplied as a 1D array, treat it as the diagonal of a matrix
    if np.ndim(cov) == 1:
        cov = np.diag(cov)
    # Determine the sizes
    n_sensor = np.shape(cov)[0]
    # Parse the reference and test index vectors first, so that the size and bounds checks below
    # operate on fully expanded index vectors
    if ref_idx_vec is None:
        # Only one was provided; it must be fed to parse_reference_sensor to generate the matched pair of vectors
        test_idx_vec, ref_idx_vec = parse_reference_sensor(test_idx_vec, n_sensor)
    shp_test = np.size(test_idx_vec)
    shp_ref = np.size(ref_idx_vec)
    n_pair_out = np.fmax(shp_test, shp_ref)
    if 1 < shp_test != shp_ref > 1:
        raise TypeError("Error calling covariance matrix resample. "
                        "Reference and test vectors must have the same shape.")
    if np.any(test_idx_vec > n_sensor) or np.any(ref_idx_vec > n_sensor):
        raise TypeError("Error calling covariance matrix resample. "
                        "Indices exceed the dimensions of the covariance matrix.")
# Parse sensor weights
shp_test_wt = 1
if test_weights:
shp_test_wt = np.size(test_weights)
shp_ref_wt = 1
if ref_weights:
shp_ref_wt = np.size(ref_weights)
# Initialize output
cov_out = np.zeros((n_pair_out, n_pair_out))
a_i_wt = 1.
a_j_wt = 1.
b_i_wt = 1.
b_j_wt = 1.
# Step through reference sensors
for idx_row in np.arange(n_pair_out):
a_i = test_idx_vec[idx_row % shp_test]
b_i = ref_idx_vec[idx_row % shp_ref]
if test_weights:
a_i_wt = test_weights[idx_row % shp_test_wt]
if ref_weights:
b_i_wt = ref_weights[idx_row % shp_ref_wt]
for idx_col in np.arange(n_pair_out):
a_j = test_idx_vec[idx_col % shp_test]
b_j = ref_idx_vec[idx_col % shp_ref]
if test_weights:
a_j_wt = test_weights[idx_col % shp_test_wt]
if ref_weights:
b_j_wt = ref_weights[idx_col % shp_ref_wt]
# Parse input covariances
if np.isnan(b_i) or np.isnan(b_j):
cov_bibj = 0.
else:
cov_bibj = cov[b_i, b_j]
if np.isnan(a_i) or np.isnan(a_j):
cov_aiaj = 0.
else:
cov_aiaj = cov[a_i, a_j]
if np.isnan(a_i) or np.isnan(b_j):
cov_aibj = 0.
else:
cov_aibj = cov[a_i, b_j]
if np.isnan(b_i) or np.isnan(a_j):
cov_biaj = 0.
else:
cov_biaj = cov[b_i, a_j]
# [cov_out]_ij = [cov]_bi,bj + [cov]_ai,aj - [cov]_ai,bj - [cov]_bi,aj
# Put it together with the weights
cov_out[idx_row, idx_col] = b_i_wt * b_j_wt * cov_bibj + \
a_i_wt * a_j_wt * cov_aiaj - \
a_i_wt * b_j_wt * cov_aibj - \
b_i_wt * a_j_wt * cov_biaj
return cov_out
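# Worked example for the default reference (last sensor used as a common reference); the numbers
# are hypothetical and simply follow the resampling formula in the docstring:
#
#   cov = np.diag([1., 2., 3.])            # independent sensor-level variances
#   resample_covariance_matrix(cov, None)
#   # -> array([[4., 3.],
#   #           [3., 5.]])
#
# Row/column k corresponds to the pair (test sensor k, reference sensor 2): the diagonal sums the
# two sensor variances, and the off-diagonal term is the shared reference variance.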
def ensure_invertible(covariance, epsilon=1e-10):
"""
Check the input matrix by finding the eigenvalues and checking that they are all >= a small value
(epsilon), to ensure that it can be inverted.
If any of the eigenvalues are too small, then a diagonal loading term is applied to ensure that the matrix is
positive definite (all eigenvalues are >= epsilon).
Ported from MATLAB code.
Nicholas O'Donoughue
5 Sept 2021
:param covariance: 2D (nDim x nDim) covariance matrix. If >2 dimensions, the process is repeated for each.
:param epsilon: numerical precision term (the smallest eigenvalue must be >= epsilon) [Default = 1e-10]
:return covariance_out: Modified covariance matrix that is guaranteed invertible
"""
# Check input dimensions
sz = np.shape(covariance)
assert len(sz) > 1, 'Input must have at least two dimensions.'
assert sz[0] == sz[1], 'First two dimensions of input matrix must be equal.'
dim = sz[0]
if len(sz) > 2:
n_matrices = np.prod(sz[2:])
else:
n_matrices = 1
    # Iterate across matrices (dimensions >2); work on a (dim x dim x n_matrices) view so that
    # 2D inputs and inputs with more than three dimensions are handled uniformly
    cov_work = np.reshape(covariance, (dim, dim, n_matrices))
    cov_out = np.zeros(shape=(dim, dim, n_matrices))
    for idx_matrix in np.arange(n_matrices):
        # Isolate the current covariance matrix (copy, so the input is not modified in place)
        this_cov = np.array(cov_work[:, :, idx_matrix])
        # Eigen-decomposition
        lam, v = np.linalg.eig(this_cov)
        # Initialize the diagonal loading term
        d = epsilon * np.eye(N=dim)
        # Repeat until the smallest eigenvalue is larger than epsilon
        while np.amin(lam) < epsilon:
            # Add the diagonal loading term
            this_cov = this_cov + d
            # Re-examine the eigenvalues
            lam, v = np.linalg.eig(this_cov)
            # Increase the magnitude of diagonal loading (for the next iteration)
            d *= 10.0
        # Store the modified covariance matrix in the output
        cov_out[:, :, idx_matrix] = this_cov
    # Restore the original shape
    return np.reshape(cov_out, sz)
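# Minimal usage sketch for ensure_invertible (illustrative only):
#
#   cov = np.array([[1., 1.], [1., 1.]])[:, :, np.newaxis]   # rank-deficient 2x2 covariance
#   cov_fixed = ensure_invertible(cov)
#   # cov_fixed differs from cov only by a small diagonal loading term, chosen so that the
#   # smallest eigenvalue is at least epsilon and the matrix can safely be inverted.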
def make_pdfs(measurement_function, measurements, pdf_type='MVN', covariance=1):
"""
Generate a joint PDF or set of unitary PDFs representing the measurements, given the measurement_function,
covariance matrix and pdf_type
The only currently supported pdf types are:
'mvn' multivariate normal
'normal' normal (each measurement is independent)
Ported from MATLAB Code
Nicholas O'Donoughue
16 January 2021
:param measurement_function: A single function handle that will accept an nDim x nSource array of candidate emitter
positions and return a num_measurement x num_source array of measurements that those
emitters are expected to have generated.
:param measurements: The received measurements
:param pdf_type: The type of distribution to assume.
:param covariance: Array of covariances (num_measurement x 1 for normal, num_measurement x num_measurement for
multivariate normal)
:return pdfs: List of function handles, each of which accepts an nDim x nSource array of candidate source
positions, and returns a 1 x nSource array of probabilities.
"""
if pdf_type is None:
pdf_type = 'mvn'
if pdf_type.lower() == 'mvn' or pdf_type.lower() == 'normal':
pdfs = [lambda x: stats.multivariate_normal.pdf(measurement_function(x), mean=measurements, cov=covariance)]
else:
raise KeyError('Unrecognized PDF type setting: ''{}'''.format(pdf_type))
return pdfs
def print_elapsed(t_elapsed):
"""
Print the elapsed time, provided in seconds.
Nicholas O'Donoughue
6 May 2021
:param t_elapsed: elapsed time, in seconds
"""
hrs_elapsed = np.floor(t_elapsed / 3600)
minutes_elapsed = np.floor((t_elapsed - 3600 * hrs_elapsed) / 60)
secs_elapsed = t_elapsed - hrs_elapsed * 3600 - minutes_elapsed * 60
print('Elapsed Time: {} hrs, {} min, {} sec'.format(hrs_elapsed, minutes_elapsed, secs_elapsed))
def print_predicted(t_elapsed, pct_elapsed, do_elapsed=False):
"""
Print the elapsed and predicted time, provided in seconds.
Nicholas O'Donoughue
6 May 2021
:param t_elapsed: elapsed time, in seconds
:param pct_elapsed:
:param do_elapsed:
"""
if do_elapsed:
hrs_elapsed = np.floor(t_elapsed / 3600)
minutes_elapsed = (t_elapsed - 3600 * hrs_elapsed) / 60
print('Elapsed Time: {} hrs, {:.2f} min. '.format(hrs_elapsed, minutes_elapsed), end='')
t_remaining = t_elapsed * (1 - pct_elapsed) / pct_elapsed
hrs_remaining = np.floor(t_remaining / 3600)
minutes_remaining = (t_remaining - 3600 * hrs_remaining) / 60
print('Estimated Time Remaining: {} hrs, {:.2f} min'.format(hrs_remaining, minutes_remaining))
def safe_2d_shape(x: np.array) -> np.array:
"""
Compute the 2D shape of the input, x, safely. Avoids errors when the input is a 1D array (in which case, the
second output is 1). Any dimensions higher than the second are ignored.
Nicholas O'Donoughue
19 May 2021
:param x: ND array to determine the size of.
:return dim1: length of first dimension
:return dim2: length of second dimension
"""
    if x.ndim > 2:
        # More than two dimensions; keep the first two and drop the rest
        dim1, dim2 = np.shape(x)[0:2]
elif x.ndim > 1:
# 2D array
dim1, dim2 = np.shape(x)
else:
# 1D array
dim1 = np.size(x)
dim2 = 1
return dim1, dim2
def make_nd_grid(x_ctr, max_offset, grid_spacing):
"""
Create and return an ND search grid, based on the specified center of the search space, extent, and grid spacing.
28 December 2021
Nicholas O'Donoughue
:param x_ctr: ND array of search grid center, for each dimension. The size of x_ctr dictates how many dimensions
there are
:param max_offset: scalar or ND array of the extent of the search grid in each dimension, taken as the one-sided
maximum offset from x_ctr
:param grid_spacing: scalar or ND array of grid spacing in each dimension
    :return x_set: (num_points x n_dim) array of candidate grid points, one row per point
    :return x_grid: list of ND coordinate arrays (meshgrid output), one per dimension
    :return n_elements: array with the number of grid points along each dimension
"""
n_dim = np.size(x_ctr)
if n_dim < 1 or n_dim > 3:
raise AttributeError('Number of spatial dimensions must be between 1 and 3')
if np.size(max_offset) == 1:
max_offset = max_offset * np.ones((n_dim, ))
if np.size(grid_spacing) == 1:
grid_spacing = grid_spacing * np.ones((n_dim, ))
assert n_dim == np.size(max_offset) and n_dim == np.size(grid_spacing), \
'Search space dimensions do not match across specification of the center, search_size, and epsilon.'
n_elements = np.fix(1 + 2 * max_offset / grid_spacing).astype(int)
# Check Search Size
max_elements = 1e8 # Set a conservative limit
assert np.prod(n_elements) < max_elements, \
'Search size is too large; python is likely to crash or become unresponsive. Reduce your search size, or' \
+ ' increase the max allowed.'
# Make a set of axes, one for each dimension, that are centered on x_ctr
    dims = [x + np.linspace(start=-x_max, stop=x_max, num=n) for (x, x_max, n)
            in zip(x_ctr, max_offset, n_elements)]
# Use meshgrid expansion; each element of x_grid is now a full n_dim dimensioned grid
x_grid = np.meshgrid(*dims)
# Rearrange to a single 2D array of grid locations (n_dim x N)
x_set = np.asarray([x.flatten() for x in x_grid]).T
return x_set, x_grid, n_elements
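# Minimal usage sketch for make_nd_grid (illustrative values):
#
#   x_set, x_grid, n_elem = make_nd_grid(x_ctr=np.array([0., 0.]), max_offset=10., grid_spacing=5.)
#   # n_elem == [5, 5]; x_set is a (25 x 2) array of candidate grid points covering
#   # [-10, 10] x [-10, 10] at the requested spacing, centered on x_ctr.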
|
#' BIB Tests
#'
#' @name BIBtests
#' @useDynLib BIBtests
NULL
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj28eqsynthconj3 : forall (lv0 : natural), (@eq natural (Succ lv0) (plus lv0 (Succ Zero))).
Admitted.
QuickChick conj28eqsynthconj3.
|
SUBROUTINE CALQQ1 (ISTL_)
! ** SUBROUTINE CALQQ CALCULATES THE TURBULENT INTENSITY SQUARED AT
! ** TIME LEVEL (N+1). THE VALUE OF ISTL INDICATES THE NUMBER OF
! ** TIME LEVELS INVOLVED
!----------------------------------------------------------------------C
! CHANGE RECORD
! DATE MODIFIED BY DESCRIPTION
!----------------------------------------------------------------------!
! 2015-06 PAUL M. CRAIG IMPLEMENTED SIGMA-Z (SGZ) IN EE7.3
! 2015-02 PAUL M. CRAIG ADDED THE LWET BYPASS APPROACH AND OMP
USE GLOBAL
IMPLICIT NONE
INTEGER,INTENT(IN) :: ISTL_
INTEGER :: L,K,LS,LN,LE,LW,LF,ND,LL,LP
REAL :: BETAVEG_P=1.0, BETAVEG_D=5.1,CE4VEG=0.9 !from Katul et al. 2003
REAL :: BETASUP_P=1.0, BETASUP_D=5.1,CE4SUP=0.9 !from Katul et al. 2003
REAL :: DELB,CTE3TMP,BSMALL,SQLDSQ,WB,UHUW,VHVW,PQQB,PQQU,PQQV,PQQW,TMPQQI,TMPQQE
REAL :: PQQ,PQQL,TMPVAL,WVFACT,FFTMP,CLQTMP,CUQTMP,CLQLTMP,CUQLTMP
REAL :: CMQTMP,CMQLTMP,EQ,EQL,QQHDH,DMLTMP,DMLMAX
REAL,SAVE,ALLOCATABLE :: PQQVEGI(:,:),PQQVEGE(:,:)
REAL,SAVE,ALLOCATABLE :: PQQMHKI(:,:),PQQMHKE(:,:)
REAL,SAVE,ALLOCATABLE :: PQQSUPI(:,:),PQQSUPE(:,:)
IF( .NOT. ALLOCATED(PQQVEGI) )THEN
ALLOCATE(PQQVEGI(LCM,KCM))
ALLOCATE(PQQVEGE(LCM,KCM))
ALLOCATE(PQQMHKI(LCM,KCM))
ALLOCATE(PQQMHKE(LCM,KCM))
ALLOCATE(PQQSUPI(LCM,KCM))
ALLOCATE(PQQSUPE(LCM,KCM))
PQQVEGI=0.0
PQQVEGE=0.0
PQQMHKI=0.0
PQQMHKE=0.0
PQQSUPI=0.0
PQQSUPE=0.0
ENDIF
DELT=DT2
S3TL=1.0
S2TL=0.0
IF( ISTL_ == 2 )THEN
DELT=DT
S3TL=0.0
S2TL=1.0
ENDIF
BSMALL=1.E-12
! *** SET WAVE RAMPUP FACTOR
IF( ISWAVE == 2 .OR. ISWAVE == 4 )THEN
IF( N<NTSWV )THEN
TMPVAL = FLOAT(N)/FLOAT(NTSWV)
WVFACT = 0.5-0.5*COS(PI*TMPVAL)
ELSE
WVFACT = 1.0
ENDIF
ENDIF
! *** ZERO FOR INITIALLY DRY CELLS
IF( LADRY > 0 )THEN
DO K=1,KC
DO LP=1,LADRY
L=LDRY(LP)
LE=LEC(L)
LN=LNC(L)
FWQQ(L,K)=0.
FWQQL(L,K)=0.
FUHU(L,K)=0.
FUHV(L,K)=0.
FUHU(LE,K)=0.
FUHV(LE,K)=0.
FVHU(L,K)=0.
FVHV(L,K)=0.
FVHU(LN,K)=0.
FVHV(LN,K)=0.
UUU(L,K)=0.
VVV(L,K)=0.
PQQVEGI(L,K)=0.
PQQVEGE(L,K)=0.
PQQMHKI(L,K)=0.
PQQMHKE(L,K)=0.
PQQSUPI(L,K)=0.
PQQSUPE(L,K)=0.
QQ(L,K)=QQMIN
QQL(L,K)=QQLMIN
QQ1(L,K)=QQMIN
QQL1(L,K)=QQLMIN
QQ2(L,K)=QQMIN
QQL2(L,K)=QQLMIN
DML(L,K)=QQLMIN/QQMIN
CU1(L,K)=0.
CU2(L,K)=0.
TVAR1W(L,K)=0.
ENDDO
ENDDO
ENDIF
  ! *** SET RATIO OF LENGTH SCALE*TURB_INTENSITY TO TURB_INTENSITY DIFFUSION
SQLDSQ=1.0
IF( ISTOPT(0) == 3 )SQLDSQ=0.377/0.628
!$OMP PARALLEL DEFAULT(SHARED)
! *** ZERO ACCUMULATION ARRAYS FOR ACTIVE CELLS
!$OMP DO PRIVATE(ND,K,LP,L)
DO ND=1,NDM
IF( ISTL_ == 3 )THEN
IF( ISCDCA(0) == 2 )THEN
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
QQ2(L,K) = QQ1(L,K) +QQ(L,K)
QQL2(L,K) = QQL1(L,K)+QQL(L,K)
ENDDO
ENDDO
ELSE
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
QQ2(L,K) = QQ1(L,K) +QQ1(L,K)
QQL2(L,K) = QQL1(L,K)+QQL1(L,K)
ENDDO
ENDDO
ENDIF
ENDIF
ENDDO ! *** END OF DOMAIN
!$OMP END DO
! ** CALCULATE ADVECTIVE FLUXES BY UPWIND DIFFERENCE WITH TRANSPORT
! ** AVERAGED BETWEEN (N) AND (N+1) AND TRANSPORTED FIELD AT (N) OR
! ** TRANSPORT BETWEEN (N-1) AND (N+1) AND TRANSPORTED FIELD AT (N-1)
! ** FOR ISTL EQUAL TO 2 AND 3 RESPECTIVELY
! *** VERTICAL FLUXES
IF( ISTL_ == 2 )THEN
!$OMP DO PRIVATE(ND,K,LP,L,WB)
DO ND=1,NDM
DO K=1,KC
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
WB = 0.5*DXYP(L)*(W2(L,K-1)+W2(L,K))
FWQQ(L,K) = MAX(WB,0.)*QQ1(L,K-1) + MIN(WB,0.)*QQ1(L,K)
FWQQL(L,K) = MAX(WB,0.)*QQL1(L,K-1)*H1P(L) + MIN(WB,0.)*QQL1(L,K)*H1P(L)
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ELSE
IF( ISCDCA(0) == 1 )THEN
! *** CENTRAL DIFFERENCE
!$OMP DO PRIVATE(ND,K,LP,L,WB)
DO ND=1,NDM
DO K=1,KC
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
WB = 0.25*DXYP(L)*(W2(L,K-1)+W2(L,K))
FWQQ(L,K) = WB* (QQ(L,K-1) +QQ(L,K))
FWQQL(L,K) = WB*H1P(L)*(QQL(L,K-1)+QQL(L,K))
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ELSE
! *** UPWIND DIFFERENCE
!$OMP DO PRIVATE(ND,K,LP,L,WB)
DO ND=1,NDM
DO K=1,KC
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
WB = 0.25*DXYP(L)*(W2(L,K-1)+W2(L,K))
FWQQ(L,K) = MAX(WB,0.)*QQ2(L,K-1) + MIN(WB,0.)*QQ2(L,K)
FWQQL(L,K) = MAX(WB,0.)*QQL2(L,K-1)*H2P(L) + MIN(WB,0.)*QQL2(L,K)*H2P(L)
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ENDIF
ENDIF
! *** HORIZONTAL FLUXES
IF( ISTL_ == 2 )THEN
! *** UPWIND DIFFERENCING
!$OMP DO PRIVATE(ND,K,LP,L,LS,LW,UHUW,VHVW)
DO ND=1,NDM
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LS=LSC(L)
LW=LWC(L)
UHUW = 0.5*(UHDYF2(L,K)+UHDYF2(L,K+1))
FUHU(L,K) = MAX(UHUW,0.)*QQ1(LW,K) + MIN(UHUW,0.)*QQ1(L,K)
FUHV(L,K) = MAX(UHUW,0.)*QQL1(LW,K)*H1P(LW) + MIN(UHUW,0.)*QQL1(L,K)*H1P(L)
VHVW = 0.5*(VHDXF2(L,K)+VHDXF2(L,K+1))
FVHU(L,K) = MAX(VHVW,0.)*QQ1(LS,K) + MIN(VHVW,0.)*QQ1(L,K)
FVHV(L,K) = MAX(VHVW,0.)*QQL1(LS,K)*H1P(LS) + MIN(VHVW,0.)*QQL1(L,K)*H1P(L)
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ELSE
! *** ISTL = 3
IF( ISCDCA(0) == 1 )THEN
! *** Central Differencing (3TL)
!$OMP DO PRIVATE(ND,K,LP,L,LS,LW,UHUW,VHVW)
DO ND=1,NDM
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LS=LSC(L)
LW=LWC(L)
UHUW = 0.25*(UHDYF2(L,K)+UHDYF2(L,K+1))
FUHU(L,K) = UHUW*(QQ(LW,K) + QQ(L,K))
FUHV(L,K) = UHUW*(QQL(LW,K)*H1P(LW) + QQL(L,K)*H1P(L))
VHVW = 0.25*(VHDXF2(L,K)+VHDXF2(L,K+1))
FVHU(L,K) = VHVW*(QQ(LS,K) + QQ(L,K))
FVHV(L,K) = VHVW*(QQL(LS,K)*H1P(LS) + QQL(L,K)*H1P(L))
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ELSE
! *** Upwind Differencing (3TL)
!$OMP DO PRIVATE(ND,K,LP,L,LS,LW,UHUW,VHVW)
DO ND=1,NDM
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LS=LSC(L)
LW=LWC(L)
UHUW = 0.25*(UHDYF2(L,K)+UHDYF2(L,K+1))
FUHU(L,K) = MAX(UHUW,0.)*QQ2(LW,K) + MIN(UHUW,0.)*QQ2(L,K)
FUHV(L,K) = MAX(UHUW,0.)*QQL2(LW,K)*H2P(LW) + MIN(UHUW,0.)*QQL2(L,K)*H2P(L)
VHVW = 0.25*(VHDXF2(L,K)+VHDXF2(L,K+1))
FVHU(L,K) = MAX(VHVW,0.)*QQ2(LS,K) + MIN(VHVW,0.)*QQ2(L,K)
FVHV(L,K) = MAX(VHVW,0.)*QQL2(LS,K)*H2P(LS) + MIN(VHVW,0.)*QQL2(L,K)*H2P(L)
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ENDIF
ENDIF
! *** APPLY LAYER SPECIFIC SUB/SVB TO FLUX TERMS
!$OMP DO PRIVATE(ND,K,LP,L)
DO ND=1,NDM
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
FUHU(L,K) = SUB3D(L,K)*FUHU(L,K)
FUHV(L,K) = SUB3D(L,K)*FUHV(L,K)
FVHU(L,K) = SVB3D(L,K)*FVHU(L,K)
FVHV(L,K) = SVB3D(L,K)*FVHV(L,K)
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
! ** CALCULATE PRODUCTION, LOAD BOUNDARY CONDITIONS AND SOLVE
! ** TRANSPORT EQUATIONS
! ** FUHQQ=FUHU, FVHQQ=FVHU, FUHQQL=FUHV, FVHQQL=FVHV
!$OMP SINGLE
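  ! *** ZERO THE ADVECTIVE FLUXES ENTERING FROM OPEN BOUNDARY CELLS (SOUTH/WEST/EAST/NORTH)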
DO K=1,KC
DO LL=1,NPBS
L=LPBS(LL)
LN=LNC(L)
IF( FVHU(LN,K) > 0. )THEN
FVHU(LN,K)=0.0
FVHV(LN,K)=0.0
ENDIF
ENDDO
DO LL=1,NPBW
L=LPBW(LL)
IF( FUHU(LEC(L),K) > 0. )THEN
FUHU(LEC(L),K)=0.0
FUHV(LEC(L),K)=0.0
ENDIF
ENDDO
DO LL=1,NPBE
L=LPBE(LL)
IF( FUHU(L,K) < 0. )THEN
FUHU(L,K)=0.0
FUHV(L,K)=0.0
ENDIF
ENDDO
DO LL=1,NPBN
L=LPBN(LL)
IF( FVHU(L,K) < 0. )THEN
FVHU(L,K)=0.0
FVHV(L,K)=0.0
ENDIF
ENDDO
ENDDO
!$OMP END SINGLE
! *** ADD VEGETATION IMPACTS ON TURBULENCE
IF( ISVEG > 0 )THEN ! SCJ vegetative/MHK impact on K-epsilon
!$OMP DO PRIVATE(ND,K,LP,L,LE,LN,TMPQQI,TMPQQE)
DO ND=1,NDM
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LE=LEC(L)
LN=LNC(L)
TMPQQI=0.25*BETAVEG_P
TMPQQE=0.25*BETAVEG_D
PQQVEGI(L,K)=TMPQQI*( FXVEG(L ,K )+FXVEG(L,K+1)+FXVEG(LE,K)+FXVEG(LE,K+1) + FYVEG(L,K)+FYVEG(L,K+1)+FYVEG(LN,K)+FYVEG(LN,K+1) )
PQQVEGE(L,K)=TMPQQE*( FXVEG(L ,K )*U(L ,K )*U(L ,K )+FXVEG(L ,K+1)*U(L ,K+1)*U(L ,K+1) &
+FXVEG(LE,K )*U(LE,K )*U(LE,K )+FXVEG(LE,K+1)*U(LE,K+1)*U(LE,K+1) &
+FYVEG(L ,K )*V(L ,K )*V(L ,K )+FYVEG(L ,K+1)*V(L ,K+1)*V(L ,K+1) &
+FYVEG(LN,K )*V(LN,K )*V(LN,K )+FYVEG(LN,K+1)*V(LN,K+1)*V(LN,K+1))
IF( MVEGL(L)>90 )THEN
TMPQQI=0.5*BETAMHK_P
TMPQQE=0.5*BETAMHK_D
PQQMHKI(L,K)=TMPQQI*(FXMHK(L,K )+FXMHK(L,K+1)+FYMHK(L,K )+FYMHK(L,K+1))
PQQMHKE(L,K)=TMPQQE*(FXMHK(L,K )*U(L,K )*U(L,K )+FXMHK(L,K+1)*U(L,K+1)*U(L,K+1) &
+FYMHK(L,K )*V(L,K )*V(L,K )+FYMHK(L,K+1)*V(L,K+1)*V(L,K+1))
TMPQQI=0.5*BETASUP_P
TMPQQE=0.5*BETASUP_D
PQQSUPI(L,K)=TMPQQI*(FXSUP(L,K )+FXSUP(L,K+1)+FYSUP(L,K )+FYSUP(L,K+1))
PQQSUPE(L,K)=TMPQQE*(FXSUP(L,K )*U(L,K )*U(L,K )+FXSUP(L,K+1)*U(L,K+1)*U(L,K+1) &
+FYSUP(L,K )*V(L,K )*V(L,K )+FYSUP(L,K+1)*V(L,K+1)*V(L,K+1))
ENDIF
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ENDIF
! *** CALCS WITHOUT INTERNAL RADIATION SHEAR STRESS DUE TO WAVE ACTION
IF( ISWAVE <= 1 .OR. ISWAVE == 3 )THEN
! *** NO WAVE INDUCED RADIATION SHEAR STRESS
IF( ISTL_ == 2 )THEN
!$OMP DO PRIVATE(ND,K,LP,L,LE,LN,DELB,CTE3TMP,PQQB,PQQU,PQQV,PQQ,PQQL)
DO ND=1,NDM
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LE=LEC(L)
LN=LNC(L)
UUU(L,K) = QQ1(L,K) *H1P(L) + DELT*( FUHU(L,K)-FUHU(LE,K)+FVHU(L,K)-FVHU(LN,K)+(FWQQ(L,K) -FWQQ(L,K+1)) *DZIG(L,K) )*DXYIP(L)
VVV(L,K) = QQL1(L,K)*H1P(L)*H1P(L) + DELT*( FUHV(L,K)-FUHV(LE,K)+FVHV(L,K)-FVHV(LN,K)+(FWQQL(L,K)-FWQQL(L,K+1))*DZIG(L,K) )*DXYIP(L)
ENDDO
ENDDO
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LN=LNC(L)
LE=LEC(L)
DELB=B(L,K)-B(L,K+1)
CTE3TMP=CTE3
IF( DELB < 0.0)CTE3TMP=CTE1
PQQB = AB(L,K)*GP*HP(L)*DZIG(L,K)*( B(L,K+1) - B(L,K) )
PQQU = AV(L,K)*DZIGSD4U(L,K)*( U(LE,K+1) - U(LE,K) + U(L,K+1) - U(L,K) )**2
PQQV = AV(L,K)*DZIGSD4V(L,K)*( V(LN,K+1) - V(LN,K) + V(L,K+1) - V(L,K) )**2
PQQ = DELT*( PQQB + PQQU + PQQV + PQQVEGE(L,K) + PQQMHKE(L,K) + PQQSUPE(L,K) )
UUU(L,K) = UUU(L,K) + 2.*PQQ
PQQL = DELT*H1P(L)*(CTE3TMP*PQQB + CTE1*(PQQU+PQQV) + CE4VEG*PQQVEGE(L,K) + CE4MHK*PQQMHKE(L,K) + CE4SUP*PQQSUPE(L,K))
VVV(L,K) = VVV(L,K) + DML(L,K)*PQQL
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ELSE
! *** ISTL_ == 3
!$OMP DO PRIVATE(ND,K,LP,L,LE,LN,DELB,CTE3TMP,PQQB,PQQU,PQQV,PQQ,PQQL)
DO ND=1,NDM
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LE=LEC(L)
LN=LNC(L)
UUU(L,K) = QQ1(L,K) *H2P(L) + DELT*( FUHU(L,K)-FUHU(LE,K)+FVHU(L,K)-FVHU(LN,K)+(FWQQ(L,K) -FWQQ(L,K+1) )*DZIG(L,K) )*DXYIP(L)
VVV(L,K) = QQL1(L,K)*H2P(L)*H2P(L) + DELT*( FUHV(L,K)-FUHV(LE,K)+FVHV(L,K)-FVHV(LN,K)+(FWQQL(L,K)-FWQQL(L,K+1))*DZIG(L,K) )*DXYIP(L)
ENDDO
ENDDO
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LN=LNC(L)
!IF( .NOT. LSGZV(LN,K) )LN=LC
LE=LEC(L)
!IF( .NOT. LSGZU(LE,K) )LE=LC
DELB=B(L,K)-B(L,K+1)
CTE3TMP=CTE3
IF( DELB < 0.0 )CTE3TMP=CTE1
PQQB = AB(L,K)*GP*HP(L)*DZIG(L,K)*( B(L,K+1) - B(L,K) )
PQQU = AV(L,K)*DZIGSD4U(L,K)*( U(LE,K+1)-U(LE,K) + U(L,K+1)-U(L,K) )**2
PQQV = AV(L,K)*DZIGSD4V(L,K)*( V(LN,K+1)-V(LN,K) + V(L,K+1)-V(L,K) )**2
PQQ = DELT*( PQQB + PQQU + PQQV + PQQVEGE(L,K) + PQQMHKE(L,K) + PQQSUPE(L,K) )
UUU(L,K) = UUU(L,K) + 2.*PQQ
PQQL = DELT*H2P(L)*(CTE3TMP*PQQB + CTE1*(PQQU+PQQV) + CE4VEG*PQQVEGE(L,K) + CE4MHK*PQQMHKE(L,K) + CE4SUP*PQQSUPE(L,K))
VVV(L,K) = VVV(L,K) + DML(L,K)*PQQL
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ENDIF
ENDIF
! *** WAVE OPTION: BED SHEAR AND WATER COLUMN
IF( ISWAVE == 2 .OR. ISWAVE == 4 )THEN
!$OMP DO PRIVATE(ND,K,LP,L,LN,LE) &
!$OMP PRIVATE(DELB,CTE3TMP,PQQB,PQQU,PQQV,PQQW,PQQ,PQQL,FFTMP)
DO ND=1,NDM
! *** SUM VERTICAL WAVE DISSIPATION DUE TO TKE CLOSURE
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
IF( LWVMASK(L) )THEN
! *** BOTTOM TOP
TVAR1W(L,K) = WVDTKEM(K)*WV(L).DISSIPA(K) + WVDTKEP(K)*WV(L).DISSIPA(K+1)
ELSE
TVAR1W(L,K) = 0.0
ENDIF
ENDDO
ENDDO
IF( ISTL_ == 2 )THEN
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LN=LNC(L)
!IF( .NOT. LSGZV(LN,K) )LN=LC
LE=LEC(L)
!IF( .NOT. LSGZU(LE,K) )LE=LC
DELB=B(L,K)-B(L,K+1)
CTE3TMP=CTE3
IF( DELB < 0.0 )CTE3TMP=CTE1
PQQB = AB(L,K)*GP*H1P(L)*DZIG(L,K)*(B(L,K+1)-B(L,K))
PQQU = AV(L,K)*DZIGSD4U(L,K)*(U(LE,K+1)-U(LE,K)+U(L,K+1)-U(L,K))**2
PQQV = AV(L,K)*DZIGSD4V(L,K)*(V(LN,K+1)-V(LN,K)+V(L,K+1)-V(L,K))**2
PQQW = WVFACT*TVAR1W(L,K)
PQQ = DELT*(PQQU+PQQV+PQQB+PQQW+PQQVEGE(L,K)+PQQMHKE(L,K)+PQQSUPE(L,K))
FFTMP = MAX( FUHU(L,K)-FUHU(LE,K)+FVHU(L,K)-FVHU(LN,K) + (FWQQ(L,K)-FWQQ(L,K+1))*DZIG(L,K), 0. )
UUU(L,K) = QQ1(L,K)*H1P(L) + DELT*FFTMP*DXYIP(L) + 2.*PQQ
FFTMP = MAX( FUHV(L,K)-FUHV(LE,K)+FVHV(L,K)-FVHV(LN,K) + (FWQQL(L,K)-FWQQL(L,K+1))*DZIG(L,K), 0. )
PQQL = DELT*H1P(L)*(CTE3TMP*PQQB + CTE1*(PQQU+PQQV) + CE4VEG*PQQVEGE(L,K) + CE4MHK*PQQMHKE(L,K) + CE4SUP*PQQSUPE(L,K))
VVV(L,K) = QQL1(L,K)*H1P(L)*H1P(L) + DELT*FFTMP*DXYIP(L) + DML(L,K)*PQQL
ENDDO
ENDDO
ELSE ! *** ISTL_ == 3
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
LN=LNC(L)
!IF( .NOT. LSGZV(LN,K) )LN=LC
LE=LEC(L)
!IF( .NOT. LSGZU(LE,K) )LE=LC
DELB=B(L,K)-B(L,K+1)
CTE3TMP=CTE3
IF( DELB < 0.0 )CTE3TMP=CTE1
PQQB = AB(L,K)*GP*H2P(L)*DZIG(L,K)*(B(L,K+1)-B(L,K))
PQQU = AV(L,K)*DZIGSD4U(L,K)*(U(LE,K+1)-U(LE,K) + U(L,K+1)-U(L,K))**2
PQQV = AV(L,K)*DZIGSD4V(L,K)*(V(LN,K+1)-V(LN,K) + V(L,K+1)-V(L,K))**2
PQQW = WVFACT*TVAR1W(L,K)
PQQ = DELT*(PQQU+PQQV+PQQB+PQQW+PQQVEGE(L,K)+PQQMHKE(L,K)+PQQSUPE(L,K))
FFTMP = MAX( FUHU(L,K)-FUHU(LE,K)+FVHU(L,K)-FVHU(LN,K) + (FWQQ(L,K)-FWQQ(L,K+1))*DZIG(L,K),0. )
UUU(L,K) = QQ1(L,K) *H2P(L) + DELT*FFTMP*DXYIP(L) + 2.*PQQ
FFTMP = MAX( FUHV(L,K)-FUHV(LE,K)+FVHV(L,K)-FVHV(LN,K) + (FWQQL(L,K)-FWQQL(L,K+1))*DZIG(L,K),0. )
PQQL = DELT*H2P(L)*(CTE3TMP*PQQB + CTE1*(PQQU+PQQV) + CE4VEG*PQQVEGE(L,K) + CE4MHK*PQQMHKE(L,K) + CE4SUP*PQQSUPE(L,K))
VVV(L,K) = QQL1(L,K)*H2P(L)*H2P(L) + DELT*FFTMP*DXYIP(L) + DML(L,K)*PQQL
ENDDO
ENDDO
ENDIF
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ENDIF ! *** END OF ISWAVE = 2 AND 4
! *****************************************************************************
IF( KC <= 2 )THEN
! *** 1 AND 2 LAYER CASE
!$OMP DO PRIVATE(ND,LF,LL,K,LP,L,CLQTMP,CUQTMP,CLQLTMP,CUQLTMP,CMQTMP,CMQLTMP,EQ,EQL)
DO ND=1,NDM
LF=(ND-1)*LDMWET+1
LL=MIN(LF+LDMWET-1,LAWET)
DO LP=LF,LL
L=LWET(LP)
CLQTMP=-DELT*CDZKK(L,1) *AQ(L,1)*HPI(L)
CUQTMP=-DELT*CDZKKP(L,1)*AQ(L,2)*HPI(L)
CLQLTMP=SQLDSQ*CLQTMP
CUQLTMP=SQLDSQ*CUQTMP
CMQTMP = 1.-CLQTMP -CUQTMP + 2.*DELT*QQSQR(L,1) /(CTURBB1(L,1)*DML(L,1)*HP(L))
CMQLTMP = 1.-CLQLTMP-CUQLTMP + DELT*(QQSQR(L,1)/(CTURBB1(L,1)*DML(L,1)*HP(L)))*(1.+CTE4*DML(L,1)*DML(L,1)*FPROX(L,1))
EQ=1./CMQTMP
EQL=1./CMQLTMP
CU1(L,1)=CUQTMP*EQ
CU2(L,1)=CUQLTMP*EQL
UUU(L,1)=(UUU(L,1)-CLQTMP*HP(L)*QQ(L,0)-CUQTMP*HP(L)*QQ(L,KC))*EQ
VVV(L,1)=VVV(L,1)*EQL
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ENDIF
IF( KC > 2 )THEN
! *** MULTI-LAYER CASE
!$OMP DO PRIVATE(ND,LF,LL,K,LP,L,LN,CLQTMP,CUQTMP,CLQLTMP,CUQLTMP,CMQTMP,CMQLTMP,EQ,EQL)
DO ND=1,NDM
LF=(ND-1)*LDMWET+1
LL=MIN(LF+LDMWET-1,LAWET)
! *** BOTTOM ACTIVE LAYER
DO LP=1,LLWET(KS,ND)
L=LKWET(LP,KS,ND)
K=KSZ(L)
CLQTMP = -DELT*CDZKK(L,K) *AQ(L,K) *HPI(L)
CUQTMP = -DELT*CDZKKP(L,K)*AQ(L,K+1)*HPI(L)
CLQLTMP = SQLDSQ*CLQTMP
CUQLTMP = SQLDSQ*CUQTMP
CMQTMP = 1.-CLQTMP -CUQTMP+2.*DELT*QQSQR(L,K) /(CTURBB1(L,K)*DML(L,K)*HP(L))
CMQLTMP = 1.-CLQLTMP-CUQLTMP +DELT*(QQSQR(L,K)/(CTURBB1(L,K)*DML(L,K)*HP(L)))*(1.+CTE4*DML(L,K)*DML(L,K)*FPROX(L,K))
EQ = 1./CMQTMP
EQL = 1./CMQLTMP
CU1(L,K) = CUQTMP*EQ
CU2(L,K) = CUQLTMP*EQL
UUU(L,K) = ( UUU(L,K) - CLQTMP*HP(L)*QQ(L,0) )*EQ
VVV(L,K) = VVV(L,K)*EQL
CUQTMP = -DELT*CDZKKP(L,KS)*AQ(L,KC)*HPI(L)
UUU(L,KS) = UUU(L,KS) - CUQTMP*HP(L)*QQ(L,KC)
ENDDO
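      ! *** FORWARD SWEEP OF THE TRIDIAGONAL SOLVER FOR THE REMAINING LAYERS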
DO K=2,KS
DO LP=1,LLWET(K-1,ND)
L=LKWET(LP,K-1,ND)
CLQTMP = -DELT*CDZKK(L,K) *AQ(L,K) *HPI(L)
CUQTMP = -DELT*CDZKKP(L,K)*AQ(L,K+1)*HPI(L)
CLQLTMP = SQLDSQ*CLQTMP
CUQLTMP = SQLDSQ*CUQTMP
CMQTMP = 1.-CLQTMP -CUQTMP+2.*DELT*QQSQR(L,K) /(CTURBB1(L,K)*DML(L,K)*HP(L))
CMQLTMP = 1.-CLQLTMP-CUQLTMP +DELT*(QQSQR(L,K)/(CTURBB1(L,K)*DML(L,K)*HP(L)))*(1.+CTE4*DML(L,K)*DML(L,K)*FPROX(L,K))
EQ = 1./(CMQTMP-CLQTMP*CU1(L,K-1))
EQL = 1./(CMQLTMP-CLQLTMP*CU2(L,K-1))
CU1(L,K) = CUQTMP*EQ
CU2(L,K) = CUQLTMP*EQL
UUU(L,K) = (UUU(L,K)-CLQTMP*UUU(L,K-1))*EQ
VVV(L,K) = (VVV(L,K)-CLQLTMP*VVV(L,K-1))*EQL
ENDDO
ENDDO
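      ! *** BACK SUBSTITUTION, FROM THE SURFACE LAYER DOWN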
DO K=KS-1,1,-1
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
UUU(L,K) = UUU(L,K) - CU1(L,K)*UUU(L,K+1)
VVV(L,K) = VVV(L,K) - CU2(L,K)*VVV(L,K+1)
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
ENDIF ! *** END OF KC>2
! ** ORIGINAL FORM MODIFIED FOR DIMENSIONAL LENGTH SCALE TRANSPORT
!$OMP DO PRIVATE(ND,K,LP,L,LN,QQHDH,DMLTMP,DELB,DMLMAX)
DO ND=1,NDM
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
QQ1(L,K) = S2TL*QQ1(L,K) + S3TL*QQ(L,K)
QQHDH = UUU(L,K)*HPI(L)
QQ(L,K) = MAX(QQHDH,QQMIN)
ENDDO
ENDDO
  ! ** ORIGINAL FORM MODIFIED FOR DIMENSIONAL LENGTH SCALE TRANSPORT
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
QQL1(L,K) = S2TL*QQL1(L,K)+S3TL*QQL(L,K)
QQHDH = VVV(L,K)*HPI(L)
QQHDH = MIN(QQHDH,HP(L)) ! LIMIT DML
QQHDH = MAX(QQHDH,QQLMIN)
QQL(L,K) = QQHDH/HP(L)
DMLTMP = QQL(L,K)/QQ(L,K)
DMLTMP = MAX(DMLTMP,DMLMIN)
DELB = B(L,K)-B(L,K+1)
IF( DELB > 0.0 .AND. ISLLIM == 2 )THEN
DMLMAX = SQRT(RIQMAX)*SQRT(QQ(L,K)/(G*HP(L)*DZIG(L,K)*DELB))
DML(L,K) = MIN(DMLMAX,DMLTMP)
QQL(L,K) = QQ(L,K)*DML(L,K)
ELSE
DML(L,K) = DMLTMP
ENDIF
ENDDO
ENDDO
ENDDO ! *** END OF DOMAIN
!$OMP END DO
! ****************************************************************************
! *** CHECK FOR DEPTHS LESS THAN ZBR
IF( ISDRY > 0 )THEN
!$OMP DO PRIVATE(ND,LF,LL,LP,L,K)
DO ND=1,NDM
LF=(ND-1)*LDMWET+1
LL=MIN(LF+LDMWET-1,LAWET)
DO LP=LF,LL
L=LWET(LP)
IF( HPK(L,KSZ(L)) < ZBR(L) )THEN
! *** SPECIAL CASE: LAYER 1 OR MORE THICKNESSES < Z0
DO K=KSZ(L),KS
IF( HP(L)*Z(L,K-1) > ZBR(L) )EXIT
QQ(L,K) = QQMIN
QQL(L,K) = QQLMIN
DML(L,K) = DMLMIN
ENDDO
ENDIF
ENDDO
ENDDO
!$OMP END DO
ENDIF
!$OMP SINGLE
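  ! *** OPEN BOUNDARY CELLS: COPY QQ, QQL AND DML FROM THE ADJACENT INTERIOR CELL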
DO K=1,KS
DO LL=1,NPBS
L=LPBS(LL)
LN=LNC(L)
QQ(L,K)=QQ(LN,K)
QQL(L,K)=QQL(LN,K)
DML(L,K)=DML(LN,K)
ENDDO
ENDDO
DO K=1,KS
DO LL=1,NPBW
L=LPBW(LL)
QQ(L,K)=QQ(LEC(L),K)
QQL(L,K)=QQL(LEC(L),K)
DML(L,K)=DML(LEC(L),K)
ENDDO
ENDDO
DO K=1,KS
DO LL=1,NPBE
L=LPBE(LL)
QQ(L,K)=QQ(LWC(L),K)
QQL(L,K)=QQL(LWC(L),K)
DML(L,K)=DML(LWC(L),K)
ENDDO
ENDDO
DO K=1,KS
DO LL=1,NPBN
L=LPBN(LL)
LS=LSC(L)
QQ(L,K)=QQ(LS,K)
QQL(L,K)=QQL(LS,K)
DML(L,K)=DML(LS,K)
ENDDO
ENDDO
!$OMP END SINGLE
! *** SAVE THE SQRT OF THE TURBULENCE (M/S)
!$OMP DO PRIVATE(ND,K,LP,L)
DO ND=1,NDM
DO K=1,KS
DO LP=1,LLWET(K,ND)
L=LKWET(LP,K,ND)
QQSQR(L,K)=SQRT(QQ(L,K))
ENDDO
ENDDO
ENDDO
!$OMP END DO
!$OMP END PARALLEL
RETURN
END
|
/-
Copyright (c) 2017 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.hom_functor
/-!
# The Yoneda embedding
The Yoneda embedding as a functor `yoneda : C ⥤ (Cᵒᵖ ⥤ Type v₁)`,
along with an instance that it is `fully_faithful`.
Also the Yoneda lemma, `yoneda_lemma : (yoneda_pairing C) ≅ (yoneda_evaluation C)`.
## References
* [Stacks: Opposite Categories and the Yoneda Lemma](https://stacks.math.columbia.edu/tag/001L)
-/
namespace category_theory
open opposite
universes v₁ u₁ u₂ -- morphism levels before object levels. See note [category_theory universes].
variables {C : Type u₁} [category.{v₁} C]
/--
The Yoneda embedding, as a functor from `C` into presheaves on `C`.
See https://stacks.math.columbia.edu/tag/001O.
-/
@[simps]
def yoneda : C ⥤ (Cᵒᵖ ⥤ Type v₁) :=
{ obj := λ X,
{ obj := λ Y, unop Y ⟶ X,
map := λ Y Y' f g, f.unop ≫ g,
map_comp' := λ _ _ _ f g, begin ext, dsimp, erw [category.assoc] end,
map_id' := λ Y, begin ext, dsimp, erw [category.id_comp] end },
map := λ X X' f, { app := λ Y g, g ≫ f } }
/--
The co-Yoneda embedding, as a functor from `Cᵒᵖ` into co-presheaves on `C`.
-/
@[simps] def coyoneda : Cᵒᵖ ⥤ (C ⥤ Type v₁) :=
{ obj := λ X,
{ obj := λ Y, unop X ⟶ Y,
map := λ Y Y' f g, g ≫ f,
map_comp' := λ _ _ _ f g, begin ext1, dsimp, erw [category.assoc] end,
map_id' := λ Y, begin ext1, dsimp, erw [category.comp_id] end },
map := λ X X' f, { app := λ Y g, f.unop ≫ g },
map_comp' := λ _ _ _ f g, begin ext, dsimp, erw [category.assoc] end,
map_id' := λ X, begin ext, dsimp, erw [category.id_comp] end }
namespace yoneda
lemma obj_map_id {X Y : C} (f : op X ⟶ op Y) :
((@yoneda C _).obj X).map f (𝟙 X) = ((@yoneda C _).map f.unop).app (op Y) (𝟙 Y) :=
by obviously
@[simp] lemma naturality {X Y : C} (α : yoneda.obj X ⟶ yoneda.obj Y)
{Z Z' : C} (f : Z ⟶ Z') (h : Z' ⟶ X) : f ≫ α.app (op Z') h = α.app (op Z) (f ≫ h) :=
(functor_to_types.naturality _ _ α f.op h).symm
/--
The Yoneda embedding is full.
See https://stacks.math.columbia.edu/tag/001P.
-/
instance yoneda_full : full (@yoneda C _) :=
{ preimage := λ X Y f, (f.app (op X)) (𝟙 X) }
/--
The Yoneda embedding is faithful.
See https://stacks.math.columbia.edu/tag/001P.
-/
instance yoneda_faithful : faithful (@yoneda C _) :=
{ map_injective' := λ X Y f g p,
begin
injection p with h,
convert (congr_fun (congr_fun h (op X)) (𝟙 X)); dsimp; simp,
end }
/-- Extensionality via Yoneda. The typical usage would be
```
-- Goal is `X ≅ Y`
apply yoneda.ext,
-- Goals are now functions `(Z ⟶ X) → (Z ⟶ Y)`, `(Z ⟶ Y) → (Z ⟶ X)`, and the fact that these
functions are inverses and natural in `Z`.
```
-/
def ext (X Y : C)
(p : Π {Z : C}, (Z ⟶ X) → (Z ⟶ Y)) (q : Π {Z : C}, (Z ⟶ Y) → (Z ⟶ X))
(h₁ : Π {Z : C} (f : Z ⟶ X), q (p f) = f) (h₂ : Π {Z : C} (f : Z ⟶ Y), p (q f) = f)
(n : Π {Z Z' : C} (f : Z' ⟶ Z) (g : Z ⟶ X), p (f ≫ g) = f ≫ p g) : X ≅ Y :=
@preimage_iso _ _ _ _ yoneda _ _ _ _
(nat_iso.of_components (λ Z, { hom := p, inv := q, }) (by tidy))
/--
If `yoneda.map f` is an isomorphism, so is `f`.
-/
lemma is_iso {X Y : C} (f : X ⟶ Y) [is_iso (yoneda.map f)] : is_iso f :=
is_iso_of_fully_faithful yoneda f
end yoneda
namespace coyoneda
@[simp] lemma naturality {X Y : Cᵒᵖ} (α : coyoneda.obj X ⟶ coyoneda.obj Y)
{Z Z' : C} (f : Z' ⟶ Z) (h : unop X ⟶ Z') : (α.app Z' h) ≫ f = α.app Z (h ≫ f) :=
begin erw [functor_to_types.naturality], refl end
instance coyoneda_full : full (@coyoneda C _) :=
{ preimage := λ X Y f, ((f.app (unop X)) (𝟙 _)).op }
instance coyoneda_faithful : faithful (@coyoneda C _) :=
{ map_injective' := λ X Y f g p,
begin
injection p with h,
have t := (congr_fun (congr_fun h (unop X)) (𝟙 _)),
simpa using congr_arg quiver.hom.op t,
end }
/--
If `coyoneda.map f` is an isomorphism, so is `f`.
-/
lemma is_iso {X Y : Cᵒᵖ} (f : X ⟶ Y) [is_iso (coyoneda.map f)] : is_iso f :=
is_iso_of_fully_faithful coyoneda f
-- No need to use Cᵒᵖ here, works with any category
/-- A Type-valued presheaf `P` is isomorphic to the composition of `P` with the
coyoneda functor coming from `punit`. -/
@[simps] def iso_comp_punit (P : C ⥤ Type v₁) : (P ⋙ coyoneda.obj (op punit.{v₁+1})) ≅ P :=
{ hom := { app := λ X f, f punit.star},
inv := { app := λ X a _, a } }
end coyoneda
/--
A presheaf `F` is representable if there is an object `X` such that `F ≅ yoneda.obj X`.
See https://stacks.math.columbia.edu/tag/001Q.
-/
-- TODO should we make this a Prop, merely asserting existence of such an object?
class representable (F : Cᵒᵖ ⥤ Type v₁) :=
(X : C)
(w : yoneda.obj X ≅ F)
end category_theory
namespace category_theory
-- For the rest of the file, we are using product categories,
-- so need to restrict to the case morphisms are in 'Type', not 'Sort'.
universes v₁ u₁ u₂ -- morphism levels before object levels. See note [category_theory universes].
open opposite
variables (C : Type u₁) [category.{v₁} C]
-- We need to help typeclass inference with some awkward universe levels here.
instance prod_category_instance_1 : category ((Cᵒᵖ ⥤ Type v₁) × Cᵒᵖ) :=
category_theory.prod.{(max u₁ v₁) v₁} (Cᵒᵖ ⥤ Type v₁) Cᵒᵖ
instance prod_category_instance_2 : category (Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁)) :=
category_theory.prod.{v₁ (max u₁ v₁)} Cᵒᵖ (Cᵒᵖ ⥤ Type v₁)
open yoneda
/--
The "Yoneda evaluation" functor, which sends `X : Cᵒᵖ` and `F : Cᵒᵖ ⥤ Type`
to `F.obj X`, functorially in both `X` and `F`.
-/
def yoneda_evaluation : Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁) ⥤ Type (max u₁ v₁) :=
evaluation_uncurried Cᵒᵖ (Type v₁) ⋙ ulift_functor.{u₁}
@[simp] lemma yoneda_evaluation_map_down
(P Q : Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁)) (α : P ⟶ Q) (x : (yoneda_evaluation C).obj P) :
((yoneda_evaluation C).map α x).down = α.2.app Q.1 (P.2.map α.1 x.down) := rfl
/--
The "Yoneda pairing" functor, which sends `X : Cᵒᵖ` and `F : Cᵒᵖ ⥤ Type`
to `yoneda.op.obj X ⟶ F`, functorially in both `X` and `F`.
-/
def yoneda_pairing : Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁) ⥤ Type (max u₁ v₁) :=
functor.prod yoneda.op (𝟭 (Cᵒᵖ ⥤ Type v₁)) ⋙ functor.hom (Cᵒᵖ ⥤ Type v₁)
@[simp] lemma yoneda_pairing_map
(P Q : Cᵒᵖ × (Cᵒᵖ ⥤ Type v₁)) (α : P ⟶ Q) (β : (yoneda_pairing C).obj P) :
(yoneda_pairing C).map α β = yoneda.map α.1.unop ≫ β ≫ α.2 := rfl
/--
The Yoneda lemma asserts that the Yoneda pairing
`(X : Cᵒᵖ, F : Cᵒᵖ ⥤ Type) ↦ (yoneda.obj (unop X) ⟶ F)`
is naturally isomorphic to the evaluation `(X, F) ↦ F.obj X`.
See https://stacks.math.columbia.edu/tag/001P.
-/
def yoneda_lemma : yoneda_pairing C ≅ yoneda_evaluation C :=
{ hom :=
{ app := λ F x, ulift.up ((x.app F.1) (𝟙 (unop F.1))),
naturality' :=
begin
intros X Y f, ext, dsimp,
erw [category.id_comp, ←functor_to_types.naturality],
simp only [category.comp_id, yoneda_obj_map],
end },
inv :=
{ app := λ F x,
{ app := λ X a, (F.2.map a.op) x.down,
naturality' :=
begin
intros X Y f, ext, dsimp,
rw [functor_to_types.map_comp_apply]
end },
naturality' :=
begin
intros X Y f, ext, dsimp,
rw [←functor_to_types.naturality, functor_to_types.map_comp_apply]
end },
hom_inv_id' :=
begin
ext, dsimp,
erw [←functor_to_types.naturality,
obj_map_id],
simp only [yoneda_map_app, quiver.hom.unop_op],
erw [category.id_comp],
end,
inv_hom_id' :=
begin
ext, dsimp,
rw [functor_to_types.map_id_apply]
end }.
variables {C}
/--
The isomorphism between `yoneda.obj X ⟶ F` and `F.obj (op X)`
(we need to insert a `ulift` to get the universes right!)
given by the Yoneda lemma.
-/
@[simp] def yoneda_sections (X : C) (F : Cᵒᵖ ⥤ Type v₁) :
(yoneda.obj X ⟶ F) ≅ ulift.{u₁} (F.obj (op X)) :=
(yoneda_lemma C).app (op X, F)
/--
We have a type-level equivalence between natural transformations from the yoneda embedding
and elements of `F.obj X`, without any universe switching.
-/
def yoneda_equiv {X : C} {F : Cᵒᵖ ⥤ Type v₁} : (yoneda.obj X ⟶ F) ≃ F.obj (op X) :=
(yoneda_sections X F).to_equiv.trans equiv.ulift
lemma yoneda_equiv_naturality {X Y : C} {F : Cᵒᵖ ⥤ Type v₁} (f : yoneda.obj X ⟶ F) (g : Y ⟶ X) :
F.map g.op (yoneda_equiv f) = yoneda_equiv (yoneda.map g ≫ f) :=
begin
change (f.app (op X) ≫ F.map g.op) (𝟙 X) = f.app (op Y) (𝟙 Y ≫ g),
rw ← f.naturality,
dsimp,
simp,
end
@[simp]
lemma yoneda_equiv_apply {X : C} {F : Cᵒᵖ ⥤ Type v₁} (f : yoneda.obj X ⟶ F) :
yoneda_equiv f = f.app (op X) (𝟙 X) :=
rfl
@[simp]
lemma yoneda_equiv_symm_app_apply {X : C} {F : Cᵒᵖ ⥤ Type v₁} (x : F.obj (op X))
(Y : Cᵒᵖ) (f : Y.unop ⟶ X) :
(yoneda_equiv.symm x).app Y f = F.map f.op x :=
rfl
/--
When `C` is a small category, we can restate the isomorphism from `yoneda_sections`
without having to change universes.
-/
def yoneda_sections_small {C : Type u₁} [small_category C] (X : C)
(F : Cᵒᵖ ⥤ Type u₁) :
(yoneda.obj X ⟶ F) ≅ F.obj (op X) :=
yoneda_sections X F ≪≫ ulift_trivial _
@[simp]
lemma yoneda_sections_small_hom {C : Type u₁} [small_category C] (X : C)
(F : Cᵒᵖ ⥤ Type u₁) (f : yoneda.obj X ⟶ F) :
(yoneda_sections_small X F).hom f = f.app _ (𝟙 _) :=
rfl
@[simp]
lemma yoneda_sections_small_inv_app_apply {C : Type u₁} [small_category C] (X : C)
(F : Cᵒᵖ ⥤ Type u₁) (t : F.obj (op X)) (Y : Cᵒᵖ) (f : Y.unop ⟶ X) :
((yoneda_sections_small X F).inv t).app Y f = F.map f.op t :=
rfl
end category_theory
|
lemma mem_cone: assumes "cone S" "x \<in> S" "c \<ge> 0" shows "c *\<^sub>R x \<in> S" |
module OpenSecrets
using DataFrames
include("latin1buffer.jl")
include("opensecretsbuffer.jl")
include("data_detection.jl")
include("data_loading.jl")
global _campaign_finance_data_sources = detect_data_sources()
end
|
By visiting and/or using the true-blue.com.au website, associated services and functionality ("the website") you agree to be bound by this user agreement ("agreement").
This agreement is formed between you and DPAM Operations Pty Ltd (ABN 49 448 911 620) T/A True Blue Underwater Hockey Supplies ("us", "our", "we"). "you", "user", "member" and "visitor" means anyone who visits this website.
If you do not agree to any provisions of this agreement, you must not use the website.
We reserve the right to make changes to this agreement from time to time at our sole discretion. By continuing to use the website, you agree to be bound by the changes. We are not obliged to notify you of any changes but we will endeavour to alert you to any significant changes. Nevertheless, you should check our published agreement and policies from time to time to acquaint yourself with the current version.
In order to make purchases and access some features of the website, you will need to be a registered member.
You may not use another member's account without permission.
When registering to become a member and activate an account, you must provide personal information such as your name and address, and a valid email address. You agree to provide accurate and complete information and to keep this information current.
We recommend not using your real name as your username.
You are solely responsible for the activity that occurs on your account, and you must keep your account password secure.
If you suspect or become aware of any unauthorised use of your account or that your password is no longer secure, you agree to notify us immediately.
are entering into a legal contract with us.
Should we suffer any loss or damage as a result of a transaction entered into by a minor, we reserve the right to take legal action and seek compensation for such losses from the parents or guardians of the minor who caused the order to be placed.
We aim to provide a positive experience for all users and members. Accordingly, we reserve the right to change (ie alter, remove or add functionality) the website at any time. We do not guarantee that you will be able to access the website in the same way or with the same equipment or software you used prior to the change.
We may stop (temporarily or permanently) providing access to the website to you, or to visitors or members generally, at our discretion and without prior notice to you.
We may in our sole discretion terminate your account or restrict your access to the website. If we do this, you may be prevented from accessing all or parts of the website, your account details or other content contained in your account. We will not be liable to you or any third party for doing so.
the merchantability or fitness for any purpose of any product or service of any linked sites.
We do not warrant, endorse, guarantee or assume responsibility for any product or service advertised or offered by a third party through the website or any linked website or featured in any banner or other advertising. We will not be a party to or in any way responsible for monitoring any transaction between you and third party providers of products and services.
We do not take responsibility for direct or indirect damages, or consequential losses suffered by use of fraudulent or unauthorised web address. The only authorised access point is http://www.true-blue.com.au with no characters before or after "www.true-blue.com.au".
the payment of the cost of resupply of our services.
generally accepted practice or guidelines.
You agree not to access (or attempt to access) any part of the Website by any means other than through the interface provided by us.
You agree that you will not engage in any activity that interferes with or disrupts the website or the servers and networks that host the website.
You agree not to, or attempt to, circumvent, disable or otherwise interfere with security-related features of the website or features that prevent or restrict use or copying of any content or enforce limitations on the use of the website or the content therein.
You agree not to use, copy, distribute or commercialise content except as permitted by this agreement, by law or with our prior written consent.
You understand and agree that any suspected fraudulent, abusive or illegal activity may be referred to appropriate law enforcement authorities.
Information about products (ie goods and services) on the website is based on material provided by suppliers and product manufacturers.
You understand and agree that we cannot be held responsible for inaccuracies or errors caused by incorrect information supplied to us or by manufacturers or suppliers changing product specifications without notice to us.
You agree to make your own enquiries to verify information provided and to assess the suitability of products before you purchase.
Products displayed on the website do not constitute an offer to sell. It is an invitation to treat only.
Orders placed by you are offers to purchase particular product under the terms and conditions in this agreement at the price specified (including delivery and other charges).
We reserve the right to accept or reject your offer for any reason (or no reason) including, but not limited to, the unavailability of any product, an error in the price or product description, or an error in your order. In the event that we cancel your order, we will provide a full refund of any payment received.
You may cancel your order only if we have not started processing it. A cancellation and re-stocking fee of 20% or $25 (whichever is lower) applies. Please contact us through the help centre.
The prices of products, delivery and other charges shown are in Australian dollars and include GST where applicable.
Prices are current at time of display but are subject to change.
All payments must be received in full prior to dispatch. Please read the Payment section of the website for payment options.
If your payment is not received or declined by your bank or credit card issuer, we cannot hold product against your order.
Subject to this agreement, we will supply to you the products shown on your order confirmation.
You understand that we will use our best endeavours to meet stated timeframes for dispatch and delivery, however many factors can affect these timeframes and we cannot guarantee that they will always be met.
Please read the Delivery section of the website for delivery options and details.
We do allow personal pick-up, in Victoria only.
We retain ownership of goods until payment is received in full.
Risk in goods, such as loss or damage, passes to you upon delivery.
Your satisfaction is our number one priority however, please choose carefully as we do not refund or exchange simply because you changed your mind or the product was not what you expected.
However, if you wish to return products in their original packaging and in saleable condition we will provide you with a store credit for the purchase price less delivery fee. A restocking fee of 20% or $25 (whichever is lower) will apply.
Please contact us via the FAQ to obtain a Return Authorisation Number as no returns can be accepted without one.
We will provide you with a store credit only when we receive the returned product. Return postage is at your expense.
Store credits must be used within 12 months of issue. Store credits may only be used to purchase from true-blue.com.au and are not redeemable for cash.
We will refund, repair or replace if the product you receive doesn't match the sample or description, doesn't do what it is supposed to do, or is not of merchantable quality, e.g. defective or dead-on-arrival (DOA).
Where a refund, repair or replacement is approved under the terms of this warranty, we will pay for, or reimburse you for, any shipping costs to return the original product to us.
Warranty applies to the original product. Replacement product has the same warranty as the original. You must retain your proof of purchase for any manufacturer warranty claims.
We reserve the right to charge you, at our current hourly rate, for the cost of examining the good if our examination reveals that there has not been a breach of statutory conditions or warranties ie. the good is not DOA, not defective or faulty, or if it does match the sample or description.
Refunds will be issued by direct deposit, cheque or PayPal at our discretion.
In order to obtain these remedies: you must notify us within a reasonable time after you become aware that you wish to make a claim for a breach of condition or warranty in clause 15; generally within 14 days after you have received the product. Please contact us via the contact us page. Once contacted, we will advise you of the best way to return any products. We may not accept products returned without a Return Authorisation Number.
In some circumstances, we may refund, replace or repair goods that you find have a defect or fault when you have owned or used it for some time but displays a manufacturing defect or fault within a reasonable time during which it should not have developed that defect or fault. Please contact us via the help centre.
A "reasonable time" is the amount of time that is reasonable to expect, given the cost and quality of the item.
Incorrect or defective goods must be returned to us in the condition received with all original packaging.
Replacement of good or refund and reimbursement of freight costs will not be made until the original good is received by us and your claim verified.
We aim to process refunds and replacements within 28 days of receipt by us of the original product.
We do not refund, repair or replace where in our reasonable opinion the product becomes unmerchantable due to fair wear and tear, misuse, failure to use in accordance with manufacturer's instructions, using it in an abnormal way or failure to take reasonable care.
Products damaged in transit must be reported to us within 24 hours of receipt so that we may make a claim under transit insurance.
Goods that develop a defect after first use may be covered by manufacturer warranty. You may wish to contact the manufacturer regarding returns and repairs.
Storage media (such as hard drives) can fail without warning and if this occurs, programs, data or other information ("software") stored on the media may be at risk of corruption or irrecoverably lost.
If you purchase storage media from us, whether separately or as a part of a larger electronic or computing product, it is your responsibility to guard against loss or damage to software stored on the media and to implement strategies for the safe keeping of software.
In the event that storage media purchased from us becomes faulty, fails or otherwise detrimentally affects software stored on it, we will not be liable for any loss or damage, howsoever arising.
If you return media, whether separately or as a part of a larger electronic or computing product, to us for any reason, including replacement or repair, we will not be responsible for any data stored on the media. We make no representation that we will be able to repair any product or make a product exchange without risk to or loss of software.
The Website may include links to other websites, content or resources. These linked websites, content or resources may be operated by third parties and we may have no responsibility or control over them. The existence of these links does not imply that we endorse the linked website, content or resource. You acknowledge that we have not reviewed any of these third party websites, content or resources and we are not responsible for the material contained therein.
We reserve all intellectual property rights, including but not limited to, copyright in material and/or services provided by us. Nothing in the agreement gives you a right to use any of our marketing material, business names, trademarks, logos, domain names or other distinctive brand features.
Other trade marks used on the website that belong to third parties are used with permission and remain the intellectual property of the third party.
You may not modify or copy the layout or appearance of the website nor any computer software or code contained in the website. You may not decompile or disassemble, reverse engineer or otherwise attempt to discover or access any source code related to the website.
If you correspond or otherwise communicate with us, you automatically grant to us an irrevocable, perpetual, non-exclusive, royalty-free, world-wide licence to use, copy, display and distribute the content of your correspondence or communication and to prepare derivative works of the content or incorporate the content into other works in order to publish and promote such content. This may include, but is not limited to, publishing testimonials on our website and developing your ideas and suggestions for improved products or services we provide.
In the event that we merge, sell or otherwise change control of our business or this website to a third-party, we reserve the right, without giving notice or seeking consent, to transfer or assign the personal information, content and rights that we have collected from you and any agreements we have made with you.
You will at all times indemnify, and keep indemnified, us and our directors, officers, employees and agents from and against any loss (including reasonable legal costs and expenses on a full indemnity basis) or liability incurred or suffered by you or by us arising from any claim, demand, suit, action or proceeding by any person against you or us where such loss or liability arose out of, in connection with or in respect of your conduct or breach of this agreement.
We shall not be liable for any delay in performing any of our obligations under this agreement if such delay is caused by circumstances beyond our reasonable control.
This agreement will be governed by and interpreted in accordance with the laws of Victoria, Australia. You irrevocably submit to the non exclusive jurisdiction of the courts of the State of Victoria, Australia.
If any part of this agreement is found to be void, unlawful or unenforceable then that part will be deemed to be severable from the balance of this agreement and the severed part will not affect the validity and enforceability of any remaining provisions.
If we do not exercise or enforce any right or provision under this agreement, it will not constitute a waiver of such right or provision. Any waiver of any provision under this agreement will only be effective if it is in writing and signed by us. |
-- HASKELL
-- $ ghci  # starts the GHCi interpreter (with the Haskell Prelude loaded)
> 2 + 2
4
> 2 * 2
4
* 4 2
>2 == 2
True
>2 /= 2
False
-- @
-- OPERATORS: + - * / div ** ^
-- | /   -> fractional (floating-point) division
-- | div -> integer division
--@
COMPS: && || not (AND/OR/NOT)
Decimals POINT: 2.333333333
CHAR: 'a'
STRING (list chars): "SEND NUDES"
:t 'A' -- "What is the type of 'A'?"
'A' :: Char --'A' is of type Char
:t "A" --"What is the type of "A"?"
"A" :: [Char] --"A" is of type [Char] (a list of Char)
:t 2==4 --"What is the type of 2==4?"
2==4 :: Bool --2==4 is of type Bool
:t 5 --"What is the type of 5?"
5 :: Num t => t --5 is of some Num type (Num works like a Java interface)
:t (5::Int) --"What is the type of (5::Int)?"
(5::Int) :: Int --(5::Int) is of type Int
:t (5::Integer) --"What is the type of (5::Integer)?"
(5::Integer) :: Integer --(5::Integer) is of type Integer
-- PROTIP: Define your types before you start programming
FUNCTIONS: <NAME> <arg1> <arg2> .. <argN> --ARGUMENTS SEPARATED BY SPACES
--In Haskell every function really takes just one argument (currying)
f :: Int -> Int -> Int ======> f :: Int -> (Int -> Int)
> (*) 2 3 --Operators can be used as prefix functions
6
dobla = (*) 2 --Not an assignment, but a definition ("dobla" = double)
>dobla 6
12
--FILES: *.hs
> :r --reload the currently loaded file
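-- A minimal sketch of what a *.hs file might contain (the file name and the salut function are made up for illustration):
-- myfuns.hs
dobla :: Int -> Int         -- type signature first
dobla = (*) 2               -- definition by partial application of (*)
salut :: String -> String
salut nom = "Hola " ++ nom  -- string concatenation with ++
-- load it with  > :l myfuns.hs  and, after editing, reload with  > :r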
-- Lists, ALL homogeneous (every element has the same type)
>[1, 2, 4]
[1,2,4]
>:t [1, 2, 4]
[1, 2, 4] :: Num t => [t]
>[4, 3, 9]
[4,3,9]
>[]
[]
> 9:[] --Push front (cons, the recursive list constructor)
[9]
> 5:9:[]
[5,9]
>head [1..10]
1
>tail [1..10]
[2,3,4,5,6,7,8,9,10]
>init [1..10]
[1,2,3,4,5,6,7,8,9]
> [1..10]++[5..10] --concat
[1,2,3,4,5,6,7,8,9,10,5,6,7,8,9,10]
|
lemma uncountable_open_segment: fixes a :: "'a::real_normed_vector" assumes "a \<noteq> b" shows "uncountable (open_segment a b)" |
A function is analytic on a set $S$ if and only if it is analytic at every point of $S$. |
Geospatial Australia is established as a spatial data analysis and data acquisition company in Dubbo, Central West New South Wales. Our modern business model is based on leveraging the advancements in technology for spatial data acquisition, processing and analysis.
Categories Listed: 3 categories including: Aerial Photographers, Surveying and Mapping Services and Geophysicists. |
%CREATECLIQUETREE Takes in a list of factors F and returns a
%clique tree after calling ComputeInitialPotentials at the end.
%
% C = CREATECLIQUETREE(F) Takes a list of factors and creates a clique
% tree. The value of the cliques should be initialized to
% the initial potential.
% It returns a clique tree that has the following fields:
% - .edges: Contains indices of the nodes that have edges between them.
% - .factorList: Contains the list of factors used to build the Clique
% tree.
%
% Copyright (C) Daphne Koller, Stanford University, 2012
function P = CreateCliqueTree(F)
C.nodes = {};
V = unique([F(:).var]);
% Setting up the cardinality for the variables since we only get a list
% of factors.
C.card = zeros(1, length(V));
for i = 1 : length(V),
for j = 1 : length(F)
if (~isempty(find(F(j).var == i)))
C.card(i) = F(j).card(find(F(j).var == i));
break;
end
end
end
C.factorList = F;
% Setting up the adjacency matrix.
edges = zeros(length(V));
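% Connect every pair of variables that appear together in some factor.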
for i = 1:length(F)
for j = 1:length(F(i).var)
for k = 1:length(F(i).var)
edges(F(i).var(j), F(i).var(k)) = 1;
end
end
end
cliquesConsidered = 0;
while cliquesConsidered < length(V)
% Using Min-Neighbors where you prefer to eliminate the variable that has
% the smallest number of edges connected to it.
% Every time you enter the loop, you look at the state of the graph and
% pick the variable to be eliminated.
bestClique = 0;
bestScore = inf;
for i=1:size(edges,1)
score = sum(edges(i,:));
if score > 0 && score < bestScore
bestScore = score;
bestClique = i;
end
end
cliquesConsidered = cliquesConsidered + 1;
[F, C, edges] = EliminateVar(F, C, edges, bestClique);
end
% Pruning the tree.
C = PruneTree(C);
% Assume that C now has correct cardinality, variables, nodes and edges.
% Here we make the function call to assign factors to cliques and compute the
% initial potentials for clusters.
P = ComputeInitialPotentials(C);
|
Fresh from its unveiling at the Paris Show, BMW’s latest G20-generation M340i looks to reassert Munich’s position as the driver’s choice in its class. In recent years that dominance has waned a little, but with a wider track, a lower centre of gravity, a chassis that’s 50 percent stiffer than before and a trick e-diff, the latest Three looks to be back and firing.
We’ve yet to get behind the wheel of the M340i xDrive range topper, but we’ve seen a few numbers and, well, we’ll leave it to you to decide if the car can punch its weight when faced with the Audi S4, the Mercedes-AMG C43 and a welter of other tasty rivals. So, without wishing to prejudice your decision in any way, here are the facts.
We’ll have pricing for you on the back of the car’s Los Angeles show reveal, so keep checking back. In the meantime, let us know which of these power-packed sedans gets your vote. |
Instagram (http://instagram.com/) is another social networking site where you can snap photos with your smartphone and share them with friends. In April 2012 Facebook acquired the company and integrated the service with Facebook user profiles.
If you want to release your Instagram photos under a Creative Commons license, so they can be used on DavisWiki and other sites freely, check out iamcc.org (http://iamcc.org).
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Relation.Binary where
open import Cubical.Relation.Binary.Base public
open import Cubical.Relation.Binary.Properties public
open import Cubical.Relation.Binary.Fiberwise public
|
(* Title: HOL/Auth/n_g2kAbsAfter_lemma_on_inv__42.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_g2kAbsAfter Protocol Case Study*}
theory n_g2kAbsAfter_lemma_on_inv__42 imports n_g2kAbsAfter_base
begin
section{*All lemmas on causal relation between inv__42 and some rule r*}
lemma n_n_RecvReq_i1Vsinv__42:
assumes a1: "(r=n_n_RecvReq_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (andForm (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (neg (eqn (IVar (Field (Ident ''Cache_1'') ''State'')) (Const E)))) (eqn (IVar (Field (Ident ''Chan2_1'') ''Cmd'')) (Const Empty))) (eqn (IVar (Ident ''ShrSet_1'')) (Const true))) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_SendInvE_i1Vsinv__42:
assumes a1: "(r=n_n_SendInvE_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_SendInvS_i1Vsinv__42:
assumes a1: "(r=n_n_SendInvS_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_SendInvAck_i1Vsinv__42:
assumes a1: "(r=n_n_SendInvAck_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''InvSet_1'')) (Const true)) (eqn (IVar (Field (Ident ''Chan2_1'') ''Cmd'')) (Const Inv))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_RecvInvAck_i1Vsinv__42:
assumes a1: "(r=n_n_RecvInvAck_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_n_SendGntS_i1Vsinv__42:
assumes a1: "(r=n_n_SendGntS_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_SendGntE_i1Vsinv__42:
assumes a1: "(r=n_n_SendGntE_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_RecvGntS_i1Vsinv__42:
assumes a1: "(r=n_n_RecvGntS_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (eqn (IVar (Field (Ident ''Chan2_1'') ''Cmd'')) (Const GntS))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_RecvGntE_i1Vsinv__42:
assumes a1: "(r=n_n_RecvGntE_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_ARecvReq_i1Vsinv__42:
assumes a1: "(r=n_n_ARecvReq_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (andForm (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (neg (eqn (IVar (Field (Ident ''Cache_1'') ''State'')) (Const E)))) (eqn (IVar (Field (Ident ''Chan2_1'') ''Cmd'')) (Const Empty))) (eqn (IVar (Ident ''ShrSet_1'')) (Const true))) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_ARecvInvAck_i1Vsinv__42:
assumes a1: "(r=n_n_ARecvInvAck_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_n_ASendGntE_i1Vsinv__42:
assumes a1: "(r=n_n_ASendGntE_i1 )" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''InvSet_1'')) (Const true)) (eqn (IVar (Ident ''ShrSet_1'')) (Const false))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_SendReqEI_i1Vsinv__42:
assumes a1: "r=n_n_SendReqEI_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendReqEI_i1Vsinv__42:
assumes a1: "r=n_n_ASendReqEI_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendReqIS_j1Vsinv__42:
assumes a1: "r=n_n_ASendReqIS_j1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendReqES_i1Vsinv__42:
assumes a1: "r=n_n_ASendReqES_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ARecvGntE_i1Vsinv__42:
assumes a1: "r=n_n_ARecvGntE_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendGntS_i1Vsinv__42:
assumes a1: "r=n_n_ASendGntS_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ARecvGntS_i1Vsinv__42:
assumes a1: "r=n_n_ARecvGntS_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendInvE_i1Vsinv__42:
assumes a1: "r=n_n_ASendInvE_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendInvS_i1Vsinv__42:
assumes a1: "r=n_n_ASendInvS_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_SendReqES_i1Vsinv__42:
assumes a1: "r=n_n_SendReqES_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendReqSE_j1Vsinv__42:
assumes a1: "r=n_n_ASendReqSE_j1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_SendReqEE_i1Vsinv__42:
assumes a1: "r=n_n_SendReqEE_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_Store_i1Vsinv__42:
assumes a1: "\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_AStore_i1Vsinv__42:
assumes a1: "\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d" and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_SendReqS_j1Vsinv__42:
assumes a1: "r=n_n_SendReqS_j1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendInvAck_i1Vsinv__42:
assumes a1: "r=n_n_ASendInvAck_i1 " and
a2: "(f=inv__42 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
As the regular 2006 season began for the Twins, Nathan started off strong, allowing no runs from the start of the season to April 25. He also converted 10 straight save opportunities from April 11 to June 17. On June 24, Nathan recorded his one hundredth career save against the Chicago Cubs, and 99th save with Minnesota. Four days later he got save number 101, his hundredth save with Minnesota against the Los Angeles Dodgers, becoming the fifth pitcher in Twins history to achieve that mark. Despite putting up great numbers during the 2006 season, Nathan was not selected to the All-Star Game. He continued to pitch well throughout the season, passing Eddie <unk> for second on the Twins' all-time save list when he earned his 117th save against the Detroit Tigers on September 9. Nathan was also given the Major League Baseball Delivery Man of the Month award for July, going nine for nine in save opportunities and posting a 0.75 ERA for the month. He finished the season with some of his best numbers to date: a 7–0 record, a 1.58 ERA, 95 strikeouts, 36 saves, an 18th-place finish in MVP voting, and a fifth-place finish in Cy Young voting. His 61 games finished were also good for the AL lead and opponents batted just .158 against him, a career high. With 36 saves in 38 opportunities, Nathan also became the first pitcher for the organization to earn 35 saves in three straight seasons. The Twins won the division on the last day of the regular season, but were swept by the Oakland Athletics in the ALDS as Nathan made one scoreless appearance.
|
theory SKAT_Term
imports KAT Boolean_Algebra_Extras
begin
section {* SKAT Terms *}
(* +------------------------------------------------------------------------+ *)
subsection {* Ranked Alphabets *}
(* +------------------------------------------------------------------------+ *)
class ranked_alphabet =
fixes arity :: "'a \<Rightarrow> nat"
and funs :: "'a set"
and rels :: "'a set"
and NULL :: 'a
and output_vars :: "'a itself \<Rightarrow> nat set"
assumes funs_rels_disjoint: "funs \<inter> rels = {}"
and funs_rels_union: "funs \<union> rels = UNIV"
and funs_exist: "funs \<noteq> {}"
and rels_exist: "rels \<noteq> {}"
and NULL_fun: "NULL \<in> funs"
and NULL_arity: "arity NULL = 0"
and output_exists: "\<exists>x. x \<in> output_vars TYPE('a)"
and output_finite: "finite (output_vars TYPE('a))"
text {* A ranked alphabet consists of a set of disjoint function and
relation symbols. Each symbol in the alphabet has an associated
arity. The set @{const funs} contains all the function symbols, while
@{const rels} contains all the relation symbols. The @{const arity}
function returns the arity of a symbol.
Ranked alphabets are formalised as a typeclass, so a concrete alphabet
is simply a type which supports the above functions and the typeclass
laws. This avoids having to parameterise definitions with alphabets,
which allows things to stay at the type level. *}
(* +------------------------------------------------------------------------+ *)
subsection {* Terms *}
(* +------------------------------------------------------------------------+ *)
datatype 'a trm = App 'a "'a trm list" | Var nat
fun trm_vars :: "'a trm \<Rightarrow> nat set" where
"trm_vars (App f xs) = \<Union> (set (map trm_vars xs))"
| "trm_vars (Var v) = {v}"
fun trm_subst :: "nat \<Rightarrow> 'a trm \<Rightarrow> 'a trm \<Rightarrow> 'a trm" where
"trm_subst v s (Var v') = (if v' = v then s else Var v')"
| "trm_subst v s (App f xs) = App f (map (trm_subst v s) xs)"
inductive_set wf_trms :: "'a::ranked_alphabet trm set" where
var: "Var n \<in> wf_trms"
| func: "\<lbrakk>f \<in> funs; arity f = n; xs \<in> lists wf_trms; length xs = n\<rbrakk> \<Longrightarrow> App f xs \<in> wf_trms"
lemma trm_subterms: "App f xs \<in> wf_trms \<Longrightarrow> xs \<in> lists wf_trms"
by (metis (lifting) trm.simps(1) trm.simps(4) wf_trms.simps)
lemma trm_subterms_var: "App f xs \<in> wf_trms \<Longrightarrow> set xs \<subseteq> wf_trms"
by (metis in_lists_conv_set subset_code(1) trm_subterms)
lemma map_in_lists: "map f xs \<in> lists X \<longleftrightarrow> f ` set xs \<subseteq> X"
by (metis in_lists_conv_set in_mono set_map subsetI)
lemma trm_subst_wf: "\<lbrakk>s \<in> wf_trms; x \<in> wf_trms\<rbrakk> \<Longrightarrow> trm_subst v s x \<in> wf_trms"
proof (rule wf_trms.induct[of x "\<lambda>x. trm_subst v s x \<in> wf_trms"], safe)
fix n assume "s \<in> wf_trms" and "x \<in> wf_trms" thus "trm_subst v s (Var n) \<in> wf_trms"
by (metis trm_subst.simps(1) wf_trms.var)
next
fix f :: "'a::ranked_alphabet" and xs :: "'a trm list"
assume s_trm: "s \<in> wf_trms" and x_trm: "x \<in> wf_trms" and f_fun: "f \<in> funs"
and xs_len: "length xs = arity f"
and ind_hyp: "\<forall>x\<in>set xs. x \<in> wf_trms \<inter> {x. trm_subst v s x \<in> wf_trms}"
show "trm_subst v s (App f xs) \<in> wf_trms"
proof (simp, rule wf_trms.func[of _ "length xs"])
show "f \<in> funs" using f_fun .
show "arity f = length xs" by (simp add: xs_len)
show "map (trm_subst v s) xs \<in> lists wf_trms"
by (simp add: map_in_lists image_def, auto, metis Int_Collect ind_hyp)
show "length (map (trm_subst v s) xs) = length xs"
by (metis length_map)
qed
qed
lemma wf_trms_const: "\<lbrakk>f \<in> funs; arity f = 0\<rbrakk> \<Longrightarrow> App f [] \<in> wf_trms"
by (rule wf_trms.func, simp_all add: lists_def)
(* +------------------------------------------------------------------------+ *)
subsection {* n-tuples *}
(* +------------------------------------------------------------------------+ *)
text {* Given a set, generate all possible $n$-tuples of a specific
length. Here we represent an $n$-tuple as a list. *}
fun ntuples :: "'a set \<Rightarrow> nat \<Rightarrow> ('a list) set" ("_\<^bsup>_\<^esup>") where
"X\<^bsup>0\<^esup> = {[]}"
| "X\<^bsup>Suc n\<^esup> = {x. \<exists>y\<in>X. \<exists>ys\<in>X\<^bsup>n\<^esup>. x = y#ys}"
lemma ntuples1: "set xs \<subseteq> X \<longleftrightarrow> xs \<in> X\<^bsup>length xs\<^esup>" by (induct xs, simp_all)
lemma ntuples2: "\<lbrakk>set xs \<subseteq> X; length xs = n\<rbrakk> \<Longrightarrow> xs \<in> X\<^bsup>n\<^esup>" by (metis ntuples1)
lemma ntuples3: "xs \<in> X\<^bsup>length xs\<^esup> \<Longrightarrow> set xs \<subseteq> X"
by (induct xs, simp_all)
lemma ntuples4: "xs \<in> X\<^bsup>n\<^esup> \<Longrightarrow> length xs = n"
apply (induct X n arbitrary: xs rule: ntuples.induct)
by (simp_all, metis Suc_length_conv)
lemma ntuples5 [iff]: "xs \<in> X\<^bsup>n\<^esup> \<longleftrightarrow> (set xs \<subseteq> X \<and> length xs = n)"
by (metis ntuples1 ntuples4)
lemma ntuples6: "(x \<in> X \<and> xs \<in> X\<^bsup>n\<^esup>) \<longleftrightarrow> x#xs \<in> X\<^bsup>Suc n\<^esup>" by simp
lemma ntuples7: "n \<noteq> 0 \<longleftrightarrow> [] \<notin> X\<^bsup>n\<^esup>"
by (induct n, simp_all)
lemma ntuples_set: "X\<^bsup>n\<^esup> = {xs. set xs \<subseteq> X \<and> length xs = n}" by auto
type_synonym 'a relation = "('a list) set"
(* +------------------------------------------------------------------------+ *)
subsection {* Interpretation and term evaluation *}
(* +------------------------------------------------------------------------+ *)
record ('a, 'b) interp =
interp_fun :: "'a \<Rightarrow> 'b list \<Rightarrow> 'b"
interp_rel :: "'a \<Rightarrow> 'b relation"
type_synonym 'a mem = "nat \<Rightarrow> 'a"
fun eval_trm :: "('a::ranked_alphabet, 'b) interp \<Rightarrow> (nat \<Rightarrow> 'b) \<Rightarrow> 'a trm \<Rightarrow> 'b" where
"eval_trm D mem (Var n) = mem n"
| "eval_trm D mem (App f xs) = interp_fun D f (map (eval_trm D mem) xs)"
definition null :: "'a::ranked_alphabet trm" where
"null \<equiv> App NULL []"
abbreviation trm_subst_notation :: "'a::ranked_alphabet trm \<Rightarrow> nat \<Rightarrow> 'a trm \<Rightarrow> 'a trm"
("_[_|_]" [100,100,100] 101) where
"s[x|t] \<equiv> trm_subst x t s"
(* +------------------------------------------------------------------------+ *)
subsection {* Predicates *}
(* +------------------------------------------------------------------------+ *)
datatype 'a pred = Pred 'a "'a trm list"
inductive_set wf_preds :: "'a::ranked_alphabet pred set" where
"\<lbrakk>P \<in> rels; arity P = length xs\<rbrakk> \<Longrightarrow> Pred P xs \<in> wf_preds"
primrec eval_pred :: "('a::ranked_alphabet, 'b) interp \<Rightarrow> 'b mem \<Rightarrow> 'a pred \<Rightarrow> bool" where
"eval_pred D mem (Pred P xs) \<longleftrightarrow> map (eval_trm D mem) xs \<in> interp_rel D P"
primrec pred_subst :: "nat \<Rightarrow> 'a::ranked_alphabet trm \<Rightarrow> 'a pred \<Rightarrow> 'a pred" where
"pred_subst v s (Pred P xs) = Pred P (map (trm_subst v s) xs)"
abbreviation
pred_subst_notation :: "'a::ranked_alphabet pred \<Rightarrow> nat \<Rightarrow> 'a trm \<Rightarrow> 'a pred"
("_[_|_]" [100,100,100] 101) where
"s[x|t] \<equiv> pred_subst x t s"
(* Simple while programs *)
datatype 'a prog = If "'a pred" "'a prog" "'a prog"
| While "'a pred" "'a prog"
| Seq "'a prog" "'a prog"
| Assign nat "'a trm"
| Skip
fun prog_preds :: "'a prog \<Rightarrow> 'a pred set" where
"prog_preds (If P x y) = {P} \<union> prog_preds x \<union> prog_preds y"
| "prog_preds (While P x) = {P} \<union> prog_preds x"
| "prog_preds (Seq x y) = prog_preds x \<union> prog_preds y"
| "prog_preds (Assign _ _) = {}"
| "prog_preds Skip = {}"
fun prog_whiles :: "'a prog \<Rightarrow> 'a prog set" where
"prog_whiles (If P x y) = prog_whiles x \<union> prog_whiles y"
| "prog_whiles (While P x) = {While P x} \<union> prog_whiles x"
| "prog_whiles (Seq x y) = prog_whiles x \<union> prog_whiles y"
| "prog_whiles (Assign _ _) = {}"
| "prog_whiles Skip = {}"
fun eval_prog :: "nat \<Rightarrow> ('a::ranked_alphabet, 'b) interp \<Rightarrow> 'b mem \<Rightarrow> 'a prog \<Rightarrow> 'b mem option" where
"eval_prog 0 _ _ _ = None"
| "eval_prog (Suc n) D mem (If P x y) =
(if eval_pred D mem P
then eval_prog n D mem x
else eval_prog n D mem y)"
| "eval_prog (Suc n) D mem (While P x) =
(if eval_pred D mem P
then case eval_prog n D mem x of
Some mem' \<Rightarrow> eval_prog n D mem' (While P x)
| None \<Rightarrow> None
else Some mem)"
| "eval_prog (Suc n) D mem (Seq x y) =
(case eval_prog n D mem x of
Some mem' \<Rightarrow> eval_prog n D mem' y
| None \<Rightarrow> None)"
| "eval_prog (Suc n) D mem Skip = Some mem"
| "eval_prog (Suc n) D mem (Assign m x) =
(Some (\<lambda>v. if v = m then eval_trm D mem x else mem v))"
fun FV :: "'a trm \<Rightarrow> nat set" where
"FV (Var v) = {v}"
| "FV (App f xs) = foldr op \<union> (map FV xs) {}"
lemma app_FV: "v \<notin> FV (App f xs) \<Longrightarrow> \<forall>x\<in>set xs. v \<notin> FV x"
by (erule contrapos_pp, simp, induct xs, auto)
lemma no_FV [simp]: "v \<notin> FV s \<Longrightarrow> s[v|t] = s"
proof (induct s)
fix f xs
assume asm: "\<forall>x\<in>set xs. v \<notin> FV x \<longrightarrow> trm_subst v t x = x"
and "v \<notin> FV (App f xs)"
hence "\<forall>x\<in>set xs. v \<notin> FV x"
by (metis app_FV)
thus "trm_subst v t (App f xs) = App f xs"
by (metis (lifting) asm map_idI trm_subst.simps(2))
next
fix v' assume "v \<notin> FV (Var v')"
thus "trm_subst v t (Var v') = Var v'" by simp
next
show "\<forall>x\<in>set []. v \<notin> FV x \<longrightarrow> trm_subst v t x = x" by simp
next
fix x xs
assume "v \<notin> FV x \<Longrightarrow> trm_subst v t x = x"
and "\<forall>y\<in>set xs. v \<notin> FV y \<longrightarrow> trm_subst v t y = y"
thus "\<forall>y\<in>set (x # xs). v \<notin> FV y \<longrightarrow> trm_subst v t y = y"
by auto
qed
primrec pred_vars :: "'a::ranked_alphabet pred \<Rightarrow> nat set" where
"pred_vars (Pred P xs) = \<Union> (set (map FV xs))"
lemma no_pred_vars: "v \<notin> pred_vars \<phi> \<Longrightarrow> \<phi>[v|t] = \<phi>"
proof (induct \<phi>, simp)
fix xs :: "'a trm list" assume "\<forall>x\<in>set xs. v \<notin> FV x"
thus "map (trm_subst v t) xs = xs"
by (induct xs, simp_all)
qed
ML {*
structure AlphabetRules = Named_Thms
(val name = @{binding "alphabet"}
val description = "Alphabet rules")
*}
setup {* AlphabetRules.setup *}
lemma trm_simple_induct': "\<lbrakk>\<And>f xs. (\<forall>x\<in>set xs. P x) \<Longrightarrow> P (App f xs); \<And>n. P (Var n)\<rbrakk> \<Longrightarrow> P s \<and> (\<forall>x\<in>set []. P x)"
by (rule trm.induct[of "\<lambda>xs. (\<forall>x\<in>set xs. P x)" P], simp_all)
lemma trm_simple_induct: "\<lbrakk>\<And>n. P (Var n); \<And>f xs. (\<forall>x\<in>set xs. P x) \<Longrightarrow> P (App f xs)\<rbrakk> \<Longrightarrow> P s"
by (metis trm_simple_induct')
lemma foldr_FV: "foldr op \<union> (map FV xs) {} = \<Union> (FV ` set xs)"
by (induct xs, auto)
lemma eval_trm_eq_mem: "(\<forall>v\<in>FV s. m1 v = m2 v) \<Longrightarrow> eval_trm D m1 s = eval_trm D m2 s"
proof (induct rule: trm_simple_induct, auto)
fix f :: "'a" and xs :: "'a trm list"
assume asm1: "\<forall>x\<in>set xs. (\<forall>v\<in>FV x. m1 v = m2 v) \<longrightarrow> eval_trm D m1 x = eval_trm D m2 x"
and asm2: "\<forall>v\<in>foldr op \<union> (map FV xs) {}. m1 v = m2 v"
have "foldr op \<union> (map FV xs) {} = \<Union> (FV ` set xs)"
by (induct xs, auto)
hence "\<forall>v\<in>\<Union>(FV ` set xs). m1 v = m2 v"
by (metis asm2)
hence "\<forall>x\<in>set xs. (\<forall>v\<in>FV x. m1 v = m2 v)"
by (metis UnionI imageI)
hence "\<forall>x\<in>set xs. eval_trm D m1 x = eval_trm D m2 x"
by (metis asm1)
hence "map (eval_trm D m1) xs = map (eval_trm D m2) xs"
by (induct xs, auto)
thus "interp_fun D f (map (eval_trm D m1) xs) = interp_fun D f (map (eval_trm D m2) xs)"
by metis
qed
definition set_mem :: "nat \<Rightarrow> 'a \<Rightarrow> 'a mem \<Rightarrow> 'a mem" where
"set_mem x s mem \<equiv> \<lambda>v. if v = x then s else mem v"
definition assign ::
"('a::ranked_alphabet, 'b) interp \<Rightarrow> nat \<Rightarrow> 'a trm \<Rightarrow> 'b mem \<Rightarrow> 'b mem"
where
"assign D x s mem = set_mem x (eval_trm D mem s) mem"
definition halt_null :: "('a::ranked_alphabet, 'b) interp \<Rightarrow> 'b mem \<Rightarrow> 'b mem"
where
"halt_null D mem \<equiv> \<lambda>v. if v \<notin> output_vars TYPE('a) then interp_fun D NULL [] else mem v"
lemma eval_assign1:
assumes xy: "x \<noteq> y" and ys: "y \<notin> FV s"
shows "assign D y t (assign D x s mem) = assign D x s (assign D y (trm_subst x s t) mem)"
apply (induct t rule: trm_simple_induct)
apply (simp add: assign_def set_mem_def)
apply default
apply default
apply default
apply (smt eval_trm_eq_mem ys)
apply auto
apply default
apply (smt eval_trm.simps(1) eval_trm_eq_mem xy ys)
proof
fix f ts v
assume "\<forall>t\<in>set ts. assign D y t (assign D x s mem) =
assign D x s (assign D y (trm_subst x s t) mem)"
hence "\<forall>t\<in>set ts. assign D y t (assign D x s mem) v =
assign D x s (assign D y (trm_subst x s t) mem) v"
by auto
thus "assign D y (App f ts) (assign D x s mem) v =
assign D x s (assign D y (App f (map (trm_subst x s) ts)) mem) v"
apply (simp add: assign_def set_mem_def o_def)
by (smt eval_trm_eq_mem map_eq_conv xy ys)
qed
lemma eval_assign2:
assumes xy: "x \<noteq> y" and xs: "x \<notin> FV s"
shows "assign D y t (assign D x s mem) =
assign D y (trm_subst x s t) (assign D x s mem)"
apply (induct t rule: trm_simple_induct)
apply (simp add: assign_def set_mem_def)
apply default
apply default
apply default
apply (smt eval_trm_eq_mem xs)
apply auto
proof
fix f ts v
assume "\<forall>t\<in>set ts. assign D y t (assign D x s mem) =
assign D y (trm_subst x s t) (assign D x s mem)"
hence "\<forall>t\<in>set ts. assign D y t (assign D x s mem) v =
assign D y (trm_subst x s t) (assign D x s mem) v"
by auto
thus "assign D y (App f ts) (assign D x s mem) v =
assign D y (App f (map (trm_subst x s) ts)) (assign D x s mem) v"
apply (simp add: assign_def set_mem_def o_def)
by (smt eval_trm_eq_mem map_eq_conv xy xs)
qed
lemma eval_assign3: "assign D x t (assign D x s mem) = assign D x (trm_subst x s t) mem"
proof (induct t rule: trm_simple_induct, simp add: assign_def set_mem_def, auto, default)
fix f ts v
assume "\<forall>t\<in>set ts. assign D x t (assign D x s mem) = assign D x (trm_subst x s t) mem"
hence "\<forall>t\<in>set ts. assign D x t (assign D x s mem) v = assign D x (trm_subst x s t) mem v"
by auto
hence "v = x \<longrightarrow> map (eval_trm D (\<lambda>v. if v = x then eval_trm D mem s else mem v)) ts =
map (eval_trm D mem \<circ> trm_subst x s) ts"
by (auto simp add: assign_def set_mem_def)
thus "assign D x (App f ts) (assign D x s mem) v = assign D x (App f (map (trm_subst x s) ts)) mem v"
by (auto simp add: assign_def set_mem_def o_def, smt map_eq_conv)
qed
lemma eval_halt:
"x \<notin> output_vars TYPE('a::ranked_alphabet) \<Longrightarrow> halt_null D mem = halt_null D (assign D x (App (NULL::'a) []) mem)"
by (auto simp add: halt_null_def assign_def set_mem_def)
lemma subst_preds: "P \<in> wf_preds \<Longrightarrow> P[x|s] \<in> wf_preds"
apply (induct P)
apply simp
by (metis SKAT_Term.pred.inject length_map wf_preds.simps)
lemma eval_assign4: "P \<in> preds \<Longrightarrow> eval_pred D (assign D x t mem) P = eval_pred D mem (pred_subst x t P)"
proof (induct P)
fix P and xs :: "'a trm list" assume "Pred P xs \<in> preds"
have "\<And>s. s \<in> set xs \<Longrightarrow> eval_trm D (assign D x t mem) s = eval_trm D mem (s[x|t])"
by (metis eval_assign3 set_mem_def assign_def)
thus "eval_pred D (assign D x t mem) (Pred P xs) = eval_pred D mem (pred_subst x t (Pred P xs))"
by (simp add: o_def, metis (lifting) map_ext)
qed
end
|
The imaginary part of the product of a complex number with its conjugate is zero. |
module Lec7 where
open import Lec1Done
data List (X : Set) : Set where
[] : List X
_,-_ : X -> List X -> List X
foldrL : {X T : Set} -> (X -> T -> T) -> T -> List X -> T
foldrL c n [] = n
foldrL c n (x ,- xs) = c x (foldrL c n xs)
data Bwd (X : Set) : Set where
[] : Bwd X
_-,_ : Bwd X -> X -> Bwd X
infixl 3 _-,_
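-- xz <= yz : order-preserving embeddings (thinnings) from xz into yz;
-- oz is the empty embedding, os keeps the topmost element, o' skips it.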
data _<=_ {X : Set} : (xz yz : Bwd X) -> Set where
oz : [] <= []
os : {xz yz : Bwd X}{y : X} -> xz <= yz -> (xz -, y) <= (yz -, y)
o' : {xz yz : Bwd X}{y : X} -> xz <= yz -> xz <= (yz -, y)
oe : {X : Set}{xz : Bwd X} -> [] <= xz
oe {_} {[]} = oz
oe {_} {xz -, _} = o' oe
oi : {X : Set}{xz : Bwd X} -> xz <= xz
oi {_} {[]} = oz
oi {_} {xz -, _} = os oi -- look here...
_<o<_ : {X : Set}{xz yz zz : Bwd X} -> xz <= yz -> yz <= zz -> xz <= zz
th <o< o' ph = o' (th <o< ph)
oz <o< oz = oz
os th <o< os ph = os (th <o< ph) -- ...and here
o' th <o< os ph = o' (th <o< ph)
Elem : {X : Set} -> X -> Bwd X -> Set
Elem x yz = ([] -, x) <= yz
data Ty : Set where
one : Ty
list : Ty -> Ty
_=>_ : Ty -> Ty -> Ty
infixr 4 _=>_
Val : Ty -> Set
Val one = One
Val (list T) = List (Val T)
Val (S => T) = Val S -> Val T
data Tm (Tz : Bwd Ty) : Ty -> Set where
var : {T : Ty} -> Elem T Tz -> Tm Tz T
<> : Tm Tz one
[] : {T : Ty} -> Tm Tz (list T)
_,-_ : {T : Ty} -> Tm Tz T -> Tm Tz (list T) -> Tm Tz (list T)
foldr : {S T : Ty} ->
Tm Tz (S => T => T) ->
Tm Tz T ->
Tm Tz (list S)
-> Tm Tz T
lam : {S T : Ty} ->
Tm (Tz -, S) T
-> Tm Tz (S => T)
_$$_ : {S T : Ty} ->
Tm Tz (S => T) ->
Tm Tz S
-> Tm Tz T
infixl 3 _$$_
All : {X : Set} -> (X -> Set) -> Bwd X -> Set
All P [] = One
All P (xz -, x) = All P xz * P x
all : {X : Set}{P Q : X -> Set}(f : (x : X) -> P x -> Q x) -> (xz : Bwd X) -> All P xz -> All Q xz
all f [] <> = <>
all f (xz -, x) (pz , p) = all f xz pz , f x p
Env : Bwd Ty -> Set
Env = All Val
select : {X : Set}{P : X -> Set}{Sz Tz : Bwd X} -> Sz <= Tz -> All P Tz -> All P Sz
select oz <> = <>
select (os x) (vz , v) = select x vz , v
select (o' x) (vz , v) = select x vz
eval : {Tz : Bwd Ty}{T : Ty} -> Env Tz -> Tm Tz T -> Val T
eval vz (var x) with select x vz
eval vz (var x) | <> , v = v
eval vz <> = <>
eval vz [] = []
eval vz (t ,- ts) = eval vz t ,- eval vz ts
eval vz (foldr c n ts) = foldrL (eval vz c) (eval vz n) (eval vz ts)
eval vz (lam t) = \ s -> eval (vz , s) t
eval vz (f $$ s) = eval vz f (eval vz s)
append : {Tz : Bwd Ty}{T : Ty} ->
Tm Tz (list T => list T => list T)
append = lam (lam (foldr (lam (lam (var (o' (os oe)) ,- var (os oe))))
(var (os oe)) (var (o' (os oe)))))
test : Val (list one)
test = eval {[]} <> (append $$ (<> ,- []) $$ (<> ,- []))
thin : {Sz Tz : Bwd Ty} -> Sz <= Tz -> {S : Ty} -> Tm Sz S -> Tm Tz S
thin th (var x) = var (x <o< th)
thin th <> = <>
thin th [] = []
thin th (t ,- ts) = thin th t ,- thin th ts
thin th (foldr c n ts) = foldr (thin th c) (thin th n) (thin th ts)
thin th (lam t) = lam (thin (os th) t)
thin th (f $$ s) = thin th f $$ thin th s
Subst : Bwd Ty -> Bwd Ty -> Set
Subst Sz Tz = All (Tm Tz) Sz
subst : {Sz Tz : Bwd Ty} -> Subst Sz Tz -> {S : Ty} -> Tm Sz S -> Tm Tz S
subst tz (var x) with select x tz
subst tz (var x) | <> , t = t
subst tz <> = <>
subst tz [] = []
subst tz (t ,- ts) = subst tz t ,- subst tz ts
subst tz (foldr c n ts) = foldr (subst tz c) (subst tz n) (subst tz ts)
subst tz (lam t) = lam (subst (all (\ T -> thin (o' oi)) _ tz , (var (os oe))) t)
subst tz (f $$ s) = subst tz f $$ subst tz s
record Action (M : Bwd Ty -> Bwd Ty -> Set) : Set where
field
varA : forall {S Sz Tz} -> M Sz Tz -> Elem S Sz -> Tm Tz S
lamA : forall {S Sz Tz} -> M Sz Tz -> M (Sz -, S) (Tz -, S)
act : {Sz Tz : Bwd Ty} -> M Sz Tz -> {S : Ty} -> Tm Sz S -> Tm Tz S
act m (var x) = varA m x
act m <> = <>
act m [] = []
act m (t ,- ts) = act m t ,- act m ts
act m (foldr c n ts) = foldr (act m c) (act m n) (act m ts)
act m (lam t) = lam (act (lamA m) t)
act m (f $$ s) = act m f $$ act m s
THIN : Action _<=_
Action.varA THIN th x = var (x <o< th)
Action.lamA THIN = os
SUBST : Action Subst
Action.varA SUBST tz x with select x tz
... | <> , t = t
Action.lamA SUBST tz = all (\ T -> Action.act THIN (o' oi)) _ tz , (var (os oe))
-- substitution
-- thinning
-- abstr-action
|
" Securing occupation : The real meaning of the Wye River Memorandum " , New Left Review , ( 1998 ) 232 , pp. 128 – 39
|
f : (Int -> Bool) -> Int
f p = case (p 0, p 1) of
(False, False) => 0
(False, True) => 1
(True , False) => 2
(True , True) => 4
il : Int
il = f $ \x => x > 0
lc : Int
lc = f $ \case 0 => True ; _ => False
ilc : Int
ilc = f (\case 0 => True; _ => False)
|
(* This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_int_add_ident_right
imports "../../Test_Base"
begin
datatype Nat = Z | S "Nat"
datatype Integer = P "Nat" | N "Nat"
definition(*fun*) zero :: "Integer" where
"zero = P Z"
fun pred :: "Nat => Nat" where
"pred (S y) = y"
fun plus2 :: "Nat => Nat => Nat" where
"plus2 (Z) y = y"
| "plus2 (S z) y = S (plus2 z y)"
fun t2 :: "Nat => Nat => Integer" where
"t2 x y =
(let fail :: Integer =
(case y of
Z => P x
| S z =>
(case x of
Z => N y
| S x2 => t2 x2 z))
in (case x of
Z =>
(case y of
Z => P Z
| S x4 => fail)
| S x3 => fail))"
fun plus :: "Integer => Integer => Integer" where
"plus (P m) (P n) = P (plus2 m n)"
| "plus (P m) (N o2) = t2 m (plus2 (S Z) o2)"
| "plus (N m2) (P n2) = t2 n2 (plus2 (S Z) m2)"
| "plus (N m2) (N n3) = N (plus2 (plus2 (S Z) m2) n3)"
theorem property0 :
"(x = (plus x zero))"
oops
end
|
[GOAL]
α : Type u_1
inst✝ : DivisionRing α
n✝ : ℤ
a : α
n : ℤ
⊢ (-a) ^ bit1 n = -a ^ bit1 n
[PROOFSTEP]
rw [zpow_bit1', zpow_bit1', neg_mul_neg, neg_mul_eq_mul_neg]
[GOAL]
α : Type u_1
inst✝ : DivisionRing α
n : ℤ
h : Odd n
a : α
⊢ (-a) ^ n = -a ^ n
[PROOFSTEP]
obtain ⟨k, rfl⟩ := h.exists_bit1
[GOAL]
case intro
α : Type u_1
inst✝ : DivisionRing α
a : α
k : ℤ
h : Odd (bit1 k)
⊢ (-a) ^ bit1 k = -a ^ bit1 k
[PROOFSTEP]
exact zpow_bit1_neg _ _
[GOAL]
α : Type u_1
inst✝ : DivisionRing α
n : ℤ
h : Odd n
⊢ (-1) ^ n = -1
[PROOFSTEP]
rw [h.neg_zpow, one_zpow]
|
lemma distr_cong_AE: assumes 1: "M = K" "sets N = sets L" and 2: "(AE x in M. f x = g x)" and "f \<in> measurable M N" and "g \<in> measurable K L" shows "distr M N f = distr K L g" |
State Before: J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
x y : (j : J) × ↑(F.obj j)
k : J
f : x.fst ⟶ k
g : y.fst ⟶ k
⊢ M.mk F x * M.mk F y = M.mk F { fst := k, snd := ↑(F.map f) x.snd * ↑(F.map g) y.snd } State After: case mk
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
y : (j : J) × ↑(F.obj j)
k : J
g : y.fst ⟶ k
j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
⊢ M.mk F { fst := j₁, snd := x } * M.mk F y =
M.mk F { fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) y.snd } Tactic: cases' x with j₁ x State Before: case mk
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
y : (j : J) × ↑(F.obj j)
k : J
g : y.fst ⟶ k
j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
⊢ M.mk F { fst := j₁, snd := x } * M.mk F y =
M.mk F { fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) y.snd } State After: case mk.mk
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
⊢ M.mk F { fst := j₁, snd := x } * M.mk F { fst := j₂, snd := y } =
M.mk F { fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) { fst := j₂, snd := y }.snd } Tactic: cases' y with j₂ y State Before: case mk.mk
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
⊢ M.mk F { fst := j₁, snd := x } * M.mk F { fst := j₂, snd := y } =
M.mk F { fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) { fst := j₂, snd := y }.snd } State After: case mk.mk.intro.intro.intro.intro
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ M.mk F { fst := j₁, snd := x } * M.mk F { fst := j₂, snd := y } =
M.mk F { fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) { fst := j₂, snd := y }.snd } Tactic: obtain ⟨s, α, β, h₁, h₂⟩ := IsFiltered.bowtie (IsFiltered.leftToMax j₁ j₂) f
(IsFiltered.rightToMax j₁ j₂) g State Before: case mk.mk.intro.intro.intro.intro
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ M.mk F { fst := j₁, snd := x } * M.mk F { fst := j₂, snd := y } =
M.mk F { fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) { fst := j₂, snd := y }.snd } State After: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ∃ k_1 f_1 g_1,
↑(F.map f_1)
{ fst := IsFiltered.max { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst,
snd :=
↑(F.map (IsFiltered.leftToMax { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst))
{ fst := j₁, snd := x }.snd *
↑(F.map (IsFiltered.rightToMax { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst))
{ fst := j₂, snd := y }.snd }.snd =
↑(F.map g_1)
{ fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) { fst := j₂, snd := y }.snd }.snd Tactic: apply M.mk_eq State Before: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ∃ k_1 f_1 g_1,
↑(F.map f_1)
{ fst := IsFiltered.max { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst,
snd :=
↑(F.map (IsFiltered.leftToMax { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst))
{ fst := j₁, snd := x }.snd *
↑(F.map (IsFiltered.rightToMax { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst))
{ fst := j₂, snd := y }.snd }.snd =
↑(F.map g_1)
{ fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) { fst := j₂, snd := y }.snd }.snd State After: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ↑(F.map α)
{ fst := IsFiltered.max { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst,
snd :=
↑(F.map (IsFiltered.leftToMax { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst))
{ fst := j₁, snd := x }.snd *
↑(F.map (IsFiltered.rightToMax { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst))
{ fst := j₂, snd := y }.snd }.snd =
↑(F.map β) { fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) { fst := j₂, snd := y }.snd }.snd Tactic: use s, α, β State Before: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ↑(F.map α)
{ fst := IsFiltered.max { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst,
snd :=
↑(F.map (IsFiltered.leftToMax { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst))
{ fst := j₁, snd := x }.snd *
↑(F.map (IsFiltered.rightToMax { fst := j₁, snd := x }.fst { fst := j₂, snd := y }.fst))
{ fst := j₂, snd := y }.snd }.snd =
↑(F.map β) { fst := k, snd := ↑(F.map f) { fst := j₁, snd := x }.snd * ↑(F.map g) { fst := j₂, snd := y }.snd }.snd State After: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ↑(F.map α) (↑(F.map (IsFiltered.leftToMax j₁ j₂)) x * ↑(F.map (IsFiltered.rightToMax j₁ j₂)) y) =
↑(F.map β) (↑(F.map f) x * ↑(F.map g) y) Tactic: dsimp State Before: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ↑(F.map α) (↑(F.map (IsFiltered.leftToMax j₁ j₂)) x * ↑(F.map (IsFiltered.rightToMax j₁ j₂)) y) =
↑(F.map β) (↑(F.map f) x * ↑(F.map g) y) State After: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ↑(F.map α) (↑(F.map (IsFiltered.leftToMax j₁ j₂)) x) * ↑(F.map α) (↑(F.map (IsFiltered.rightToMax j₁ j₂)) y) =
↑(F.map β) (↑(F.map f) x) * ↑(F.map β) (↑(F.map g) y) Tactic: simp_rw [MonoidHom.map_mul] State Before: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ↑(F.map α) (↑(F.map (IsFiltered.leftToMax j₁ j₂)) x) * ↑(F.map α) (↑(F.map (IsFiltered.rightToMax j₁ j₂)) y) =
↑(F.map β) (↑(F.map f) x) * ↑(F.map β) (↑(F.map g) y) State After: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ↑(F.map (IsFiltered.leftToMax j₁ j₂) ≫ F.map α) x * ↑(F.map (IsFiltered.rightToMax j₁ j₂) ≫ F.map α) y =
↑(F.map f ≫ F.map β) x * ↑(F.map g ≫ F.map β) y Tactic: change (F.map _ ≫ F.map _) _ * (F.map _ ≫ F.map _) _ =
(F.map _ ≫ F.map _) _ * (F.map _ ≫ F.map _) _ State Before: case mk.mk.intro.intro.intro.intro.h
J : Type v
inst✝¹ : SmallCategory J
F : J ⥤ MonCat
inst✝ : IsFiltered J
k j₁ : J
x : ↑(F.obj j₁)
f : { fst := j₁, snd := x }.fst ⟶ k
j₂ : J
y : ↑(F.obj j₂)
g : { fst := j₂, snd := y }.fst ⟶ k
s : J
α : IsFiltered.max j₁ j₂ ⟶ s
β : k ⟶ s
h₁ : IsFiltered.leftToMax j₁ j₂ ≫ α = f ≫ β
h₂ : IsFiltered.rightToMax j₁ j₂ ≫ α = g ≫ β
⊢ ↑(F.map (IsFiltered.leftToMax j₁ j₂) ≫ F.map α) x * ↑(F.map (IsFiltered.rightToMax j₁ j₂) ≫ F.map α) y =
↑(F.map f ≫ F.map β) x * ↑(F.map g ≫ F.map β) y State After: no goals Tactic: simp_rw [← F.map_comp, h₁, h₂] |
Formal statement is: lemma is_unit_const_poly_iff: "[:c:] dvd 1 \<longleftrightarrow> c dvd 1" for c :: "'a::{comm_semiring_1,semiring_no_zero_divisors}" Informal statement is: A constant polynomial is a unit if and only if the constant is a unit. |
\documentclass{report}
\usepackage[latin1]{inputenc}
\usepackage[english]{babel}
\usepackage[T1]{fontenc}
\usepackage{enumitem}
\usepackage[margin=1in]{geometry}
\begin{document}
\section*{Divide-and-Conquer Disposition}
Name: Peter Debes Christensen\hfill
KU-name: [email protected] \\[\baselineskip]
%
What is Divide and Conquer?
\begin{enumerate}
\item A paradigm where problems are solved recursively by applying three steps
\begin{enumerate}
\item Divide into related subproblems of smaller size
\item Conquer the subproblems by solving them recursively
\item Combine subproblem solutions into solution for original problem
\end{enumerate}
\end{enumerate}
Why is it useful?
\begin{enumerate}
\item Solve recurrence equations, e.g. $T(n) = aT(n/b) + f(n)$
\end{enumerate}
How to determine asymptotic bounds: there are three methods, of which I will only go into one.
\begin{enumerate}
\item Recursion tree method
\item Substitution method
\item Master method
\end{enumerate}
Draw a recursion tree for Merge-Sort, guess an upper bound from the drawing, and show by the substitution method that the guess is correct, as sketched below.
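As a quick worked sketch (added for illustration; the constant $c$ stands for the linear merge cost per level): Merge-Sort satisfies the recurrence
\[
  T(n) = 2\,T(n/2) + c\,n .
\]
The recursion tree has $\log_2 n$ levels, each contributing $c\,n$ work, which suggests the guess $T(n) = O(n \log n)$.
The substitution method confirms the guess: assume $T(m) \leq d\,m \log_2 m$ for all $m < n$; then
\[
  T(n) \leq 2\,d\,\tfrac{n}{2}\log_2\tfrac{n}{2} + c\,n = d\,n\log_2 n - d\,n + c\,n \leq d\,n\log_2 n ,
\]
whenever $d \geq c$.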
\end{document} |
module BSTree {A : Set}(_≤_ : A → A → Set) where
open import BTree {A}
data _⊴*_ : A → BTree → Set where
gelf : {x : A}
→ x ⊴* leaf
gend : {x y : A}{l r : BTree}
→ x ≤ y
→ x ⊴* l
→ x ⊴* (node y l r)
data _*⊴_ : BTree → A → Set where
lelf : {x : A}
→ leaf *⊴ x
lend : {x y : A}{l r : BTree}
→ y ≤ x
→ r *⊴ x
→ (node y l r) *⊴ x
data BSTree : BTree → Set where
slf : BSTree leaf
snd : {x : A}{l r : BTree}
→ BSTree l
→ BSTree r
→ l *⊴ x
→ x ⊴* r
→ BSTree (node x l r)
|
(*
(C) Copyright Andreas Viktor Hess, DTU, 2018-2020
(C) Copyright Sebastian A. Mödersheim, DTU, 2018-2020
(C) Copyright Achim D. Brucker, University of Sheffield, 2018-2020
All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*)
(* Title: Labeled_Strands.thy
Author: Andreas Viktor Hess, DTU
Author: Sebastian A. Mödersheim, DTU
Author: Achim D. Brucker, The University of Sheffield
*)
section \<open>Labeled Strands\<close>
theory Labeled_Strands
imports Strands_and_Constraints
begin
subsection \<open>Definitions: Labeled Strands and Constraints\<close>
datatype 'l strand_label =
LabelN (the_LabelN: "'l") ("ln _")
| LabelS ("\<star>")
text \<open>Labeled strands are strands whose steps are equipped with labels\<close>
type_synonym ('a,'b,'c) labeled_strand_step = "'c strand_label \<times> ('a,'b) strand_step"
type_synonym ('a,'b,'c) labeled_strand = "('a,'b,'c) labeled_strand_step list"
abbreviation is_LabelN where "is_LabelN n x \<equiv> fst x = ln n"
abbreviation is_LabelS where "is_LabelS x \<equiv> fst x = \<star>"
definition unlabel where "unlabel S \<equiv> map snd S"
definition proj where "proj n S \<equiv> filter (\<lambda>s. is_LabelN n s \<or> is_LabelS s) S"
abbreviation proj_unl where "proj_unl n S \<equiv> unlabel (proj n S)"
abbreviation wfrestrictedvars\<^sub>l\<^sub>s\<^sub>t where "wfrestrictedvars\<^sub>l\<^sub>s\<^sub>t S \<equiv> wfrestrictedvars\<^sub>s\<^sub>t (unlabel S)"
abbreviation subst_apply_labeled_strand_step (infix "\<cdot>\<^sub>l\<^sub>s\<^sub>t\<^sub>p" 51) where
"x \<cdot>\<^sub>l\<^sub>s\<^sub>t\<^sub>p \<theta> \<equiv> (case x of (l, s) \<Rightarrow> (l, s \<cdot>\<^sub>s\<^sub>t\<^sub>p \<theta>))"
abbreviation subst_apply_labeled_strand (infix "\<cdot>\<^sub>l\<^sub>s\<^sub>t" 51) where
"S \<cdot>\<^sub>l\<^sub>s\<^sub>t \<theta> \<equiv> map (\<lambda>x. x \<cdot>\<^sub>l\<^sub>s\<^sub>t\<^sub>p \<theta>) S"
abbreviation trms\<^sub>l\<^sub>s\<^sub>t where "trms\<^sub>l\<^sub>s\<^sub>t S \<equiv> trms\<^sub>s\<^sub>t (unlabel S)"
abbreviation trms_proj\<^sub>l\<^sub>s\<^sub>t where "trms_proj\<^sub>l\<^sub>s\<^sub>t n S \<equiv> trms\<^sub>s\<^sub>t (proj_unl n S)"
abbreviation vars\<^sub>l\<^sub>s\<^sub>t where "vars\<^sub>l\<^sub>s\<^sub>t S \<equiv> vars\<^sub>s\<^sub>t (unlabel S)"
abbreviation vars_proj\<^sub>l\<^sub>s\<^sub>t where "vars_proj\<^sub>l\<^sub>s\<^sub>t n S \<equiv> vars\<^sub>s\<^sub>t (proj_unl n S)"
abbreviation bvars\<^sub>l\<^sub>s\<^sub>t where "bvars\<^sub>l\<^sub>s\<^sub>t S \<equiv> bvars\<^sub>s\<^sub>t (unlabel S)"
abbreviation fv\<^sub>l\<^sub>s\<^sub>t where "fv\<^sub>l\<^sub>s\<^sub>t S \<equiv> fv\<^sub>s\<^sub>t (unlabel S)"
abbreviation wf\<^sub>l\<^sub>s\<^sub>t where "wf\<^sub>l\<^sub>s\<^sub>t V S \<equiv> wf\<^sub>s\<^sub>t V (unlabel S)"
subsection \<open>Lemmata: Projections\<close>
lemma is_LabelS_proj_iff_not_is_LabelN:
"list_all is_LabelS (proj l A) \<longleftrightarrow> \<not>list_ex (is_LabelN l) A"
by (induct A) (auto simp add: proj_def)
lemma proj_subset_if_no_label:
assumes "\<not>list_ex (is_LabelN l) A"
shows "set (proj l A) \<subseteq> set (proj l' A)"
and "set (proj_unl l A) \<subseteq> set (proj_unl l' A)"
using assms by (induct A) (auto simp add: unlabel_def proj_def)
lemma proj_in_setD:
assumes a: "a \<in> set (proj l A)"
obtains k b where "a = (k, b)" "k = (ln l) \<or> k = \<star>"
using that a unfolding proj_def by (cases a) auto
lemma proj_set_mono:
assumes "set A \<subseteq> set B"
shows "set (proj n A) \<subseteq> set (proj n B)"
and "set (proj_unl n A) \<subseteq> set (proj_unl n B)"
using assms unfolding proj_def unlabel_def by auto
lemma unlabel_nil[simp]: "unlabel [] = []"
by (simp add: unlabel_def)
lemma unlabel_mono: "set A \<subseteq> set B \<Longrightarrow> set (unlabel A) \<subseteq> set (unlabel B)"
by (auto simp add: unlabel_def)
lemma unlabel_in: "(l,x) \<in> set A \<Longrightarrow> x \<in> set (unlabel A)"
unfolding unlabel_def by force
lemma unlabel_mem_has_label: "x \<in> set (unlabel A) \<Longrightarrow> \<exists>l. (l,x) \<in> set A"
unfolding unlabel_def by auto
lemma proj_nil[simp]: "proj n [] = []" "proj_unl n [] = []"
unfolding unlabel_def proj_def by auto
lemma singleton_lst_proj[simp]:
"proj_unl l [(ln l, a)] = [a]"
"l \<noteq> l' \<Longrightarrow> proj_unl l' [(ln l, a)] = []"
"proj_unl l [(\<star>, a)] = [a]"
"unlabel [(l'', a)] = [a]"
unfolding proj_def unlabel_def by simp_all
lemma unlabel_nil_only_if_nil[simp]: "unlabel A = [] \<Longrightarrow> A = []"
unfolding unlabel_def by auto
lemma unlabel_Cons[simp]:
"unlabel ((l,a)#A) = a#unlabel A"
"unlabel (b#A) = snd b#unlabel A"
unfolding unlabel_def by simp_all
lemma unlabel_append[simp]: "unlabel (A@B) = unlabel A@unlabel B"
unfolding unlabel_def by auto
lemma proj_Cons[simp]:
"proj n ((ln n,a)#A) = (ln n,a)#proj n A"
"proj n ((\<star>,a)#A) = (\<star>,a)#proj n A"
"m \<noteq> n \<Longrightarrow> proj n ((ln m,a)#A) = proj n A"
"l = (ln n) \<Longrightarrow> proj n ((l,a)#A) = (l,a)#proj n A"
"l = \<star> \<Longrightarrow> proj n ((l,a)#A) = (l,a)#proj n A"
"fst b \<noteq> \<star> \<Longrightarrow> fst b \<noteq> (ln n) \<Longrightarrow> proj n (b#A) = proj n A"
unfolding proj_def by auto
lemma proj_append[simp]:
"proj l (A'@B') = proj l A'@proj l B'"
"proj_unl l (A@B) = proj_unl l A@proj_unl l B"
unfolding proj_def unlabel_def by auto
lemma proj_unl_cons[simp]:
"proj_unl l ((ln l, a)#A) = a#proj_unl l A"
"l \<noteq> l' \<Longrightarrow> proj_unl l' ((ln l, a)#A) = proj_unl l' A"
"proj_unl l ((\<star>, a)#A) = a#proj_unl l A"
unfolding proj_def unlabel_def by simp_all
lemma trms_unlabel_proj[simp]:
"trms\<^sub>s\<^sub>t\<^sub>p (snd (ln l, x)) \<subseteq> trms_proj\<^sub>l\<^sub>s\<^sub>t l [(ln l, x)]"
by auto
lemma trms_unlabel_star[simp]:
"trms\<^sub>s\<^sub>t\<^sub>p (snd (\<star>, x)) \<subseteq> trms_proj\<^sub>l\<^sub>s\<^sub>t l [(\<star>, x)]"
by auto
lemma trms\<^sub>l\<^sub>s\<^sub>t_union[simp]: "trms\<^sub>l\<^sub>s\<^sub>t A = (\<Union>l. trms_proj\<^sub>l\<^sub>s\<^sub>t l A)"
proof (induction A)
case (Cons a A)
obtain l s where ls: "a = (l,s)" by moura
have "trms\<^sub>l\<^sub>s\<^sub>t [a] = (\<Union>l. trms_proj\<^sub>l\<^sub>s\<^sub>t l [a])"
proof -
have *: "trms\<^sub>l\<^sub>s\<^sub>t [a] = trms\<^sub>s\<^sub>t\<^sub>p s" using ls by simp
show ?thesis
proof (cases l)
case (LabelN n)
hence "trms_proj\<^sub>l\<^sub>s\<^sub>t n [a] = trms\<^sub>s\<^sub>t\<^sub>p s" using ls by simp
moreover have "\<forall>m. n \<noteq> m \<longrightarrow> trms_proj\<^sub>l\<^sub>s\<^sub>t m [a] = {}" using ls LabelN by auto
ultimately show ?thesis using * ls by fastforce
next
case LabelS
hence "\<forall>l. trms_proj\<^sub>l\<^sub>s\<^sub>t l [a] = trms\<^sub>s\<^sub>t\<^sub>p s" using ls by auto
thus ?thesis using * ls by fastforce
qed
qed
moreover have "\<forall>l. trms_proj\<^sub>l\<^sub>s\<^sub>t l (a#A) = trms_proj\<^sub>l\<^sub>s\<^sub>t l [a] \<union> trms_proj\<^sub>l\<^sub>s\<^sub>t l A"
unfolding unlabel_def proj_def by auto
hence "(\<Union>l. trms_proj\<^sub>l\<^sub>s\<^sub>t l (a#A)) = (\<Union>l. trms_proj\<^sub>l\<^sub>s\<^sub>t l [a]) \<union> (\<Union>l. trms_proj\<^sub>l\<^sub>s\<^sub>t l A)" by auto
ultimately show ?case using Cons.IH ls by auto
qed simp
lemma trms\<^sub>l\<^sub>s\<^sub>t_append[simp]: "trms\<^sub>l\<^sub>s\<^sub>t (A@B) = trms\<^sub>l\<^sub>s\<^sub>t A \<union> trms\<^sub>l\<^sub>s\<^sub>t B"
by (metis trms\<^sub>s\<^sub>t_append unlabel_append)
lemma trms_proj\<^sub>l\<^sub>s\<^sub>t_append[simp]: "trms_proj\<^sub>l\<^sub>s\<^sub>t l (A@B) = trms_proj\<^sub>l\<^sub>s\<^sub>t l A \<union> trms_proj\<^sub>l\<^sub>s\<^sub>t l B"
by (metis (no_types, lifting) filter_append proj_def trms\<^sub>l\<^sub>s\<^sub>t_append)
lemma trms_proj\<^sub>l\<^sub>s\<^sub>t_subset[simp]:
"trms_proj\<^sub>l\<^sub>s\<^sub>t l A \<subseteq> trms_proj\<^sub>l\<^sub>s\<^sub>t l (A@B)"
"trms_proj\<^sub>l\<^sub>s\<^sub>t l B \<subseteq> trms_proj\<^sub>l\<^sub>s\<^sub>t l (A@B)"
using trms_proj\<^sub>l\<^sub>s\<^sub>t_append[of l] by blast+
lemma trms\<^sub>l\<^sub>s\<^sub>t_subset[simp]:
"trms\<^sub>l\<^sub>s\<^sub>t A \<subseteq> trms\<^sub>l\<^sub>s\<^sub>t (A@B)"
"trms\<^sub>l\<^sub>s\<^sub>t B \<subseteq> trms\<^sub>l\<^sub>s\<^sub>t (A@B)"
proof (induction A)
case (Cons a A)
obtain l s where *: "a = (l,s)" by moura
{ case 1 thus ?case using Cons * by auto }
{ case 2 thus ?case using Cons * by auto }
qed simp_all
lemma vars\<^sub>l\<^sub>s\<^sub>t_union: "vars\<^sub>l\<^sub>s\<^sub>t A = (\<Union>l. vars_proj\<^sub>l\<^sub>s\<^sub>t l A)"
proof (induction A)
case (Cons a A)
obtain l s where ls: "a = (l,s)" by moura
have "vars\<^sub>l\<^sub>s\<^sub>t [a] = (\<Union>l. vars_proj\<^sub>l\<^sub>s\<^sub>t l [a])"
proof -
have *: "vars\<^sub>l\<^sub>s\<^sub>t [a] = vars\<^sub>s\<^sub>t\<^sub>p s" using ls by auto
show ?thesis
proof (cases l)
case (LabelN n)
hence "vars_proj\<^sub>l\<^sub>s\<^sub>t n [a] = vars\<^sub>s\<^sub>t\<^sub>p s" using ls by simp
moreover have "\<forall>m. n \<noteq> m \<longrightarrow> vars_proj\<^sub>l\<^sub>s\<^sub>t m [a] = {}" using ls LabelN by auto
ultimately show ?thesis using * ls by fast
next
case LabelS
hence "\<forall>l. vars_proj\<^sub>l\<^sub>s\<^sub>t l [a] = vars\<^sub>s\<^sub>t\<^sub>p s" using ls by auto
thus ?thesis using * ls by fast
qed
qed
moreover have "\<forall>l. vars_proj\<^sub>l\<^sub>s\<^sub>t l (a#A) = vars_proj\<^sub>l\<^sub>s\<^sub>t l [a] \<union> vars_proj\<^sub>l\<^sub>s\<^sub>t l A"
unfolding unlabel_def proj_def by auto
hence "(\<Union>l. vars_proj\<^sub>l\<^sub>s\<^sub>t l (a#A)) = (\<Union>l. vars_proj\<^sub>l\<^sub>s\<^sub>t l [a]) \<union> (\<Union>l. vars_proj\<^sub>l\<^sub>s\<^sub>t l A)"
using strand_vars_split(1) by auto
ultimately show ?case using Cons.IH ls strand_vars_split(1) by auto
qed simp
lemma unlabel_Cons_inv:
"unlabel A = b#B \<Longrightarrow> \<exists>A'. (\<exists>n. A = (ln n, b)#A') \<or> A = (\<star>, b)#A'"
proof -
assume *: "unlabel A = b#B"
then obtain l A' where "A = (l,b)#A'" unfolding unlabel_def by moura
thus "\<exists>A'. (\<exists>l. A = (ln l, b)#A') \<or> A = (\<star>, b)#A'" by (metis strand_label.exhaust)
qed
lemma unlabel_snoc_inv:
"unlabel A = B@[b] \<Longrightarrow> \<exists>A'. (\<exists>n. A = A'@[(ln n, b)]) \<or> A = A'@[(\<star>, b)]"
proof -
assume *: "unlabel A = B@[b]"
then obtain A' l where "A = A'@[(l,b)]"
unfolding unlabel_def by (induct A rule: List.rev_induct) auto
thus "\<exists>A'. (\<exists>n. A = A'@[(ln n, b)]) \<or> A = A'@[(\<star>, b)]" by (cases l) auto
qed
lemma proj_idem[simp]: "proj l (proj l A) = proj l A"
unfolding proj_def by auto
lemma proj_ik\<^sub>s\<^sub>t_is_proj_rcv_set:
"ik\<^sub>s\<^sub>t (proj_unl n A) = {t. (ln n, Receive t) \<in> set A \<or> (\<star>, Receive t) \<in> set A} "
using ik\<^sub>s\<^sub>t_is_rcv_set unfolding unlabel_def proj_def by force
lemma unlabel_ik\<^sub>s\<^sub>t_is_rcv_set:
"ik\<^sub>s\<^sub>t (unlabel A) = {t | l t. (l, Receive t) \<in> set A}"
using ik\<^sub>s\<^sub>t_is_rcv_set unfolding unlabel_def by force
lemma proj_ik_union_is_unlabel_ik:
"ik\<^sub>s\<^sub>t (unlabel A) = (\<Union>l. ik\<^sub>s\<^sub>t (proj_unl l A))"
proof
show "(\<Union>l. ik\<^sub>s\<^sub>t (proj_unl l A)) \<subseteq> ik\<^sub>s\<^sub>t (unlabel A)"
using unlabel_ik\<^sub>s\<^sub>t_is_rcv_set[of A] proj_ik\<^sub>s\<^sub>t_is_proj_rcv_set[of _ A] by auto
show "ik\<^sub>s\<^sub>t (unlabel A) \<subseteq> (\<Union>l. ik\<^sub>s\<^sub>t (proj_unl l A))"
proof
fix t assume "t \<in> ik\<^sub>s\<^sub>t (unlabel A)"
then obtain l where "(l, Receive t) \<in> set A"
using ik\<^sub>s\<^sub>t_is_rcv_set unlabel_mem_has_label[of _ A]
by moura
thus "t \<in> (\<Union>l. ik\<^sub>s\<^sub>t (proj_unl l A))" using proj_ik\<^sub>s\<^sub>t_is_proj_rcv_set[of _ A] by (cases l) auto
qed
qed
lemma proj_ik_append[simp]:
"ik\<^sub>s\<^sub>t (proj_unl l (A@B)) = ik\<^sub>s\<^sub>t (proj_unl l A) \<union> ik\<^sub>s\<^sub>t (proj_unl l B)"
using proj_append(2)[of l A B] ik_append by auto
lemma proj_ik_append_subst_all:
"ik\<^sub>s\<^sub>t (proj_unl l (A@B)) \<cdot>\<^sub>s\<^sub>e\<^sub>t I = (ik\<^sub>s\<^sub>t (proj_unl l A) \<cdot>\<^sub>s\<^sub>e\<^sub>t I) \<union> (ik\<^sub>s\<^sub>t (proj_unl l B) \<cdot>\<^sub>s\<^sub>e\<^sub>t I)"
using proj_ik_append[of l] by auto
lemma ik_proj_subset[simp]: "ik\<^sub>s\<^sub>t (proj_unl n A) \<subseteq> trms_proj\<^sub>l\<^sub>s\<^sub>t n A"
by auto
lemma prefix_proj:
"prefix A B \<Longrightarrow> prefix (unlabel A) (unlabel B)"
"prefix A B \<Longrightarrow> prefix (proj n A) (proj n B)"
"prefix A B \<Longrightarrow> prefix (proj_unl n A) (proj_unl n B)"
unfolding prefix_def unlabel_def proj_def by auto
subsection \<open>Lemmata: Well-formedness\<close>
lemma wfvarsoccs\<^sub>s\<^sub>t_proj_union:
"wfvarsoccs\<^sub>s\<^sub>t (unlabel A) = (\<Union>l. wfvarsoccs\<^sub>s\<^sub>t (proj_unl l A))"
proof (induction A)
case (Cons a A)
obtain l s where ls: "a = (l,s)" by moura
have "wfvarsoccs\<^sub>s\<^sub>t (unlabel [a]) = (\<Union>l. wfvarsoccs\<^sub>s\<^sub>t (proj_unl l [a]))"
proof -
have *: "wfvarsoccs\<^sub>s\<^sub>t (unlabel [a]) = wfvarsoccs\<^sub>s\<^sub>t\<^sub>p s" using ls by auto
show ?thesis
proof (cases l)
case (LabelN n)
hence "wfvarsoccs\<^sub>s\<^sub>t (proj_unl n [a]) = wfvarsoccs\<^sub>s\<^sub>t\<^sub>p s" using ls by simp
moreover have "\<forall>m. n \<noteq> m \<longrightarrow> wfvarsoccs\<^sub>s\<^sub>t (proj_unl m [a]) = {}" using ls LabelN by auto
ultimately show ?thesis using * ls by fast
next
case LabelS
hence "\<forall>l. wfvarsoccs\<^sub>s\<^sub>t (proj_unl l [a]) = wfvarsoccs\<^sub>s\<^sub>t\<^sub>p s" using ls by auto
thus ?thesis using * ls by fast
qed
qed
moreover have
"wfvarsoccs\<^sub>s\<^sub>t (proj_unl l (a#A)) =
wfvarsoccs\<^sub>s\<^sub>t (proj_unl l [a]) \<union> wfvarsoccs\<^sub>s\<^sub>t (proj_unl l A)"
for l
unfolding unlabel_def proj_def by auto
hence "(\<Union>l. wfvarsoccs\<^sub>s\<^sub>t (proj_unl l (a#A))) =
(\<Union>l. wfvarsoccs\<^sub>s\<^sub>t (proj_unl l [a])) \<union> (\<Union>l. wfvarsoccs\<^sub>s\<^sub>t (proj_unl l A))"
using strand_vars_split(1) by auto
ultimately show ?case using Cons.IH ls strand_vars_split(1) by auto
qed simp
lemma wf_if_wf_proj:
assumes "\<forall>l. wf\<^sub>s\<^sub>t V (proj_unl l A)"
shows "wf\<^sub>s\<^sub>t V (unlabel A)"
using assms
proof (induction A arbitrary: V rule: List.rev_induct)
case (snoc a A)
hence IH: "wf\<^sub>s\<^sub>t V (unlabel A)" using proj_append(2)[of _ A] by auto
obtain b l where b: "a = (ln l, b) \<or> a = (\<star>, b)" by (cases a, metis strand_label.exhaust)
hence *: "wf\<^sub>s\<^sub>t V (proj_unl l A@[b])"
by (metis snoc.prems proj_append(2) singleton_lst_proj(1) proj_unl_cons(1,3))
thus ?case using IH b snoc.prems proj_append(2)[of l A "[a]"] unlabel_append[of A "[a]"]
proof (cases b)
case (Receive t)
have "fv t \<subseteq> wfvarsoccs\<^sub>s\<^sub>t (unlabel A) \<union> V"
proof
fix x assume "x \<in> fv t"
hence "x \<in> V \<union> wfvarsoccs\<^sub>s\<^sub>t (proj_unl l A)" using wf_append_exec[OF *] b Receive by auto
thus "x \<in> wfvarsoccs\<^sub>s\<^sub>t (unlabel A) \<union> V" using wfvarsoccs\<^sub>s\<^sub>t_proj_union[of A] by auto
qed
hence "fv t \<subseteq> wfrestrictedvars\<^sub>s\<^sub>t (unlabel A) \<union> V"
using vars_snd_rcv_strand_subset2(4)[of "unlabel A"] by blast
hence "wf\<^sub>s\<^sub>t V (unlabel A@[Receive t])" by (rule wf_rcv_append'''[OF IH])
thus ?thesis using b Receive unlabel_append[of A "[a]"] by auto
next
case (Equality ac s t)
have "fv t \<subseteq> wfvarsoccs\<^sub>s\<^sub>t (unlabel A) \<union> V" when "ac = Assign"
proof
fix x assume "x \<in> fv t"
hence "x \<in> V \<union> wfvarsoccs\<^sub>s\<^sub>t (proj_unl l A)" using wf_append_exec[OF *] b Equality that by auto
thus "x \<in> wfvarsoccs\<^sub>s\<^sub>t (unlabel A) \<union> V" using wfvarsoccs\<^sub>s\<^sub>t_proj_union[of A] by auto
qed
hence "fv t \<subseteq> wfrestrictedvars\<^sub>l\<^sub>s\<^sub>t A \<union> V" when "ac = Assign"
using vars_snd_rcv_strand_subset2(4)[of "unlabel A"] that by blast
hence "wf\<^sub>s\<^sub>t V (unlabel A@[Equality ac s t])"
by (cases ac) (metis wf_eq_append'''[OF IH], metis wf_eq_check_append''[OF IH])
thus ?thesis using b Equality unlabel_append[of A "[a]"] by auto
qed auto
qed simp
end
|
(* This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_sort_StoogeSort2IsSort
imports "../../Test_Base"
begin
datatype ('a, 'b) pair = pair2 "'a" "'b"
datatype 'a list = nil2 | cons2 "'a" "'a list"
fun x :: "'a list => 'a list => 'a list" where
"x (nil2) z = z"
| "x (cons2 z2 xs) z = cons2 z2 (x xs z)"
fun take :: "int => 'a list => 'a list" where
"take y z =
(if y <= 0 then nil2 else
(case z of
nil2 => nil2
| cons2 z2 xs => cons2 z2 (take (y - 1) xs)))"
fun sort2 :: "int => int => int list" where
"sort2 y z =
(if y <= z then cons2 y (cons2 z (nil2)) else
cons2 z (cons2 y (nil2)))"
fun length :: "'a list => int" where
"length (nil2) = 0"
| "length (cons2 z l) = 1 + (length l)"
fun insert :: "int => int list => int list" where
"insert y (nil2) = cons2 y (nil2)"
| "insert y (cons2 z2 xs) =
(if y <= z2 then cons2 y (cons2 z2 xs) else
cons2 z2 (insert y xs))"
fun isort :: "int list => int list" where
"isort (nil2) = nil2"
| "isort (cons2 z xs) = insert z (isort xs)"
fun drop :: "int => 'a list => 'a list" where
"drop y z =
(if y <= 0 then z else
(case z of
nil2 => nil2
| cons2 z2 xs1 => drop (y - 1) xs1))"
fun splitAt :: "int => 'a list =>
(('a list), ('a list)) pair" where
"splitAt y z = pair2 (take y z) (drop y z)"
function stooge2sort2 :: "int list => int list"
and stoogesort2 :: "int list => int list"
and stooge2sort1 :: "int list => int list" where
"stooge2sort2 y =
(case splitAt ((op div) ((2 * (length y)) + 1) 3) y of
pair2 ys2 zs1 => x (stoogesort2 ys2) zs1)"
| "stoogesort2 (nil2) = nil2"
| "stoogesort2 (cons2 z (nil2)) = cons2 z (nil2)"
| "stoogesort2 (cons2 z (cons2 y2 (nil2))) = sort2 z y2"
| "stoogesort2 (cons2 z (cons2 y2 (cons2 x3 x4))) =
stooge2sort2
(stooge2sort1 (stooge2sort2 (cons2 z (cons2 y2 (cons2 x3 x4)))))"
| "stooge2sort1 y =
(case splitAt ((op div) (length y) 3) y of
pair2 ys2 zs1 => x ys2 (stoogesort2 zs1))"
by pat_completeness auto
theorem property0 :
"((stoogesort2 xs) = (isort xs))"
oops
end
|
Formal statement is: lemma uniformly_continuous_on_def: fixes f :: "'a::metric_space \<Rightarrow> 'b::metric_space" shows "uniformly_continuous_on s f \<longleftrightarrow> (\<forall>e>0. \<exists>d>0. \<forall>x\<in>s. \<forall>x'\<in>s. dist x' x < d \<longrightarrow> dist (f x') (f x) < e)" Informal statement is: A function $f$ is uniformly continuous on a set $S$ if and only if for every $\epsilon > 0$, there exists a $\delta > 0$ such that for all $x, x' \in S$, if $|x - x'| < \delta$, then $|f(x) - f(x')| < \epsilon$. |
#include <boost/tokenizer.hpp>
#include "lconf.h"
using namespace std;
using namespace boost;
luaConf::luaConf()
{
L = luaL_newstate();
luaopen_base(L); // do we really need these?
luaopen_math(L); // maybe not?
luaopen_string(L); // but maybe yes?
}
luaConf::~luaConf()
{
lua_close(L);
}
const char *luaConf::name()
{
return "luaConf";
}
bool luaConf::loadConf(const char *conf) // returns true on success
{
if (luaL_loadfile(L, conf) != 0) {
goto error;
}
if (lua_pcall(L, 0, LUA_MULTRET, 0) != 0) {
goto error;
}
printf("%s: loaded %s\n", name(), conf);
return true;
error:
printf("%s: error %s\n", name(), lua_tostring(L, -1));
lua_pop(L, 1); // pop error message from stack
return false;
}
// output in varValue
// returns false if there is an error
bool luaConf::getString(string varName, string &varValue)
{
int stack = 0;
char_separator<char> sep(".");
tokenizer<char_separator<char>> tokens(varName, sep);
auto t = tokens.begin();
lua_getglobal(L, (*t).c_str()); // object to stack @ -1
stack++;
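	// Walk the dotted path in varName (e.g. "a.b.c"): descend through nested
	// tables with lua_getfield until a string value sits on top of the stack.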
while (true) {
if (lua_isstring(L, -1)) {
varValue = lua_tostring(L, -1);
break;
} else if (lua_istable(L, -1)) {
lua_pushnil(L); // nil key
if (lua_next(L, -2)) { // puts k,v on stack
				lua_pop(L, 2); // pop off k,v
				t++;
				if (t != tokens.end()) {
					lua_getfield(L, -1, (*t).c_str());
					stack++;
				} else {
					goto error; // path ended on a table, not a string
				}
} else {
goto error;
}
} else {
goto error;
}
}
lua_pop(L, stack);
#ifdef DEBUG
printf("%s: loaded %s\n", name(), varName.c_str());
#endif
return true;
error:
lua_pop(L, stack);
printf("%s: variable '%s' is empty or does not exist\n",
name(), varName.c_str());
return false;
}
// returns false if there is an error
bool luaConf::getBool(string varName)
{
int stack = 0;
bool b;
char_separator<char> sep(".");
tokenizer<char_separator<char>> tokens(varName, sep);
auto t = tokens.begin();
lua_getglobal(L, (*t).c_str()); // object to stack @ -1
stack++;
while (true) {
if (lua_isboolean(L, -1)) {
b = lua_toboolean(L, -1) != 0; // convert to bool (really)
break;
} else if (lua_istable(L, -1)) {
lua_pushnil(L); // nil key
if (lua_next(L, -2)) {
lua_pop(L, 2); // pop off k,v
				t++;
				if (t != tokens.end()) {
					lua_getfield(L, -1, (*t).c_str());
					stack++;
				} else {
					goto error; // path ended on a table, not a boolean
				}
} else {
goto error;
}
} else {
goto error;
}
}
lua_pop(L, stack);
#ifdef DEBUG
printf("%s: loaded %s\n", name(), varName.c_str());
#endif
return b;
error:
lua_pop(L, stack);
printf("%s: variable '%s' is empty or does not exist\n",
name(), varName.c_str());
return false;
}
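
// Illustrative usage sketch (not part of the original source).  The config
// file name and the keys used below are assumptions for demonstration only,
// e.g. a config.lua containing:
//
//   server = { host = "localhost", verbose = true }
//
// The demo is guarded so it does not interfere with normal builds.
#ifdef LCONF_DEMO
int main()
{
	luaConf conf;

	if (!conf.loadConf("config.lua"))
		return 1;

	string host;
	if (conf.getString("server.host", host))
		printf("server.host = %s\n", host.c_str());

	if (conf.getBool("server.verbose"))
		printf("verbose mode enabled\n");

	return 0;
}
#endif // LCONF_DEMO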
|
For $20,000, made up of gambling winnings, his girlfriend's credit card, and money his father set aside for him for college, Anderson made Cigarettes & Coffee (1993), a short film connecting multiple story lines with a twenty-dollar bill. The film was screened at the 1993 Sundance Festival Shorts Program. He decided to expand the film into a feature-length film and was subsequently invited to the 1994 Sundance Feature Film Program. At the Sundance Feature Film Program, Michael Caton-Jones served as Anderson's mentor; he saw Anderson as someone with "talent and a fully formed creative voice but not much hands-on experience" and gave him some hard and practical lessons.
|
SUBROUTINE conta3(itask,dtcal,ttime,iwrit,velnp,maxve)
! main contact routine (ALGORITHM 3)
USE ctrl_db, ONLY: bottom,top ,tdtime
USE npo_db, ONLY : coord,emass,fcont,coorb,coort
USE cont3_db
USE gvar_db, ONLY : static,updiv
IMPLICIT NONE
! variable for statistical record
INTEGER (kind=4), SAVE :: nn(41,3)
REAL (kind=8) :: td
! Dummy arguments
!task to perform
CHARACTER(len=*),INTENT(IN):: itask
INTEGER(kind=4),INTENT(IN) :: iwrit
INTEGER(kind=4),INTENT(IN), OPTIONAL :: maxve
REAL(kind=8),INTENT(IN),OPTIONAL ::dtcal,ttime,velnp(:,:)
! Local variables
TYPE (pair3_db), POINTER :: pair
INTEGER (kind=4) :: ipair,i
REAL (kind=8) :: disma, dtime, auxil
REAL (kind=8), SAVE :: timpr = 0d0
!.... PERFORM SELECT TASK
SELECT CASE (TRIM(itask))
CASE ('NEW','NSTRA0','NSTRA1','NSTRA2') !Read Input Data
!from npo_db INTENT(IN) :: npoin,coord,label,oldlb
! INTENT(IN OUT) :: bottom,top,emass
nn = 0 ! extended check statistics
! .. Initializes data base and input element DATA
CALL cinpu3(maxve,iwrit,npoin,coord,top,bottom)
CASE ('LUMASS')
! compute surface mass
CALL surms3(emass,coord)
CASE ('FORCES') !Performs contact search and computes contact forces
!from npo_db INTENT(IN) :: npoin,coora,coorb,coort,label,emass
! INTENT(IN OUT) :: fcont
! .... compute maximum displacement increment possible
td = tdtime
IF( td == 0 ) td = 1d0 !to avoid an error due to initial plastic work
disma = ABS( MAXVAL(velnp) - MINVAL(velnp) )
disma = 11.d0*disma * dtcal * SQRT(3d0) !maximum increment
IF( ctime < 0d0 )ctime = dtcal !first step only
dtime = ctime !contact dtime from database
IF(dtime == 0d0)dtime = dtcal !computation time
! .... Perform Contact Searching & Compute contact forces
IF( static .AND. updiv)THEN !initializes values
pair => headp
DO ipair=1,npair !for each pair
IF( pair%press ) pair%presn = 0d0
pair => pair%next
END DO
surtf = 0d0 !initializes for next period
timpr = 0d0
END IF
CALL celmn3(ttime,dtime,td,disma,coora,emass,fcont,coorb,coort)
timpr = timpr + dtime !increase elapsed time
CASE ('DUMPIN') !write data to a re-start file
CALL cdump3(npoin)
! WRITE(55,"(i5,3i20)",ERR=9999)(i,nn(i,1:3),i=1,41) !statistic to debug file
CASE ('RESTAR') !read data from a re-start file
!from npo_db INTENT(IN) :: bottom,top
CALL crest3(npoin)
ALLOCATE ( surtf(3,npair) ) !get memory for total forces
surtf = 0d0 !initializes
CASE ('UPDLON') !Modifies internal numeration
CALL cupdl3( )
CASE ('OUTDY1') ! for History
!Writes contact forces between pairs
!Initializes average nodal forces also
dtime = ctime !contact dtime from database
IF(dtime == 0d0)dtime = dtcal !computation time
WRITE(41,ERR=9999) ttime !control variable
pair => headp
DO ipair=1,npair !for each pair
IF(timpr > 0d0 )THEN
!average contact forces on surfaces
WRITE(41,ERR=9999) surtf(1:3,ipair)/timpr
!initializes average contact forces on Nodes
IF( pair%press ) pair%presn = pair%presn/(timpr+dtime)*dtime
ELSE
WRITE(41,ERR=9999) surtf(1:3,ipair) !average contact forces
END IF
pair => pair%next
END DO
surtf = 0d0 !initializes for next period
timpr = 0d0 !initializes elapsed time
CASE ('OUTDY2') ! for STP
!Writes contact forces and gaps for nodes
dtime = ctime !contact dtime from database
IF(dtime == 0d0)dtime = dtcal !computation time
auxil = timpr+dtime
IF( auxil == 0d0 ) auxil = 1d0
pair => headp
DO ipair=1,npair !for each pair
IF( pair%press ) WRITE(44,ERR=9999) (pair%presn(i)/auxil,i=1,pair%ncnod)
IF( pair%wrink ) WRITE(44,ERR=9999) (pair%rssdb(3,i),i=1,pair%ncnod)
IF( pair%wrink ) WRITE(44,ERR=9999) (pair%mingp(i),i=1,pair%ncnod)
pair => pair%next
END DO
IF( wear ) WRITE(45,ERR=9999) (wwear(i),i=1,npoin)
CASE ('INITIA') !Initial penetrations
! .... Perform Contact Searching & Output a report
CALL celmn3p(ttime,coora,coorb,coort)
CASE ('WRTSUR') !Writes contact surfaces for Post-processing
! OPEN files for postprocess, and detect surface to use
CALL prsur3(iwrit)
END SELECT
RETURN
9999 CALL runen2('')
END SUBROUTINE conta3
|
Win 1 of 5 Timbuk2 prize bundles worth £200!
Timbuk2 is a San Francisco original brand. Founded in a San Francisco garage in 1989 by a bike messenger named Rob Honeycutt, Timbuk2 have manufactured in the city’s Mission District ever since. Rob made products that served his needs as a messenger: they needed to be quick to access, comfortable for all-day wear, super robust and, obviously, have the San Francisco flair. Rob had a huge drive, a knack for branding, and an obsession with just-in-time manufacturing.
Timbuk2 brought customisation to the masses, selling custom messenger bags to bike shops across the USA in the 90s and launching the first online customiser in 1999. This technology will be landing on UK shores in 2013. The Timbuk2 product range has grown to include luggage, laptop bags and accessories, but the made-in-San-Francisco custom messenger bag remains the heart and soul of the company.
Five lucky winners will bag themselves a Timbuk2 prize bundle worth £200 that includes a messenger bag, seat pack, phone wallet and backpack. Overall that's £1,000 worth of gear up for grabs! All you have to do is answer the question below!
Closing date for entries is Friday 29th November 2013. Entries after midnight on 29/11/13 will not be accepted. The winners will be the first five correct answers drawn after the closing date. The winners will be notified by email. Entrants' email addresses will be passed to Timbuk2 for marketing purposes; you will be able to opt out of any future marketing from Timbuk2 should you wish. There is no cash alternative to this prize. Only one entry per person will go into the final draw. Cyclescheme reserves the right to amend the rules at any time. |
(* Title: HOL/MicroJava/JVM/JVMListExample.thy
Author: Stefan Berghofer
*)
section \<open>Example for generating executable code from JVM semantics \label{sec:JVMListExample}\<close>
theory JVMListExample
imports "../J/SystemClasses" JVMExec
begin
text \<open>
Since the types @{typ cnam}, \<open>vnam\<close>, and \<open>mname\<close> are
anonymous, we describe distinctness of names in the example by axioms:
\<close>
axiomatization list_nam test_nam :: cnam
where distinct_classes: "list_nam \<noteq> test_nam"
axiomatization append_name makelist_name :: mname
where distinct_methods: "append_name \<noteq> makelist_name"
axiomatization val_nam next_nam :: vnam
where distinct_fields: "val_nam \<noteq> next_nam"
axiomatization
where nat_to_loc'_inject: "nat_to_loc' l = nat_to_loc' l' \<longleftrightarrow> l = l'"
definition list_name :: cname
where "list_name = Cname list_nam"
definition test_name :: cname
where "test_name = Cname test_nam"
definition val_name :: vname
where "val_name = VName val_nam"
definition next_name :: vname
where "next_name = VName next_nam"
definition append_ins :: bytecode where
"append_ins =
[Load 0,
Getfield next_name list_name,
Dup,
LitPush Null,
Ifcmpeq 4,
Load 1,
Invoke list_name append_name [Class list_name],
Return,
Pop,
Load 0,
Load 1,
Putfield next_name list_name,
LitPush Unit,
Return]"
definition list_class :: "jvm_method class" where
"list_class =
(Object,
[(val_name, PrimT Integer), (next_name, Class list_name)],
[((append_name, [Class list_name]), PrimT Void,
(3, 0, append_ins,[(1,2,8,Xcpt NullPointer)]))])"
definition make_list_ins :: bytecode where
"make_list_ins =
[New list_name,
Dup,
Store 0,
LitPush (Intg 1),
Putfield val_name list_name,
New list_name,
Dup,
Store 1,
LitPush (Intg 2),
Putfield val_name list_name,
New list_name,
Dup,
Store 2,
LitPush (Intg 3),
Putfield val_name list_name,
Load 0,
Load 1,
Invoke list_name append_name [Class list_name],
Pop,
Load 0,
Load 2,
Invoke list_name append_name [Class list_name],
Return]"
definition test_class :: "jvm_method class" where
"test_class =
(Object, [],
[((makelist_name, []), PrimT Void, (3, 2, make_list_ins,[]))])"
definition E :: jvm_prog where
"E = SystemClasses @ [(list_name, list_class), (test_name, test_class)]"
code_datatype list_nam test_nam
lemma equal_cnam_code [code]:
"HOL.equal list_nam list_nam \<longleftrightarrow> True"
"HOL.equal test_nam test_nam \<longleftrightarrow> True"
"HOL.equal list_nam test_nam \<longleftrightarrow> False"
"HOL.equal test_nam list_nam \<longleftrightarrow> False"
by(simp_all add: distinct_classes distinct_classes[symmetric] equal_cnam_def)
code_datatype append_name makelist_name
lemma equal_mname_code [code]:
"HOL.equal append_name append_name \<longleftrightarrow> True"
"HOL.equal makelist_name makelist_name \<longleftrightarrow> True"
"HOL.equal append_name makelist_name \<longleftrightarrow> False"
"HOL.equal makelist_name append_name \<longleftrightarrow> False"
by(simp_all add: distinct_methods distinct_methods[symmetric] equal_mname_def)
code_datatype val_nam next_nam
lemma equal_vnam_code [code]:
"HOL.equal val_nam val_nam \<longleftrightarrow> True"
"HOL.equal next_nam next_nam \<longleftrightarrow> True"
"HOL.equal val_nam next_nam \<longleftrightarrow> False"
"HOL.equal next_nam val_nam \<longleftrightarrow> False"
by(simp_all add: distinct_fields distinct_fields[symmetric] equal_vnam_def)
lemma equal_loc'_code [code]:
"HOL.equal (nat_to_loc' l) (nat_to_loc' l') \<longleftrightarrow> l = l'"
by(simp add: equal_loc'_def nat_to_loc'_inject)
definition undefined_cname :: cname
where [code del]: "undefined_cname = undefined"
code_datatype Object Xcpt Cname undefined_cname
declare undefined_cname_def[symmetric, code_unfold]
definition undefined_val :: val
where [code del]: "undefined_val = undefined"
declare undefined_val_def[symmetric, code_unfold]
code_datatype Unit Null Bool Intg Addr undefined_val
definition
"test = exec (E, start_state E test_name makelist_name)"
ML_val \<open>
@{code test};
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
@{code exec} (@{code E}, @{code the} it);
\<close>
end
|
If $f$ and $g$ are holomorphic functions on an open set $S$, then the $n$th derivative of $f + g$ is equal to the $n$th derivative of $f$ plus the $n$th derivative of $g$. |
Embers Movement is a bounty mission in Tom Clancy’s The Division 2.
This bounty is unlocked after completing the Constitution Hall project.
Here’s a walkthrough of Embers Movement in The Division 2.
Your main objective is to find the bounty and the other Outcasts.
Get to the location shown in the map below, marked as the bounty symbol.
As you get there, you will see a pretty obvious entrance. Some guards will walk out of the place.
Secure the area first by killing them.
Now, get inside the compound. Go upstairs and towards the door in the image below.
Inside the room, you should find this window and climb over it.
Keep going and you’ll have to pass through a corridor filled with mannequins.
Keep going until you reach this outdoor space filled with Outcast hostiles.
Kill all of them until the bounty The Match shows up.
These hostiles mostly rely on fire, i.e. flamethrowers and molotovs.
You have to be wary of the flames or they will kill you quickly.
After killing the boss ‘The Match’, you have to secure the area to complete the bounty.
You should get some E-Credits for completing this bounty. |
Formal statement is: lemma locally_connected: "locally connected S \<longleftrightarrow> (\<forall>v x. openin (top_of_set S) v \<and> x \<in> v \<longrightarrow> (\<exists>u. openin (top_of_set S) u \<and> connected u \<and> x \<in> u \<and> u \<subseteq> v))" Informal statement is: A topological space $S$ is locally connected if and only if for every open set $v$ containing a point $x$, there exists an open set $u$ containing $x$ such that $u$ is connected and $u \subseteq v$. |
[STATEMENT]
theorem completeness:
fixes p :: \<open>(nat, nat) form\<close>
assumes \<open>\<forall>(e :: nat \<Rightarrow> nat hterm) f g. e, f, g, z \<Turnstile> p\<close>
shows \<open>z \<turnstile> p\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
let ?p = \<open>put_imps p (rev z)\<close>
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
have *: \<open>\<forall>(e :: nat \<Rightarrow> nat hterm) f g. eval e f g ?p\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>e f g. eval e f g (put_imps p (rev z))
[PROOF STEP]
using assms semantics_put_imps
[PROOF STATE]
proof (prove)
using this:
\<forall>e f g. e,f,g,z \<Turnstile> p
(?e,?f,?g,?z \<Turnstile> ?p) = eval ?e ?f ?g (put_imps ?p ?z)
goal (1 subgoal):
1. \<forall>e f g. eval e f g (put_imps p (rev z))
[PROOF STEP]
unfolding model_def
[PROOF STATE]
proof (prove)
using this:
\<forall>e f g. list_all (eval e f g) z \<longrightarrow> eval e f g p
(list_all (eval ?e ?f ?g) ?z \<longrightarrow> eval ?e ?f ?g ?p) = eval ?e ?f ?g (put_imps ?p ?z)
goal (1 subgoal):
1. \<forall>e f g. eval e f g (put_imps p (rev z))
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>e f g. eval e f g (put_imps p (rev z))
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
obtain m where **: \<open>closed 0 (put_unis m ?p)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>m. closed 0 (put_unis m (put_imps p (rev z))) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using ex_closure
[PROOF STATE]
proof (prove)
using this:
\<exists>m. closed 0 (put_unis m ?p)
goal (1 subgoal):
1. (\<And>m. closed 0 (put_unis m (put_imps p (rev z))) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
closed 0 (put_unis m (put_imps p (rev z)))
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
closed 0 (put_unis m (put_imps p (rev z)))
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
have \<open>list_all (closed 0) []\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. list_all (closed 0) []
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
list_all (closed 0) []
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
list_all (closed 0) []
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
have \<open>\<forall>(e :: nat \<Rightarrow> nat hterm) f g. e, f, g, [] \<Turnstile> put_unis m ?p\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>e f g. e,f,g,[] \<Turnstile> put_unis m (put_imps p (rev z))
[PROOF STEP]
using * valid_put_unis
[PROOF STATE]
proof (prove)
using this:
\<forall>e f g. eval e f g (put_imps p (rev z))
\<forall>e f g. eval e f g ?p \<Longrightarrow> eval ?e ?f ?g (put_unis ?m ?p)
goal (1 subgoal):
1. \<forall>e f g. e,f,g,[] \<Turnstile> put_unis m (put_imps p (rev z))
[PROOF STEP]
unfolding model_def
[PROOF STATE]
proof (prove)
using this:
\<forall>e f g. eval e f g (put_imps p (rev z))
\<forall>e f g. eval e f g ?p \<Longrightarrow> eval ?e ?f ?g (put_unis ?m ?p)
goal (1 subgoal):
1. \<forall>e f g. list_all (eval e f g) [] \<longrightarrow> eval e f g (put_unis m (put_imps p (rev z)))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<forall>e f g. e,f,g,[] \<Turnstile> put_unis m (put_imps p (rev z))
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
closed 0 (put_unis m (put_imps p (rev z)))
list_all (closed 0) []
\<forall>e f g. e,f,g,[] \<Turnstile> put_unis m (put_imps p (rev z))
[PROOF STEP]
have \<open>[] \<turnstile> put_unis m ?p\<close>
[PROOF STATE]
proof (prove)
using this:
closed 0 (put_unis m (put_imps p (rev z)))
list_all (closed 0) []
\<forall>e f g. e,f,g,[] \<Turnstile> put_unis m (put_imps p (rev z))
goal (1 subgoal):
1. [] \<turnstile> put_unis m (put_imps p (rev z))
[PROOF STEP]
using natded_complete
[PROOF STATE]
proof (prove)
using this:
closed 0 (put_unis m (put_imps p (rev z)))
list_all (closed 0) []
\<forall>e f g. e,f,g,[] \<Turnstile> put_unis m (put_imps p (rev z))
\<lbrakk>closed 0 ?p; list_all (closed 0) ?ps; \<forall>e f g. e,f,g,?ps \<Turnstile> ?p\<rbrakk> \<Longrightarrow> ?ps \<turnstile> ?p
goal (1 subgoal):
1. [] \<turnstile> put_unis m (put_imps p (rev z))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
[] \<turnstile> put_unis m (put_imps p (rev z))
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
[] \<turnstile> put_unis m (put_imps p (rev z))
[PROOF STEP]
have \<open>[] \<turnstile> ?p\<close>
[PROOF STATE]
proof (prove)
using this:
[] \<turnstile> put_unis m (put_imps p (rev z))
goal (1 subgoal):
1. [] \<turnstile> put_imps p (rev z)
[PROOF STEP]
using ** remove_unis_sentence
[PROOF STATE]
proof (prove)
using this:
[] \<turnstile> put_unis m (put_imps p (rev z))
closed 0 (put_unis m (put_imps p (rev z)))
\<lbrakk>infinite (- params ?p); closed 0 (put_unis ?m ?p); [] \<turnstile> put_unis ?m ?p\<rbrakk> \<Longrightarrow> [] \<turnstile> ?p
goal (1 subgoal):
1. [] \<turnstile> put_imps p (rev z)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
[] \<turnstile> put_imps p (rev z)
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
[] \<turnstile> put_imps p (rev z)
[PROOF STEP]
show \<open>z \<turnstile> p\<close>
[PROOF STATE]
proof (prove)
using this:
[] \<turnstile> put_imps p (rev z)
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
using remove_imps
[PROOF STATE]
proof (prove)
using this:
[] \<turnstile> put_imps p (rev z)
\<lbrakk>infinite (- params ?p); ?z' \<turnstile> put_imps ?p ?z\<rbrakk> \<Longrightarrow> rev ?z @ ?z' \<turnstile> ?p
goal (1 subgoal):
1. z \<turnstile> p
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
z \<turnstile> p
goal:
No subgoals!
[PROOF STEP]
qed |
open import Prelude
module Implicits.Syntax where
open import Implicits.Syntax.Type public
open import Implicits.Syntax.Term public
open import Implicits.Syntax.Context public
|
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable wd_ : Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)).
Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)).
Variable col_triv_3 : (forall A B : Universe, col_ A B B).
Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)).
Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)).
Theorem pipo_6 : (forall A B C P Q R X : Universe, ((wd_ A B /\ (wd_ B C /\ (wd_ A C /\ (wd_ Q A /\ (wd_ Q C /\ (wd_ P B /\ (wd_ P C /\ (wd_ R A /\ (wd_ R B /\ (wd_ A P /\ (wd_ C X /\ (wd_ X A /\ (wd_ P X /\ (wd_ Q P /\ (wd_ Q X /\ (col_ X Q P /\ (col_ B R A /\ (col_ C Q A /\ (col_ C P B /\ (col_ P X A /\ (col_ P X B /\ (col_ P A B /\ (col_ X A B /\ (col_ P Q A /\ (col_ P Q B /\ col_ Q A B))))))))))))))))))))))))) -> col_ A B C)).
Proof.
time tac.
Qed.
End FOFProblem.
|
[STATEMENT]
lemma tm_dither_halts_aux:
shows "steps0 (1, Bk \<up> m, [Oc, Oc]) tm_dither 2 = (0, Bk \<up> m, [Oc, Oc])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. steps0 (1, Bk \<up> m, [Oc, Oc]) tm_dither 2 = (0, Bk \<up> m, [Oc, Oc])
[PROOF STEP]
unfolding tm_dither_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. steps0 (1, Bk \<up> m, [Oc, Oc]) [(WB, 1), (R, 2), (L, 1), (L, 0)] 2 = (0, Bk \<up> m, [Oc, Oc])
[PROOF STEP]
by (simp add: steps.simps step.simps numeral_eqs_upto_12) |
(***********************************************************************************
* Copyright (c) 2016-2018 The University of Sheffield, UK
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* SPDX-License-Identifier: BSD-2-Clause
***********************************************************************************)
section\<open>Testing getElementById\<close>
text\<open>This theory contains the test cases for getElementById.\<close>
theory Document_getElementById
imports
"Core_DOM_BaseTest"
begin
definition Document_getElementById_heap :: "(unit, unit, unit, unit, unit, unit, unit, unit, unit, unit, unit) heap" where
"Document_getElementById_heap = create_heap [(cast (document_ptr.Ref 1), cast (create_document_obj html (Some (cast (element_ptr.Ref 1))) [])),
(cast (element_ptr.Ref 1), cast (create_element_obj ''html'' [cast (element_ptr.Ref 2), cast (element_ptr.Ref 9)] fmempty None)),
(cast (element_ptr.Ref 2), cast (create_element_obj ''head'' [cast (element_ptr.Ref 3), cast (element_ptr.Ref 4), cast (element_ptr.Ref 5), cast (element_ptr.Ref 6), cast (element_ptr.Ref 7), cast (element_ptr.Ref 8)] fmempty None)),
(cast (element_ptr.Ref 3), cast (create_element_obj ''meta'' [] (fmap_of_list [(''charset'', ''utf-8'')]) None)),
(cast (element_ptr.Ref 4), cast (create_element_obj ''title'' [cast (character_data_ptr.Ref 1)] fmempty None)),
(cast (character_data_ptr.Ref 1), cast (create_character_data_obj ''Document.getElementById'')),
(cast (element_ptr.Ref 5), cast (create_element_obj ''link'' [] (fmap_of_list [(''rel'', ''author''), (''title'', ''Tetsuharu OHZEKI''), (''href'', ''mailto:[email protected]'')]) None)),
(cast (element_ptr.Ref 6), cast (create_element_obj ''link'' [] (fmap_of_list [(''rel'', ''help''), (''href'', ''https://dom.spec.whatwg.org/#dom-document-getelementbyid'')]) None)),
(cast (element_ptr.Ref 7), cast (create_element_obj ''script'' [] (fmap_of_list [(''src'', ''/resources/testharness.js'')]) None)),
(cast (element_ptr.Ref 8), cast (create_element_obj ''script'' [] (fmap_of_list [(''src'', ''/resources/testharnessreport.js'')]) None)),
(cast (element_ptr.Ref 9), cast (create_element_obj ''body'' [cast (element_ptr.Ref 10), cast (element_ptr.Ref 11), cast (element_ptr.Ref 12), cast (element_ptr.Ref 13), cast (element_ptr.Ref 16), cast (element_ptr.Ref 19)] fmempty None)),
(cast (element_ptr.Ref 10), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', ''log'')]) None)),
(cast (element_ptr.Ref 11), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', '''')]) None)),
(cast (element_ptr.Ref 12), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', ''test1'')]) None)),
(cast (element_ptr.Ref 13), cast (create_element_obj ''div'' [cast (element_ptr.Ref 14), cast (element_ptr.Ref 15)] (fmap_of_list [(''id'', ''test5''), (''data-name'', ''1st'')]) None)),
(cast (element_ptr.Ref 14), cast (create_element_obj ''p'' [cast (character_data_ptr.Ref 2)] (fmap_of_list [(''id'', ''test5''), (''data-name'', ''2nd'')]) None)),
(cast (character_data_ptr.Ref 2), cast (create_character_data_obj ''P'')),
(cast (element_ptr.Ref 15), cast (create_element_obj ''input'' [] (fmap_of_list [(''id'', ''test5''), (''type'', ''submit''), (''value'', ''Submit''), (''data-name'', ''3rd'')]) None)),
(cast (element_ptr.Ref 16), cast (create_element_obj ''div'' [cast (element_ptr.Ref 17)] (fmap_of_list [(''id'', ''outer'')]) None)),
(cast (element_ptr.Ref 17), cast (create_element_obj ''div'' [cast (element_ptr.Ref 18)] (fmap_of_list [(''id'', ''middle'')]) None)),
(cast (element_ptr.Ref 18), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', ''inner'')]) None)),
(cast (element_ptr.Ref 19), cast (create_element_obj ''script'' [cast (character_data_ptr.Ref 3)] fmempty None)),
(cast (character_data_ptr.Ref 3), cast (create_character_data_obj ''%3C%3Cscript%3E%3E''))]"
definition document :: "(unit, unit, unit, unit, unit, unit) object_ptr option" where "document = Some (cast (document_ptr.Ref 1))"
text \<open>"Document.getElementById with a script-inserted element"\<close>
lemma "test (do {
gBody \<leftarrow> document . body;
TEST_ID \<leftarrow> return ''test2'';
test \<leftarrow> document . createElement(''div'');
test . setAttribute(''id'', TEST_ID);
gBody . appendChild(test);
result \<leftarrow> document . getElementById(TEST_ID);
assert_not_equals(result, None, ''should not be null.'');
tmp0 \<leftarrow> result . tagName;
assert_equals(tmp0, ''div'', ''should have appended element's tag name'');
gBody . removeChild(test);
removed \<leftarrow> document . getElementById(TEST_ID);
assert_equals(removed, None, ''should not get removed element.'')
}) Document_getElementById_heap"
by eval
text \<open>"update `id` attribute via setAttribute/removeAttribute"\<close>
lemma "test (do {
gBody \<leftarrow> document . body;
TEST_ID \<leftarrow> return ''test3'';
test \<leftarrow> document . createElement(''div'');
test . setAttribute(''id'', TEST_ID);
gBody . appendChild(test);
UPDATED_ID \<leftarrow> return ''test3-updated'';
test . setAttribute(''id'', UPDATED_ID);
e \<leftarrow> document . getElementById(UPDATED_ID);
assert_equals(e, test, ''should get the element with id.'');
old \<leftarrow> document . getElementById(TEST_ID);
assert_equals(old, None, ''shouldn't get the element by the old id.'');
test . removeAttribute(''id'');
e2 \<leftarrow> document . getElementById(UPDATED_ID);
assert_equals(e2, None, ''should return null when the passed id is none in document.'')
}) Document_getElementById_heap"
by eval
text \<open>"Ensure that the id attribute only affects elements present in a document"\<close>
lemma "test (do {
TEST_ID \<leftarrow> return ''test4-should-not-exist'';
e \<leftarrow> document . createElement(''div'');
e . setAttribute(''id'', TEST_ID);
tmp0 \<leftarrow> document . getElementById(TEST_ID);
assert_equals(tmp0, None, ''should be null'');
tmp1 \<leftarrow> document . body;
tmp1 . appendChild(e);
tmp2 \<leftarrow> document . getElementById(TEST_ID);
assert_equals(tmp2, e, ''should be the appended element'')
}) Document_getElementById_heap"
by eval
text \<open>"in tree order, within the context object's tree"\<close>
lemma "test (do {
gBody \<leftarrow> document . body;
TEST_ID \<leftarrow> return ''test5'';
target \<leftarrow> document . getElementById(TEST_ID);
assert_not_equals(target, None, ''should not be null'');
tmp0 \<leftarrow> target . getAttribute(''data-name'');
assert_equals(tmp0, ''1st'', ''should return the 1st'');
element4 \<leftarrow> document . createElement(''div'');
element4 . setAttribute(''id'', TEST_ID);
element4 . setAttribute(''data-name'', ''4th'');
gBody . appendChild(element4);
target2 \<leftarrow> document . getElementById(TEST_ID);
assert_not_equals(target2, None, ''should not be null'');
tmp1 \<leftarrow> target2 . getAttribute(''data-name'');
assert_equals(tmp1, ''1st'', ''should be the 1st'');
tmp2 \<leftarrow> target2 . parentNode;
tmp2 . removeChild(target2);
target3 \<leftarrow> document . getElementById(TEST_ID);
assert_not_equals(target3, None, ''should not be null'');
tmp3 \<leftarrow> target3 . getAttribute(''data-name'');
assert_equals(tmp3, ''4th'', ''should be the 4th'')
}) Document_getElementById_heap"
by eval
text \<open>"Modern browsers optimize this method with using internal id cache.
This test checks that their optimization should effect only append to
`Document`, not append to `Node`."\<close>
lemma "test (do {
TEST_ID \<leftarrow> return ''test6'';
s \<leftarrow> document . createElement(''div'');
s . setAttribute(''id'', TEST_ID);
tmp0 \<leftarrow> document . createElement(''div'');
tmp0 . appendChild(s);
tmp1 \<leftarrow> document . getElementById(TEST_ID);
assert_equals(tmp1, None, ''should be null'')
}) Document_getElementById_heap"
by eval
text \<open>"changing attribute's value via `Attr` gotten from `Element.attribute`."\<close>
lemma "test (do {
gBody \<leftarrow> document . body;
TEST_ID \<leftarrow> return ''test7'';
element \<leftarrow> document . createElement(''div'');
element . setAttribute(''id'', TEST_ID);
gBody . appendChild(element);
target \<leftarrow> document . getElementById(TEST_ID);
assert_equals(target, element, ''should return the element before changing the value'');
element . setAttribute(''id'', (TEST_ID @ ''-updated''));
target2 \<leftarrow> document . getElementById(TEST_ID);
assert_equals(target2, None, ''should return null after updated id via Attr.value'');
target3 \<leftarrow> document . getElementById((TEST_ID @ ''-updated''));
assert_equals(target3, element, ''should be equal to the updated element.'')
}) Document_getElementById_heap"
by eval
text \<open>"update `id` attribute via element.id"\<close>
lemma "test (do {
gBody \<leftarrow> document . body;
TEST_ID \<leftarrow> return ''test12'';
test \<leftarrow> document . createElement(''div'');
test . setAttribute(''id'', TEST_ID);
gBody . appendChild(test);
UPDATED_ID \<leftarrow> return (TEST_ID @ ''-updated'');
test . setAttribute(''id'', UPDATED_ID);
e \<leftarrow> document . getElementById(UPDATED_ID);
assert_equals(e, test, ''should get the element with id.'');
old \<leftarrow> document . getElementById(TEST_ID);
assert_equals(old, None, ''shouldn't get the element by the old id.'');
test . setAttribute(''id'', '''');
e2 \<leftarrow> document . getElementById(UPDATED_ID);
assert_equals(e2, None, ''should return null when the passed id is none in document.'')
}) Document_getElementById_heap"
by eval
text \<open>"where insertion order and tree order don't match"\<close>
lemma "test (do {
gBody \<leftarrow> document . body;
TEST_ID \<leftarrow> return ''test13'';
container \<leftarrow> document . createElement(''div'');
container . setAttribute(''id'', (TEST_ID @ ''-fixture''));
gBody . appendChild(container);
element1 \<leftarrow> document . createElement(''div'');
element1 . setAttribute(''id'', TEST_ID);
element2 \<leftarrow> document . createElement(''div'');
element2 . setAttribute(''id'', TEST_ID);
element3 \<leftarrow> document . createElement(''div'');
element3 . setAttribute(''id'', TEST_ID);
element4 \<leftarrow> document . createElement(''div'');
element4 . setAttribute(''id'', TEST_ID);
container . appendChild(element2);
container . appendChild(element4);
container . insertBefore(element3, element4);
container . insertBefore(element1, element2);
test \<leftarrow> document . getElementById(TEST_ID);
assert_equals(test, element1, ''should return 1st element'');
container . removeChild(element1);
test \<leftarrow> document . getElementById(TEST_ID);
assert_equals(test, element2, ''should return 2nd element'');
container . removeChild(element2);
test \<leftarrow> document . getElementById(TEST_ID);
assert_equals(test, element3, ''should return 3rd element'');
container . removeChild(element3);
test \<leftarrow> document . getElementById(TEST_ID);
assert_equals(test, element4, ''should return 4th element'');
container . removeChild(element4)
}) Document_getElementById_heap"
by eval
text \<open>"Inserting an id by inserting its parent node"\<close>
lemma "test (do {
gBody \<leftarrow> document . body;
TEST_ID \<leftarrow> return ''test14'';
a \<leftarrow> document . createElement(''a'');
b \<leftarrow> document . createElement(''b'');
a . appendChild(b);
b . setAttribute(''id'', TEST_ID);
tmp0 \<leftarrow> document . getElementById(TEST_ID);
assert_equals(tmp0, None);
gBody . appendChild(a);
tmp1 \<leftarrow> document . getElementById(TEST_ID);
assert_equals(tmp1, b)
}) Document_getElementById_heap"
by eval
text \<open>"Document.getElementById must not return nodes not present in document"\<close>
lemma "test (do {
TEST_ID \<leftarrow> return ''test15'';
outer \<leftarrow> document . getElementById(''outer'');
middle \<leftarrow> document . getElementById(''middle'');
inner \<leftarrow> document . getElementById(''inner'');
tmp0 \<leftarrow> document . getElementById(''middle'');
outer . removeChild(tmp0);
new_el \<leftarrow> document . createElement(''h1'');
new_el . setAttribute(''id'', ''heading'');
inner . appendChild(new_el);
tmp1 \<leftarrow> document . getElementById(''heading'');
assert_equals(tmp1, None)
}) Document_getElementById_heap"
by eval
end
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj27eqsynthconj2 : forall (lv0 : natural), (@eq natural (lv0) (mult lv0 (Succ Zero))).
Admitted.
QuickChick conj27eqsynthconj2.
|
theory Wasm_Assertions_Shallow imports "Wasm_Big_Step" begin
typedef lvar = "UNIV :: (nat) set" ..
(* global, local, logical variables*)
datatype var = Gl nat | Lc nat | Lv lvar
datatype 'a lvar_v = V_p v | V_n nat | V_b byte | V_a 'a
abbreviation "case_ret r r_new \<equiv> case_option r_new (\<lambda>x. Some x) r"
lemma case_ret_None[simp]:"case_ret r None = r"
by (cases r) auto
(* variable store *)
(* global, local, logical variables*)
type_synonym 'a var_st = "global list \<times> v list \<times> (lvar, 'a lvar_v) map"
definition var_st_get_local :: "'a var_st \<Rightarrow> nat \<Rightarrow> v option" where
"var_st_get_local st n \<equiv> let st_l = (fst (snd st)) in
(if (n < length st_l)
then Some (st_l!n)
else None)"
definition var_st_set_local :: "'a var_st \<Rightarrow> nat \<Rightarrow> v \<Rightarrow> 'a var_st" where
"var_st_set_local st n v \<equiv> let (gs, vs, lvs) = st in
(if (n < length vs)
then (gs, vs[n := v], lvs)
else st)"
definition var_st_get_global :: "'a var_st \<Rightarrow> nat \<Rightarrow> global option" where
"var_st_get_global st n \<equiv> let st_g = (fst st) in
(if (n < length st_g)
then Some (st_g!n)
else None)"
definition var_st_set_global :: "'a var_st \<Rightarrow> nat \<Rightarrow> global \<Rightarrow> 'a var_st" where
"var_st_set_global st n g \<equiv> let (gs, vs, lvs) = st in
(if (n < length gs)
then (gs[n := g], vs, lvs)
else st)"
definition var_st_set_global_v :: "'a var_st \<Rightarrow> nat \<Rightarrow> v \<Rightarrow> 'a var_st" where
"var_st_set_global_v st n v \<equiv> let (gs, vs, lvs) = st in
(if (n < length gs)
then (gs[n := ((gs!n)\<lparr>g_val := v\<rparr>)], vs, lvs)
else st)"
definition var_st_get_lvar :: "'a var_st \<Rightarrow> lvar \<Rightarrow> 'a lvar_v option" where
"var_st_get_lvar st lv \<equiv> let st_lv = (snd (snd st)) in st_lv lv"
definition var_st_set_lvar :: "'a var_st \<Rightarrow> lvar \<Rightarrow> 'a lvar_v \<Rightarrow> 'a var_st" where
"var_st_set_lvar st l lv \<equiv> let (gs, vs, lvs) = st in
(gs, vs, (lvs(l \<mapsto> lv)))"
(* abstract heap with max length *)
type_synonym heap = "((nat, byte) map) \<times> (nat option)"
definition map_disj :: "('a,'b) map \<Rightarrow> ('a,'b) map \<Rightarrow> bool" where
"map_disj m1 m2 \<equiv> Set.disjnt (dom m1) (dom m2)"
definition option_disj :: "'a option \<Rightarrow> 'a option \<Rightarrow> bool" where
"option_disj o1 o2 \<equiv> Option.is_none o1 \<or> Option.is_none o2"
definition heap_disj :: "heap \<Rightarrow> heap \<Rightarrow> bool" where
"heap_disj h1 h2 \<equiv> map_disj (fst h1) (fst h2) \<and> option_disj (snd h1) (snd h2)"
definition heap_merge :: "heap \<Rightarrow> heap \<Rightarrow> heap" where
"heap_merge h1 h2 \<equiv> let (m1,s1) = h1 in
let (m2,s2) = h2 in
(m1 ++ m2, case_option s2 (\<lambda>s. Some s) s1)"
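(* note (added comment): heap_merge is asymmetric in general, since the second
   byte map overrides the first (Isabelle's ++) while the first length
   annotation takes precedence; on disjoint heaps the two orders agree,
   cf. heap_merge_disj_sym below *)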
lemma heap_disj_sym: "heap_disj h1 h2 = heap_disj h2 h1"
unfolding heap_disj_def map_disj_def option_disj_def
using disjnt_sym
by blast
lemma heap_merge_disj_sym:
assumes "heap_disj h1 h2"
shows "heap_merge h1 h2 = heap_merge h2 h1"
using assms
unfolding heap_disj_def heap_merge_def option_disj_def heap_disj_def map_disj_def disjnt_def
by (auto simp add: map_add_comm split: option.splits prod.splits)
lemma heap_merge_assoc:
shows "heap_merge h1 (heap_merge h2 h3) = heap_merge (heap_merge h1 h2) h3"
unfolding heap_merge_def
by (auto split: option.splits prod.splits)
lemma heap_disj_merge_sub:
assumes "heap_disj h1 (heap_merge h2 h3)"
shows "heap_disj h1 h2"
"heap_disj h1 h3"
using assms
unfolding heap_disj_def heap_merge_def option_disj_def map_disj_def disjnt_def
by (auto split: option.splits prod.splits)
lemma heap_disj_merge_assoc:
assumes "heap_disj h_H h_Hf"
"heap_disj (heap_merge h_H h_Hf) hf"
shows "heap_disj h_H (heap_merge h_Hf hf)"
using assms
unfolding heap_disj_def heap_merge_def map_disj_def disjnt_def
by (auto split: option.splits prod.splits)
lemma heap_merge_dom:
assumes "x \<in> dom (fst (heap_merge h1 h2))"
shows "x \<in> dom (fst h1) \<or> x \<in> dom (fst h2)"
using assms
unfolding heap_disj_def heap_merge_def option_disj_def heap_disj_def map_disj_def disjnt_def
by (auto simp add: map_add_comm split: option.splits prod.splits)
lemma heap_dom_merge:
assumes "x \<in> dom (fst h1) \<or> x \<in> dom (fst h2)"
shows "x \<in> dom (fst (heap_merge h1 h2))"
using assms
unfolding heap_disj_def heap_merge_def option_disj_def heap_disj_def map_disj_def disjnt_def
by (auto simp add: map_add_comm split: option.splits prod.splits)
lemma heap_dom_merge_eq:
assumes "dom (fst h1) = dom(fst h2)"
shows "dom (fst (heap_merge h1 hf)) = dom (fst (heap_merge h2 hf))"
using assms
unfolding heap_disj_def heap_merge_def option_disj_def heap_disj_def map_disj_def disjnt_def
apply (simp add: map_add_comm split: option.splits prod.splits)
apply force
done
lemma heap_disj_merge_maps1:
assumes "heap_disj h1 h2"
"(fst h1) x = Some y"
shows "fst (heap_merge h1 h2) x = Some y"
using assms
unfolding heap_disj_def heap_merge_def option_disj_def heap_disj_def map_disj_def disjnt_def
apply (simp add: map_add_dom_app_simps map_add_comm split: option.splits prod.splits)
apply (metis fst_conv map_add_comm map_add_find_right)
done
lemma heap_disj_merge_maps2:
assumes "heap_disj h1 h2"
"(fst h2) x = Some y"
shows "fst (heap_merge h1 h2) x = Some y"
using assms
unfolding heap_disj_def heap_merge_def option_disj_def heap_disj_def map_disj_def disjnt_def
apply (simp add: map_add_dom_app_simps map_add_comm split: option.splits prod.splits)
apply (metis fst_conv map_add_find_right)
done
(* local variable reification *)
definition reifies_loc :: "[v list, 'a var_st] \<Rightarrow> bool" where
"reifies_loc locs st \<equiv> (fst (snd st)) = locs"
(* global variable reification (with respect to a partial instance) *)
definition reifies_glob :: "[global list, nat list, 'a var_st] \<Rightarrow> bool" where
"reifies_glob gs igs st \<equiv>
let st_g = (fst st) in
length st_g = (length igs) \<and> (\<forall>gn < length st_g. igs!gn < (length gs) \<and> st_g!gn = (gs!(igs!gn)))"
(* function reification (with respect to a partial instance) *)
definition reifies_func :: "[cl list, nat list, cl list] \<Rightarrow> bool" where
"reifies_func cls icls fs \<equiv> list_all2 (\<lambda>icl f. icl < (length cls) \<and> cls!icl = f) icls fs"
(* heap reification relations *)
definition reifies_heap_contents :: "[mem, ((nat, byte) map)] \<Rightarrow> bool" where
"reifies_heap_contents m byte_m \<equiv>
\<forall>ind \<in> (dom byte_m). ind < mem_length m \<and> byte_m(ind) = Some (byte_at m ind)"
definition reifies_heap_length :: "[mem, nat option] \<Rightarrow> bool" where
"reifies_heap_length m l_opt \<equiv> pred_option (\<lambda>l. mem_length m = (l * Ki64)) l_opt"
definition reifies_heap :: "[mem list, nat list, heap] \<Rightarrow> bool" where
"reifies_heap ms im_opt h \<equiv> let im = hd im_opt in
im < length ms
\<and> reifies_heap_contents (ms!im) (fst h)
\<and> reifies_heap_length (ms!im) (snd h)"
(* store reification relation *)
definition reifies_s :: "[s, inst, heap, 'a var_st, cl list] \<Rightarrow> bool" where
"reifies_s s i h st fs \<equiv> reifies_glob (globs s) (inst.globs i) st
\<and> reifies_func (funcs s) (inst.funcs i) fs
\<and> reifies_heap (mems s) (inst.mems i) h"
definition var_st_agree :: "'a var_st \<Rightarrow> var \<Rightarrow> 'a var_st \<Rightarrow> bool" where
"var_st_agree st1 var st2 \<equiv> case var of
Lc n \<Rightarrow> (var_st_get_local st1 n) = (var_st_get_local st2 n)
| Gl n \<Rightarrow> (var_st_get_global st1 n) = (var_st_get_global st2 n)
| Lv lvar \<Rightarrow> (var_st_get_lvar st1 lvar) = (var_st_get_lvar st2 lvar)"
(* shallow embedding of assertions *)
type_synonym 'a stack_ass = "(v \<Rightarrow> 'a var_st \<Rightarrow> bool) list"
type_synonym 'a heap_ass = "heap \<Rightarrow> 'a var_st \<Rightarrow> bool"
datatype 'a ass = Ass "'a stack_ass" "'a heap_ass" (infix "\<^sub>s|\<^sub>h" 60) | Ex_ass lvar "'a ass"
type_synonym 'a triple = "'a ass \<times> e list \<times> 'a ass"
(* function list, assms, label ass, return ass *)
type_synonym 'a triple_context = "cl list \<times> 'a ass list \<times> 'a ass option"
definition add_label_ass :: "'a triple_context \<Rightarrow> 'a ass \<Rightarrow> 'a triple_context" where
"add_label_ass \<Gamma> l \<equiv> let (fs, labs, ret) = \<Gamma> in (fs, l#labs, ret)"
definition stack_ass_sat :: "'a stack_ass \<Rightarrow> v list \<Rightarrow> 'a var_st \<Rightarrow> bool" where
"stack_ass_sat St ves v_st = list_all2 (\<lambda>Si v. Si v v_st) St ves"
fun ass_sat :: "'a ass \<Rightarrow> v list \<Rightarrow> heap \<Rightarrow> 'a var_st \<Rightarrow> bool" where
"ass_sat (St \<^sub>s|\<^sub>h H) ves h v_st = (stack_ass_sat St ves v_st \<and> H h v_st)"
| "ass_sat (Ex_ass lv P) ves h st = (\<exists>v. ass_sat P ves h (var_st_set_lvar st lv v))"
fun ass_stack_len :: "'a ass \<Rightarrow> nat" where
"ass_stack_len (St \<^sub>s|\<^sub>h H) = length St"
| "ass_stack_len (Ex_ass lv P) = ass_stack_len P"
(* label reification relation *)
definition reifies_lab :: "nat list \<Rightarrow> 'a triple_context \<Rightarrow> bool" where
"reifies_lab lns \<Gamma> \<equiv> lns = map ass_stack_len (fst (snd \<Gamma>))"
(* return reification relation *)
definition reifies_ret :: "nat option \<Rightarrow> 'a triple_context \<Rightarrow> bool" where
"reifies_ret rn \<Gamma> \<equiv> rn = Option.map_option ass_stack_len (snd (snd \<Gamma>))"
locale encapsulated_module =
fixes i :: inst
assumes encapsulated_inst_globs:"\<And> j k. \<lbrakk>j \<noteq> k; (j < length (inst.globs i)); (k < length (inst.globs i))\<rbrakk>
\<Longrightarrow> (inst.globs i)!j \<noteq> (inst.globs i)!k"
begin
definition ass_wf where
"ass_wf lvar_st ret \<Gamma> labs locs s hf st h vcs P \<equiv>
ass_sat P vcs h st
\<and> heap_disj h hf
\<and> reifies_s s i (heap_merge h hf) st (fst \<Gamma>)
\<and> reifies_loc locs st
\<and> reifies_lab labs \<Gamma>
\<and> reifies_ret ret \<Gamma>
\<and> snd (snd st) = lvar_st"
definition res_wf where
"res_wf lvar_st' \<Gamma> res locs' s' hf vcsf Q \<equiv>
let (fs,lasss,rass) = \<Gamma> in
(case res of
RTrap \<Rightarrow> False
| RValue rvs \<Rightarrow> \<exists>h' h'' vcs' st'.
ass_sat Q vcs' h'' st'
\<and> rvs = vcsf@vcs'
\<and> heap_disj h'' hf
\<and> h' = heap_merge h'' hf
\<and> reifies_s s' i h' st' fs
\<and> reifies_loc locs' st'
\<and> snd (snd st') = lvar_st'
| RBreak n rvs \<Rightarrow> \<exists>h' h'' vcs' st'.
n < length lasss
\<and> ass_sat (lasss!n) vcs' h'' st'
\<and> rvs = vcs'
\<and> heap_disj h'' hf
\<and> h' = heap_merge h'' hf
\<and> reifies_s s' i h' st' fs
\<and> reifies_loc locs' st'
\<and> snd (snd st') = lvar_st'
| RReturn rvs \<Rightarrow> \<exists>h' h'' vcs' st' the_rass.
rass = Some the_rass
\<and> ass_sat the_rass vcs' h'' st'
\<and> rvs = vcs'
\<and> heap_disj h'' hf
\<and> h' = heap_merge h'' hf
\<and> reifies_s s' i h' st' fs
\<and> reifies_loc locs' st'
\<and> snd (snd st') = lvar_st')"
(* TODO: frame? ? ? ?*)
definition valid_triple :: "'a triple_context \<Rightarrow> 'a ass \<Rightarrow> e list \<Rightarrow> 'a ass \<Rightarrow> bool" ("_ \<Turnstile> {_}_{_}" 60) where
"(\<Gamma> \<Turnstile> {P}es{Q}) \<equiv> \<forall>vcs h st s locs labs labsf ret retf lvar_st hf vcsf s' locs' res.
((ass_wf lvar_st ret \<Gamma> labs locs s hf st h vcs P \<and>
((s,locs,($$*vcsf)@($$*vcs)@es) \<Down>{(labs@labsf,case_ret ret retf,i)} (s',locs', res))) \<longrightarrow>
res_wf lvar_st \<Gamma> res locs' s' hf vcsf Q)"
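(* informal reading (added comment): the triple holds iff for every frame heap
   hf, every stack of frame values vcsf and any extension of the label/return
   contexts, a big-step execution started from a state in which P is satisfied
   and reified together with hf produces a result satisfying Q; hf reappears
   untouched in res_wf, so a frame rule is effectively built in *)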
definition valid_triples :: "'a triple_context \<Rightarrow> 'a triple set \<Rightarrow> bool" ("_ \<TTurnstile> _" 60) where
"\<Gamma> \<TTurnstile> specs \<equiv> \<forall>(P,es,Q) \<in> specs. (\<Gamma> \<Turnstile> {P}es{Q})"
(* TODO: frame? ? ? ?*)
definition valid_triple_n :: "'a triple_context \<Rightarrow> nat \<Rightarrow> 'a ass \<Rightarrow> e list \<Rightarrow> 'a ass \<Rightarrow> bool" ("_ \<Turnstile>'_ _ {_}_{_}" 60) where
"(\<Gamma> \<Turnstile>_k {P}es{Q}) \<equiv> \<forall>vcs h st s locs labs labsf ret retf lvar_st hf vcsf s' locs' res.
((ass_wf lvar_st ret \<Gamma> labs locs s hf st h vcs P \<and>
((s,locs,($$*vcsf)@($$*vcs)@es) \<Down>k{(labs@labsf,case_ret ret retf,i)} (s',locs', res))) \<longrightarrow>
res_wf lvar_st \<Gamma> res locs' s' hf vcsf Q)"
definition valid_triples_n :: "'a triple_context \<Rightarrow> nat \<Rightarrow> 'a triple set \<Rightarrow> bool" ("_ \<TTurnstile>'_ _ _" 60) where
"(\<Gamma> \<TTurnstile>_n specs) \<equiv> \<forall>(P,es,Q) \<in> specs. (\<Gamma> \<Turnstile>_n {P}es{Q})"
definition valid_triples_assms :: "'a triple_context \<Rightarrow> 'a triple set \<Rightarrow> 'a triple set \<Rightarrow> bool" ("_\<bullet>_ \<TTurnstile> _" 60) where
"(\<Gamma>\<bullet>assms \<TTurnstile> specs) \<equiv> ((fst \<Gamma>,[],None) \<TTurnstile> assms) \<longrightarrow> (\<Gamma> \<TTurnstile> specs)"
definition valid_triples_assms_n :: "'a triple_context \<Rightarrow> 'a triple set \<Rightarrow> nat \<Rightarrow> 'a triple set \<Rightarrow> bool" ("_\<bullet>_ \<TTurnstile>'_ _ _" 60) where
"(\<Gamma>\<bullet>assms \<TTurnstile>_n specs) \<equiv> ((fst \<Gamma>,[],None) \<TTurnstile>_n assms) \<longrightarrow> (\<Gamma> \<TTurnstile>_n specs)"
lemmas valid_triple_defs = valid_triple_def valid_triples_def valid_triples_assms_def
valid_triple_n_def valid_triples_n_def valid_triples_assms_n_def
definition ass_conseq :: "'a ass \<Rightarrow> 'a ass \<Rightarrow> v list \<Rightarrow> heap \<Rightarrow> 'a var_st \<Rightarrow> bool" where
"ass_conseq P P' vcs h st \<equiv> (ass_stack_len P \<le> ass_stack_len P' \<and> (ass_sat P vcs h st \<longrightarrow> ass_sat P' vcs h st))"
lemma extend_context_res_wf:
assumes "res_wf lvar_st' (fs,[],None) res locs' s' hf vcsf Q"
shows "res_wf lvar_st' (fs,ls,rs) res locs' s' hf vcsf Q"
using assms
unfolding res_wf_def
by (auto split: res_b.splits)
lemma extend_context_res_wf_value_trap:
assumes "res_wf lvar_st' (fs,ls,rs) res locs' s' hf vcsf Q"
"\<exists>rvs. res = RValue rvs \<or> res = RTrap"
shows "res_wf lvar_st' (fs,ls',rs') res locs' s' hf vcsf Q"
using assms
unfolding res_wf_def
by (auto split: res_b.splits)
lemma ex_lab:"\<exists>l. lab = ass_stack_len l"
using ass_stack_len.simps(1)
by (metis Ex_list_of_length)
lemma ex_labs:"\<exists>ls. labs = map ass_stack_len ls"
using ex_lab
by (simp add: ex_lab ex_map_conv)
lemma ex_ret:"\<exists>rs. ret = map_option ass_stack_len rs"
using ex_lab
by (metis not_Some_eq option.simps(8) option.simps(9))
lemma res_wf_valid_triple_n_intro:
assumes "\<Gamma> \<Turnstile>_k {P}es{Q}"
"ass_wf lvar_st ret \<Gamma> labs locs s hf st h vcs P"
"((s,locs,($$*vcsf)@($$*vcs)@es) \<Down>k{(labs@labsf,case_ret ret retf,i)} (s',locs', res))"
shows "res_wf lvar_st \<Gamma> res locs' s' hf vcsf Q"
using assms
unfolding valid_triple_n_def
by blast
lemma res_wf_valid_triple_n_intro_tight:
assumes "\<Gamma> \<Turnstile>_k {P}es{Q}"
"ass_wf lvar_st ret \<Gamma> labs locs s hf st h vcs P"
"((s,locs,($$*vcsf)@($$*vcs)@es) \<Down>k{(labs,ret,i)} (s',locs', res))"
shows "res_wf lvar_st \<Gamma> res locs' s' hf vcsf Q"
using res_wf_valid_triple_n_intro[OF assms(1,2), of vcsf "[]" None] assms(3)
by fastforce
lemma res_wf_valid_triple_n_not_rvalue:
assumes "res_wf lvar_st \<Gamma> res locs s hf vcsf Q"
"\<nexists>vs. res = RValue vs"
shows "res_wf lvar_st \<Gamma> res locs s hf vcsf Q'"
using assms
unfolding res_wf_def
by (cases res) auto
lemma extend_context_call:
assumes "(fs,ls,rs) \<Turnstile>_n {P} [$Call j] {Q}"
shows "(fs,ls',rs') \<Turnstile>_n {P} [$Call j] {Q}"
proof -
{
fix vcs h st s locs labs labsf ret retf lvar_st hf vcsf s' locs' res
assume local_assms:"ass_wf lvar_st ret (fs, ls', rs') labs locs s hf st h vcs P"
"(s, locs, ($$* vcsf) @ ($$* vcs) @ [$Call j]) \<Down>n{(labs@labsf, case_ret ret retf, i)} (s', locs', res)"
obtain ret' labs' where "ass_wf lvar_st ret' (fs, ls, rs) labs' locs s hf st h vcs P"
using local_assms(1)
unfolding ass_wf_def reifies_s_def reifies_lab_def reifies_ret_def
by fastforce
moreover
have "(s, locs, ($$* vcsf) @ ($$* vcs) @ [$Call j]) \<Down>n{(labs'@labsf, case_ret ret' retf, i)} (s', locs', res)"
by (metis append_assoc calln_context local_assms(2) map_append)
ultimately
have "res_wf lvar_st (fs, ls, rs) res locs' s' hf vcsf Q"
using assms local_assms(2)
unfolding valid_triple_n_def
by fastforce
hence "res_wf lvar_st (fs, ls', rs') res locs' s' hf vcsf Q"
using extend_context_res_wf_value_trap call_value_trap local_assms(2)
by (metis (no_types, lifting) append_assoc map_append)
}
thus ?thesis
unfolding valid_triple_n_def
by blast
qed
lemma reifies_func_ind:
assumes "reifies_func (funcs s) (inst.funcs i) fs"
"j < length fs"
shows "sfunc s i j = fs!j"
using assms
unfolding reifies_func_def
by (simp add: list_all2_conv_all_nth sfunc_def sfunc_ind_def)
lemma valid_triples_n_emp: "\<Gamma> \<TTurnstile>_n {}"
unfolding valid_triples_n_def
by blast
lemma res_wf_conseq:
assumes "res_wf l_st (fs,ls,rs) res locs' s' hf vcsf P"
"\<forall>vs h v_st. (list_all2 (\<lambda>L L'. ass_conseq L L' vs h v_st) ls ls')"
"\<forall>vs h v_st. (rel_option (\<lambda>R R'. ass_conseq R R' vs h v_st) rs rs')"
"\<forall>vs h v_st. (ass_sat P vs h v_st \<longrightarrow> ass_sat P' vs h v_st)"
shows "res_wf l_st (fs,ls',rs') res locs' s' hf vcsf P'"
proof (cases res)
case (RValue x1)
thus ?thesis
using assms
unfolding res_wf_def
apply simp
apply metis
done
next
case (RBreak x21 x22)
thus ?thesis
using assms
unfolding res_wf_def
apply (simp add: ass_conseq_def)
apply (metis (no_types, lifting) list_all2_conv_all_nth)
done
next
case (RReturn x3)
thus ?thesis
using assms(1,3)
unfolding res_wf_def
apply (cases rs; cases rs')
apply (simp_all add: ass_conseq_def)
apply blast
done
next
case RTrap
thus ?thesis
using assms
unfolding res_wf_def
by simp
qed
lemma stack_ass_sat_len:
assumes "ass_sat P vcs h st"
shows "length vcs = ass_stack_len P"
using assms
proof (induction rule: ass_sat.induct)
case (1 St H ves h v_st)
thus ?case
apply (simp add: stack_ass_sat_def)
apply (metis list_all2_lengthD)
done
qed auto
lemma stack_ass_sat_len1:
assumes "ass_sat P vcs h st"
"ass_sat P vcs' h st"
shows "length vcs = length vcs'"
using stack_ass_sat_len[OF assms(1)] stack_ass_sat_len[OF assms(2)]
by simp
lemma ass_sat_len_eq_lab:
assumes "(list_all2 (\<lambda>L L'. ass_conseq L L' vcs h st) ls ls')"
shows "(list_all2 (\<lambda>L L'. ((\<not>ass_sat L vcs h st \<and> (ass_stack_len L \<le> ass_stack_len L')) \<or> (ass_stack_len L = ass_stack_len L'))) ls ls')"
using assms
unfolding ass_conseq_def list_all2_conv_all_nth
by (metis stack_ass_sat_len)
lemma ass_sat_len_eq_ret:
assumes "(rel_option (\<lambda>R R'. ass_conseq R R' vcs h st) rs rs')"
shows "(rel_option (\<lambda>R R'. ((\<not>ass_sat R vcs h st \<and> (ass_stack_len R \<le> ass_stack_len R')) \<or> (ass_stack_len R = ass_stack_len R'))) rs rs')"
using assms
unfolding ass_conseq_def
apply (cases rs; cases rs')
apply simp_all
apply (metis stack_ass_sat_len)
done
lemma ass_wf_conseq1:
assumes "ass_wf lvar_st ret (fs,ls,rs) labs locs s hf st h vcs P"
"(ass_sat P vcs h st \<longrightarrow> ass_sat P' vcs h st)"
shows "ass_wf lvar_st ret (fs,ls,rs) labs locs s hf st h vcs P'"
using assms
unfolding ass_wf_def
by (auto simp add: reifies_lab_def reifies_ret_def)
lemma rel_option_to_eq_map_option:
assumes
"rel_option f x y" and
"\<And>a b. f a b \<Longrightarrow> g a = g b"
shows "map_option g x = map_option g y"
using assms
by (induction x y rule: option.rel_induct; simp)
lemma list_all2_to_eq_map:
assumes
"list_all2 f xs ys" and
"\<And>a b. f a b \<Longrightarrow> g a = g b"
shows "map g xs = map g ys"
using assms
by (induction xs ys rule: list.rel_induct; simp)
lemma ass_wf_conseq2:
assumes "ass_wf lvar_st ret (fs,ls,rs) labs locs s hf st h vcs P"
"(list_all2 (\<lambda>L L'. (ass_stack_len L = ass_stack_len L') \<and> ass_conseq L L' vcs h st) ls ls')"
"(rel_option (\<lambda>R R'. (ass_stack_len R = ass_stack_len R') \<and> ass_conseq R R' vcs h st) rs rs')"
shows "ass_wf lvar_st ret (fs,ls',rs') labs locs s hf st h vcs P"
proof -
show ?thesis
using assms
unfolding ass_wf_def ass_conseq_def
unfolding reifies_lab_def reifies_ret_def
by (auto intro: list_all2_to_eq_map rel_option_to_eq_map_option)
qed
lemma valid_triple_assms_n_label_false:
assumes "res_wf lvar_st (fs,ls,rs) res locs' s' hf vcsf Q"
"j \<ge> length ls \<or> (\<forall>vcs h st. \<not>ass_sat (ls!j) vcs h st)"
shows "res \<noteq> RBreak j rvs"
using assms
unfolding res_wf_def
by (auto split: res_b.splits)
lemma valid_triple_assms_n_return_false:
assumes "res_wf lvar_st (fs,ls,rs) res locs' s' hf vcsf Q"
"rs = None \<or> (\<forall>vcs h st. \<not>ass_sat (the rs) vcs h st)"
shows "res \<noteq> RReturn rvs"
using assms
unfolding res_wf_def
by (auto split: res_b.splits)
lemma valid_triple_n_conseq:
assumes "(fs,ls,rs) \<Turnstile>_n {P'} es {Q'}"
"\<forall>vs h v_st. (list_all2 (\<lambda>L L'. ass_conseq L L' vs h v_st) ls ls') \<and>
(rel_option (\<lambda>R R'. ass_conseq R R' vs h v_st) rs rs') \<and>
(ass_sat P vs h v_st \<longrightarrow> ass_sat P' vs h v_st) \<and>
(ass_sat Q' vs h v_st \<longrightarrow> ass_sat Q vs h v_st)"
shows "(fs,ls',rs') \<Turnstile>_n {P} es {Q}"
proof -
{
fix vcs h st s locs labs' labsf' ret' retf' lvar_st hf vcsf s' locs' res
assume local_assms:"ass_wf lvar_st ret' (fs,ls',rs') labs' locs s hf st h vcs P"
"(s, locs, ($$*vcsf)@($$*vcs)@es) \<Down>n{(labs'@labsf', case_ret ret' retf', i)} (s', locs', res)"
have "ass_wf lvar_st ret' (fs,ls',rs') labs' locs s hf st h vcs P'"
using ass_wf_conseq1[OF local_assms(1)] assms(2)
by blast
then obtain labs ret where labs_def:"ass_wf lvar_st ret (fs,ls,rs) labs locs s hf st h vcs P'"
"length labs = length labs'"
"length ls = length ls'"
"length (labs'@labsf') = length (labs@labsf')"
unfolding ass_wf_def reifies_lab_def reifies_ret_def
by simp (meson assms(2) list_all2_conv_all_nth)
have "res_wf lvar_st (fs,ls',rs') res locs' s' hf vcsf Q"
proof (cases res)
case (RValue x1)
hence "(s, locs, ($$*vcsf)@($$*vcs)@es) \<Down>n{(labs@labsf', case_ret ret retf', i)} (s', locs', res)"
using reduce_to_n_not_break_n_return[OF local_assms(2)] labs_def(2)
by simp
hence "res_wf lvar_st (fs,ls,rs) res locs' s' hf vcsf Q'"
using assms(1) labs_def
unfolding valid_triple_n_def
by fastforce
thus ?thesis
using assms(2) RValue
unfolding res_wf_def
by fastforce
next
case (RBreak ib vbs)
have 0:"ib < length (labs'@labsf')"
using local_assms(2) RBreak reduce_to_n_break_n
by fastforce
have b0:"(s, locs, ($$*vcsf)@($$*vcs)@es) \<Down>n{(labs'@labsf', case_ret ret' retf', i)} (s', locs', RBreak ib vbs)"
using local_assms(2) RBreak
by simp
show ?thesis
proof (cases "labs!ib \<noteq> labs'!ib")
case True
have "\<And>vs h v_st. list_all2 (\<lambda>L L'. \<not> ass_sat L vs h v_st \<and> ass_stack_len L \<le> ass_stack_len L' \<or> ass_stack_len L = ass_stack_len L') ls ls'"
using ass_sat_len_eq_lab assms(2)
by blast
hence 1:"\<And>vcs h st. ib < length labs' \<Longrightarrow> \<not> ass_sat (ls!ib) vcs h st"
"ib < length labs' \<Longrightarrow> labs!ib \<le> labs'!ib"
"ib < length labs' \<Longrightarrow> labs!ib = ass_stack_len (ls!ib)"
"ib < length labs' \<Longrightarrow> labs'!ib = ass_stack_len (ls'!ib)"
using True 0 labs_def local_assms(1)
unfolding list_all2_conv_all_nth ass_wf_def reifies_lab_def
by fastforce+
obtain vbs' where 2:"((s, locs, ($$*vcsf)@($$*vcs)@es) \<Down>n{(labs@labsf', case_ret ret retf', i)} (s', locs', RBreak ib vbs'))"
using reduce_to_n_break_n2[OF reduce_to_n_not_return[OF b0] labs_def(4)] 0 1(2)
by simp (metis True append_Nil2 labs_def(2) nth_append)
thus ?thesis
using res_wf_valid_triple_n_intro[OF assms(1) labs_def(1) 2] 1(1) RBreak
unfolding res_wf_def
by (simp split: res_b.splits) (metis True append_Nil2 labs_def(2) nth_append)
next
case False
hence "(s, locs, ($$*vcsf)@($$*vcs)@es) \<Down>n{(labs@labsf', case_ret ret retf', i)} (s', locs', res)"
using reduce_to_n_not_break_n_return[OF local_assms(2), of "labs@labsf'"] RBreak
labs_def(2)
by simp (metis nth_append)
hence "res_wf lvar_st (fs,ls,rs) res locs' s' hf vcsf Q'"
using assms(1) labs_def
unfolding valid_triple_n_def
by fastforce
thus ?thesis
using assms(2) RBreak
unfolding res_wf_def ass_conseq_def
by simp (metis (mono_tags, lifting) list_all2_conv_all_nth)
qed
next
case (RReturn vrs)
show ?thesis
proof (cases "(pred_option (\<lambda>r_r. (case_ret ret retf') \<noteq> Some r_r) (case_ret ret' retf'))")
case True
obtain r_r r_r' rs'_r rs_r where r_r'_def:"ret = Some r_r"
"ret' = Some r_r'"
"rs' = Some rs'_r"
"rs = Some rs_r"
using reduce_to_n_return1 local_assms(1,2) RReturn assms(2) labs_def True
unfolding ass_wf_def reifies_ret_def
by (fastforce split: option.splits)
have "ass_stack_len rs_r \<noteq> ass_stack_len rs'_r"
using local_assms(1) labs_def r_r'_def True
unfolding ass_wf_def reifies_ret_def
by simp
moreover
have "\<And>vs h v_st. rel_option (\<lambda>R R'. (\<not>ass_sat R vs h v_st \<and> (ass_stack_len R \<le> ass_stack_len R')) \<or> ass_stack_len R = ass_stack_len R') rs rs'"
using ass_sat_len_eq_ret assms(2)
by blast
ultimately
have 1:"\<And>vcs h st. \<not> ass_sat rs_r vcs h st \<and> ass_stack_len rs_r \<le> ass_stack_len rs'_r"
using r_r'_def
by simp
hence "r_r \<le> r_r'"
using r_r'_def local_assms(1)
unfolding ass_wf_def reifies_ret_def
by (metis (mono_tags, lifting) ass_wf_def eq_snd_iff labs_def(1) option.inject option.map(2) reifies_ret_def)
then obtain vrs' where "((s, locs, ($$*vcsf)@($$*vcs)@es) \<Down>n{(labs@labsf', case_ret ret retf', i)} (s', locs', RReturn vrs'))"
using local_assms(2) RReturn reduce_to_n_return2 reduce_to_n_not_break_n r_r'_def(1,2)
by (metis labs_def(4) option.simps(5) res_b.distinct(7))
thus ?thesis
using assms(1) labs_def 1 r_r'_def RReturn
unfolding valid_triple_n_def res_wf_def
apply (cases "hf")
apply (cases "st")
apply (cases "h")
apply (simp split: res_b.splits option.splits)
apply metis
done
next
case False
hence "(s, locs, ($$*vcsf)@($$*vcs)@es) \<Down>n{(labs@labsf', case_ret ret retf', i)} (s', locs', res)"
using reduce_to_n_not_break_n_return[OF local_assms(2), of "labs@labsf'" "case_ret ret retf'"] RReturn labs_def(2)
by auto
hence "res_wf lvar_st (fs,ls,rs) res locs' s' hf vcsf Q'"
using assms(1) labs_def
unfolding valid_triple_n_def
by fastforce
thus ?thesis
using assms(2) RReturn
unfolding res_wf_def ass_conseq_def
by simp (metis (no_types, lifting) option.rel_cases option.sel)
qed
next
case RTrap
hence "(s, locs, ($$*vcsf)@($$*vcs)@es) \<Down>n{(labs@labsf', case_ret ret retf', i)} (s', locs', res)"
using reduce_to_n_not_break_n_return[OF local_assms(2)] labs_def(2)
by auto
hence "res_wf lvar_st (fs,ls,rs) res locs' s' hf vcsf Q'"
using assms(1) labs_def
unfolding valid_triple_n_def
by fastforce
thus ?thesis
using assms(2) RTrap
unfolding res_wf_def
by fastforce
qed
}
thus ?thesis
unfolding valid_triple_n_def
by blast
qed
lemma valid_triple_assms_n_conseq:
assumes "((fs,ls,rs)\<bullet>assms \<TTurnstile>_n {(P',es,Q')})"
"\<forall>vs h v_st. (list_all2 (\<lambda>L L'. ass_conseq L L' vs h v_st) ls ls') \<and>
(rel_option (\<lambda>R R'. ass_conseq R R' vs h v_st) rs rs') \<and>
(ass_sat P vs h v_st \<longrightarrow> ass_sat P' vs h v_st) \<and>
(ass_sat Q' vs h v_st \<longrightarrow> ass_sat Q vs h v_st)"
shows "((fs,ls',rs')\<bullet>assms \<TTurnstile>_n {(P,es,Q)})"
using valid_triple_n_conseq[OF _ assms(2)] assms(1)
unfolding valid_triples_assms_n_def valid_triples_n_def
by simp
end
end |
module Test.Int32
import Data.Prim.Int32
import Data.SOP
import Hedgehog
import Test.RingLaws
allInt32 : Gen Int32
allInt32 = int32 (linear (-0x80000000) 0x7fffffff)
prop_ltMax : Property
prop_ltMax = property $ do
b8 <- forAll allInt32
(b8 <= MaxInt32) === True
prop_ltMin : Property
prop_ltMin = property $ do
b8 <- forAll allInt32
(b8 >= MinInt32) === True
prop_comp : Property
prop_comp = property $ do
[m,n] <- forAll $ np [allInt32, allInt32]
toOrdering (comp m n) === compare m n
export
props : Group
props = MkGroup "Int32" $
[ ("prop_ltMax", prop_ltMax)
, ("prop_ltMin", prop_ltMin)
, ("prop_comp", prop_comp)
] ++ ringProps allInt32
|
State Before: F : Type ?u.291052
α : Type u_2
β : Type u_1
γ : Type ?u.291061
ι : Type ?u.291064
κ : Type ?u.291067
inst✝¹ : SemilatticeSup α
inst✝ : OrderBot α
s : Finset β
h : Finset.Nonempty s
f : β → α
⊢ ↑(sup s f) = sup s (WithBot.some ∘ f) State After: no goals Tactic: simp only [← sup'_eq_sup h, coe_sup' h] |
function b = generative_model(A,D,m,modeltype,modelvar,params,epsilon)
%GENERATIVE_MODEL run generative model code
%
% B = GENERATIVE_MODEL(A,D,m,modeltype,modelvar,params)
%
% Generates synthetic networks using the models described in the study by
% Betzel et al (2016) in Neuroimage.
%
% Inputs:
% A, binary network of seed connections
% D, Euclidean distance/fiber length matrix
% m, number of connections that should be present in
% final synthetic network
% modeltype, specifies the generative rule (see below)
% modelvar, specifies whether the generative rules are based on
% power-law or exponential relationship
% ({'powerlaw'}|{'exponential'})
% params, either a vector (in the case of the geometric
% model) or a matrix (for all other models) of
% parameters at which the model should be evaluated.
% epsilon, the baseline probability of forming a particular
% connection (should be a very small number
% {default = 1e-5}).
%
% Output:
% B, m x number of networks matrix of connections
%
%
% Full list of model types:
% (each model type realizes a different generative rule)
%
% 1. 'sptl' spatial model
% 2. 'neighbors' number of common neighbors
% 3. 'matching' matching index
% 4. 'clu-avg' average clustering coeff.
% 5. 'clu-min' minimum clustering coeff.
% 6. 'clu-max' maximum clustering coeff.
% 7. 'clu-diff' difference in clustering coeff.
% 8. 'clu-prod' product of clustering coeff.
% 9. 'deg-avg' average degree
% 10. 'deg-min' minimum degree
% 11. 'deg-max' maximum degree
% 12. 'deg-diff' difference in degree
% 13. 'deg-prod' product of degree
%
%
% Example usage:
%
% load demo_generative_models_data
%
% % get number of bi-directional connections
% m = nnz(A)/2;
%
% % get cardinality of network
% n = length(A);
%
% % set model type
% modeltype = 'neighbors';
%
% % set whether the model is based on powerlaw or exponentials
% modelvar = [{'powerlaw'},{'powerlaw'}];
%
% % choose some model parameters
% params = [-2,0.2; -5,1.2; -1,1.5];
% nparams = size(params,1);
%
% % generate synthetic networks
% B = generative_model(Aseed,D,m,modeltype,modelvar,params);
%
% % store them in adjacency matrix format
% Asynth = zeros(n,n,nparams);
% for i = 1:nparams;
% a = zeros(n); a(B(:,i)) = 1; a = a + a';
% Asynth(:,:,i) = a;
% end
%
% Reference: Betzel et al (2016) Neuroimage 124:1054-64.
%
% Richard Betzel, Indiana University/University of Pennsylvania, 2015
if ~exist('epsilon','var')
epsilon = 1e-5;
end
n = length(D);
nparams = size(params,1);
b = zeros(m,nparams);
switch modeltype
case 'clu-avg'
clu = clustering_coef_bu(A);
Kseed = bsxfun(@plus,clu(:,ones(1,n)),clu')/2;
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_clu_avg(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'clu-diff'
clu = clustering_coef_bu(A);
Kseed = abs(bsxfun(@minus,clu(:,ones(1,n)),clu'));
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_clu_diff(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'clu-max'
clu = clustering_coef_bu(A);
Kseed = bsxfun(@max,clu(:,ones(1,n)),clu');
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_clu_max(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'clu-min'
clu = clustering_coef_bu(A);
Kseed = bsxfun(@min,clu(:,ones(1,n)),clu');
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_clu_min(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'clu-prod'
clu = clustering_coef_bu(A);
Kseed = clu*clu';
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_clu_prod(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'deg-avg'
kseed = sum(A,2);
Kseed = bsxfun(@plus,kseed(:,ones(1,n)),kseed')/2;
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_deg_avg(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'deg-diff'
kseed = sum(A,2);
Kseed = abs(bsxfun(@minus,kseed(:,ones(1,n)),kseed'));
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_deg_diff(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'deg-max'
kseed = sum(A,2);
Kseed = bsxfun(@max,kseed(:,ones(1,n)),kseed');
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_deg_max(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'deg-min'
kseed = sum(A,2);
Kseed = bsxfun(@min,kseed(:,ones(1,n)),kseed');
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_deg_min(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'deg-prod'
kseed = sum(A,2);
Kseed = (kseed*kseed').*~eye(n);
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_deg_prod(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'neighbors'
Kseed = (A*A).*~eye(n);
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_nghbrs(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'matching'
Kseed = matching_ind(A);
Kseed = Kseed + Kseed';
for iparam = 1:nparams
eta = params(iparam,1);
gam = params(iparam,2);
b(:,iparam) = fcn_matching(A,Kseed,D,m,eta,gam,modelvar,epsilon);
end
case 'sptl'
for iparam = 1:nparams
eta = params(iparam,1);
b(:,iparam) = fcn_sptl(A,D,m,eta,modelvar{1});
end
end
function b = fcn_clu_avg(A,K,D,m,eta,gam,modelvar,epsilon)
K = K + epsilon;
n = length(D);
mseed = nnz(A)/2;
A = A > 0;
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
c = clustering_coef_bu(A);
k = sum(A,2);
Ff = Fd.*Fk.*~A;
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
P = Ff(indx);
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
uu = u(r);
vv = v(r);
A(uu,vv) = 1;
A(vv,uu) = 1;
k([uu,vv]) = k([uu,vv]) + 1;
bu = A(uu,:);
su = A(bu,bu);
bv = A(vv,:);
sv = A(bv,bv);
bth = bu & bv;
c(bth) = c(bth) + 2./(k(bth).^2 - k(bth));
c(uu) = nnz(su)/(k(uu)*(k(uu) - 1));
c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1));
c(k <= 1) = 0;
bth([uu,vv]) = true;
K(:,bth) = bsxfun(@plus,c(:,ones(1,sum(bth))),c(bth,:)')/2 + epsilon;
K(bth,:) = bsxfun(@plus,c(:,ones(1,sum(bth))),c(bth,:)')'/2 + epsilon;
switch mv2
case 'powerlaw'
Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam);
Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam);
case 'exponential'
Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam);
Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam);
end
Ff = Ff.*~A;
P = Ff(indx);
end
b = find(triu(A,1));
function b = fcn_clu_diff(A,K,D,m,eta,gam,modelvar,epsilon)
K = K + epsilon;
n = length(D);
mseed = nnz(A)/2;
A = A > 0;
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
c = clustering_coef_bu(A);
k = sum(A,2);
Ff = Fd.*Fk.*~A;
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
P = Ff(indx);
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
uu = u(r);
vv = v(r);
A(uu,vv) = 1;
A(vv,uu) = 1;
k([uu,vv]) = k([uu,vv]) + 1;
bu = A(uu,:);
su = A(bu,bu);
bv = A(vv,:);
sv = A(bv,bv);
bth = bu & bv;
c(bth) = c(bth) + 2./(k(bth).^2 - k(bth));
c(uu) = nnz(su)/(k(uu)*(k(uu) - 1));
c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1));
c(k <= 1) = 0;
bth([uu,vv]) = true;
K(:,bth) = abs(bsxfun(@minus,c(:,ones(1,sum(bth))),c(bth,:)')) + epsilon;
K(bth,:) = abs(bsxfun(@minus,c(:,ones(1,sum(bth))),c(bth,:)'))' + epsilon;
switch mv2
case 'powerlaw'
Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam);
Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam);
case 'exponential'
Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam);
Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam);
end
Ff = Ff.*~A;
P = Ff(indx);
end
b = find(triu(A,1));
function b = fcn_clu_max(A,K,D,m,eta,gam,modelvar,epsilon)
K = K + epsilon;
n = length(D);
mseed = nnz(A)/2;
A = A > 0;
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
c = clustering_coef_bu(A);
k = sum(A,2);
Ff = Fd.*Fk.*~A;
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
P = Ff(indx);
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
uu = u(r);
vv = v(r);
A(uu,vv) = 1;
A(vv,uu) = 1;
k([uu,vv]) = k([uu,vv]) + 1;
bu = A(uu,:);
su = A(bu,bu);
bv = A(vv,:);
sv = A(bv,bv);
bth = bu & bv;
c(bth) = c(bth) + 2./(k(bth).^2 - k(bth));
c(uu) = nnz(su)/(k(uu)*(k(uu) - 1));
c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1));
c(k <= 1) = 0;
bth([uu,vv]) = true;
K(:,bth) = bsxfun(@max,c(:,ones(1,sum(bth))),c(bth,:)') + epsilon;
K(bth,:) = bsxfun(@max,c(:,ones(1,sum(bth))),c(bth,:)')' + epsilon;
switch mv2
case 'powerlaw'
Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam);
Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam);
case 'exponential'
Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam);
Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam);
end
Ff = Ff.*~A;
P = Ff(indx);
end
b = find(triu(A,1));
function b = fcn_clu_min(A,K,D,m,eta,gam,modelvar,epsilon)
K = K + epsilon;
n = length(D);
mseed = nnz(A)/2;
A = A > 0;
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
c = clustering_coef_bu(A);
k = sum(A,2);
Ff = Fd.*Fk.*~A;
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
P = Ff(indx);
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
uu = u(r);
vv = v(r);
A(uu,vv) = 1;
A(vv,uu) = 1;
k([uu,vv]) = k([uu,vv]) + 1;
bu = A(uu,:);
su = A(bu,bu);
bv = A(vv,:);
sv = A(bv,bv);
bth = bu & bv;
c(bth) = c(bth) + 2./(k(bth).^2 - k(bth));
c(uu) = nnz(su)/(k(uu)*(k(uu) - 1));
c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1));
c(k <= 1) = 0;
bth([uu,vv]) = true;
K(:,bth) = bsxfun(@min,c(:,ones(1,sum(bth))),c(bth,:)') + epsilon;
K(bth,:) = bsxfun(@min,c(:,ones(1,sum(bth))),c(bth,:)')' + epsilon;
switch mv2
case 'powerlaw'
Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam);
Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam);
case 'exponential'
Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam);
Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam);
end
Ff = Ff.*~A;
P = Ff(indx);
end
b = find(triu(A,1));
function b = fcn_clu_prod(A,K,D,m,eta,gam,modelvar,epsilon)
K = K + epsilon;
n = length(D);
mseed = nnz(A)/2;
A = A > 0;
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
c = clustering_coef_bu(A);
k = sum(A,2);
Ff = Fd.*Fk.*~A;
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
P = Ff(indx);
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
uu = u(r);
vv = v(r);
A(uu,vv) = 1;
A(vv,uu) = 1;
k([uu,vv]) = k([uu,vv]) + 1;
bu = A(uu,:);
su = A(bu,bu);
bv = A(vv,:);
sv = A(bv,bv);
bth = bu & bv;
c(bth) = c(bth) + 2./(k(bth).^2 - k(bth));
c(uu) = nnz(su)/(k(uu)*(k(uu) - 1));
c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1));
c(k <= 1) = 0;
bth([uu,vv]) = true;
K(bth,:) = (c(bth,:)*c') + epsilon;
K(:,bth) = (c*c(bth,:)') + epsilon;
switch mv2
case 'powerlaw'
Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam);
Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam);
case 'exponential'
Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam);
Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam);
end
Ff = Ff.*~A;
P = Ff(indx);
end
b = find(triu(A,1));
function b = fcn_deg_avg(A,K,D,m,eta,gam,modelvar,epsilon)
n = length(D);
mseed = nnz(A)/2;
k = sum(A,2);
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
D = D(indx);
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
K = K + epsilon;
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
P = Fd.*Fk(indx).*~A(indx);
b = zeros(m,1);
b(1:mseed) = find(A(indx));
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
w = [u(r),v(r)];
k(w) = k(w) + 1;
switch mv2
case 'powerlaw'
Fk(:,w) = [((k + k(w(1)))/2) + epsilon, ((k + k(w(2)))/2) + epsilon].^gam;
Fk(w,:) = ([((k + k(w(1)))/2) + epsilon, ((k + k(w(2)))/2) + epsilon].^gam)';
case 'exponential'
Fk(:,w) = exp([((k + k(w(1)))/2) + epsilon, ((k + k(w(2)))/2) + epsilon]*gam);
Fk(w,:) = exp([((k + k(w(1)))/2) + epsilon, ((k + k(w(2)))/2) + epsilon]*gam)';
end
P = Fd.*Fk(indx);
b(i) = r;
P(b(1:i)) = 0;
end
b = indx(b);
function b = fcn_deg_diff(A,K,D,m,eta,gam,modelvar,epsilon)
n = length(D);
mseed = nnz(A)/2;
k = sum(A,2);
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
D = D(indx);
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
K = K + epsilon;
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
P = Fd.*Fk(indx).*~A(indx);
b = zeros(m,1);
b(1:mseed) = find(A(indx));
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
w = [u(r),v(r)];
k(w) = k(w) + 1;
switch mv2
case 'powerlaw'
Fk(:,w) = (abs([k - k(w(1)), k - k(w(2))]) + epsilon).^gam;
Fk(w,:) = ((abs([k - k(w(1)), k - k(w(2))]) + epsilon).^gam)';
case 'exponential'
Fk(:,w) = exp((abs([k - k(w(1)), k - k(w(2))]) + epsilon)*gam);
Fk(w,:) = exp((abs([k - k(w(1)), k - k(w(2))]) + epsilon)*gam)';
end
P = Fd.*Fk(indx);
b(i) = r;
P(b(1:i)) = 0;
end
b = indx(b);
function b = fcn_deg_min(A,K,D,m,eta,gam,modelvar,epsilon)
n = length(D);
mseed = nnz(A)/2;
k = sum(A,2);
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
D = D(indx);
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
K = K + epsilon;
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
P = Fd.*Fk(indx).*~A(indx);
b = zeros(m,1);
b(1:mseed) = find(A(indx));
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
w = [u(r),v(r)];
k(w) = k(w) + 1;
switch mv2
case 'powerlaw'
Fk(:,w) = [min(k,k(w(1))) + epsilon, min(k,k(w(2))) + epsilon].^gam;
Fk(w,:) = ([min(k,k(w(1))) + epsilon, min(k,k(w(2))) + epsilon].^gam)';
case 'exponential'
Fk(:,w) = exp([min(k,k(w(1))) + epsilon, min(k,k(w(2))) + epsilon]*gam);
Fk(w,:) = exp([min(k,k(w(1))) + epsilon, min(k,k(w(2))) + epsilon]*gam)';
end
P = Fd.*Fk(indx);
b(i) = r;
P(b(1:i)) = 0;
end
b = indx(b);
function b = fcn_deg_max(A,K,D,m,eta,gam,modelvar,epsilon)
n = length(D);
mseed = nnz(A)/2;
k = sum(A,2);
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
D = D(indx);
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
K = K + epsilon;
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
P = Fd.*Fk(indx).*~A(indx);
b = zeros(m,1);
b(1:mseed) = find(A(indx));
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
w = [u(r),v(r)];
k(w) = k(w) + 1;
switch mv2
case 'powerlaw'
Fk(:,w) = [max(k,k(w(1))) + epsilon, max(k,k(w(2))) + epsilon].^gam;
Fk(w,:) = ([max(k,k(w(1))) + epsilon, max(k,k(w(2))) + epsilon].^gam)';
case 'exponential'
Fk(:,w) = exp([max(k,k(w(1))) + epsilon, max(k,k(w(2))) + epsilon]*gam);
Fk(w,:) = exp([max(k,k(w(1))) + epsilon, max(k,k(w(2))) + epsilon]*gam)';
end
P = Fd.*Fk(indx);
b(i) = r;
P(b(1:i)) = 0;
end
b = indx(b);
function b = fcn_deg_prod(A,K,D,m,eta,gam,modelvar,epsilon)
n = length(D);
mseed = nnz(A)/2;
k = sum(A,2);
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
D = D(indx);
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
K = K + epsilon;
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
P = Fd.*Fk(indx).*~A(indx);
b = zeros(m,1);
b(1:mseed) = find(A(indx));
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
w = [u(r),v(r)];
k(w) = k(w) + 1;
switch mv2
case 'powerlaw'
Fk(:,w) = ([k*k(w(1)) + epsilon, k*k(w(2)) + epsilon].^gam);
Fk(w,:) = (([k*k(w(1)) + epsilon, k*k(w(2)) + epsilon].^gam)');
case 'exponential'
Fk(:,w) = exp([k*k(w(1)) + epsilon, k*k(w(2)) + epsilon]*gam);
Fk(w,:) = exp([k*k(w(1)) + epsilon, k*k(w(2)) + epsilon]*gam)';
end
P = Fd.*Fk(indx);
b(i) = r;
P(b(1:i)) = 0;
end
b = indx(b);
function b = fcn_nghbrs(A,K,D,m,eta,gam,modelvar,epsilon)
K = K + epsilon;
n = length(D);
mseed = nnz(A)/2;
A = A > 0;
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
switch mv2
case 'powerlaw'
% gam = abs(gam);
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
Ff = Fd.*Fk.*~A;
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
P = Ff(indx);
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
uu = u(r);
vv = v(r);
x = A(uu,:);
y = A(:,vv);
A(uu,vv) = 1;
A(vv,uu) = 1;
K(uu,y) = K(uu,y) + 1;
K(y,uu) = K(y,uu) + 1;
K(vv,x) = K(vv,x) + 1;
K(x,vv) = K(x,vv) + 1;
switch mv2
case 'powerlaw'
Ff(uu,y) = Fd(uu,y).*(K(uu,y).^gam);
Ff(y,uu) = Ff(uu,y)';
Ff(vv,x) = Fd(vv,x).*(K(vv,x).^gam);
Ff(x,vv) = Ff(vv,x)';
case 'exponential'
Ff(uu,y) = Fd(uu,y).*exp(K(uu,y)*gam);
Ff(y,uu) = Ff(uu,y)';
Ff(vv,x) = Fd(vv,x).*exp(K(vv,x)*gam);
Ff(x,vv) = Ff(vv,x)';
end
Ff(A) = 0;
P = Ff(indx);
end
b = find(triu(A,1));
function b = fcn_matching(A,K,D,m,eta,gam,modelvar,epsilon)
K = K + epsilon;
n = length(D);
mseed = nnz(A)/2;
mv1 = modelvar{1};
mv2 = modelvar{2};
switch mv1
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
Ff = Fd.*Fk.*~A;
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
P = Ff(indx);
for ii = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
uu = u(r);
vv = v(r);
A(uu,vv) = 1;
A(vv,uu) = 1;
updateuu = find(A*A(:,uu));
updateuu(updateuu == uu) = [];
updateuu(updateuu == vv) = [];
updatevv = find(A*A(:,vv));
updatevv(updatevv == uu) = [];
updatevv(updatevv == vv) = [];
c1 = [A(:,uu)', A(uu,:)];
for i = 1:length(updateuu)
j = updateuu(i);
c2 = [A(:,j)' A(j,:)];
use = ~(~c1&~c2);
use(uu) = 0; use(uu+n) = 0;
use(j) = 0; use(j+n) = 0;
ncon = sum(c1(use))+sum(c2(use));
if (ncon==0)
K(uu,j) = epsilon;
K(j,uu) = epsilon;
else
K(uu,j) = (2*(sum(c1(use)&c2(use))/ncon)) + epsilon;
K(j,uu) = K(uu,j);
end
end
c1 = [A(:,vv)', A(vv,:)];
for i = 1:length(updatevv)
j = updatevv(i);
c2 = [A(:,j)' A(j,:)];
use = ~(~c1&~c2);
use(vv) = 0; use(vv+n) = 0;
use(j) = 0; use(j+n) = 0;
ncon = sum(c1(use))+sum(c2(use));
if (ncon==0)
K(vv,j) = epsilon;
K(j,vv) = epsilon;
else
K(vv,j) = (2*(sum(c1(use)&c2(use))/ncon)) + epsilon;
K(j,vv) = K(vv,j);
end
end
switch mv2
case 'powerlaw'
Fk = K.^gam;
case 'exponential'
Fk = exp(gam*K);
end
Ff = Fd.*Fk.*~A;
P = Ff(indx);
end
b = find(triu(A,1));
function b = fcn_sptl(A,D,m,eta,modelvar)
n = length(D);
mseed = nnz(A)/2;
switch modelvar
case 'powerlaw'
Fd = D.^eta;
case 'exponential'
Fd = exp(eta*D);
end
[u,v] = find(triu(ones(n),1));
indx = (v - 1)*n + u;
P = Fd(indx).*~A(indx);
b = zeros(m,1);
b(1:mseed) = find(A(indx));
for i = (mseed + 1):m
C = [0; cumsum(P)];
r = sum(rand*C(end) >= C);
b(i) = r;
P = Fd(indx);
P(b(1:i)) = 0;
end
b = indx(b);
|
#ifndef ENVIRONMENT_INCLUDE
#define ENVIRONMENT_INCLUDE
#include <map>
#include <string>
#include <vector>
#include <gsl/string_span>
namespace execHelper {
namespace config {
using EnvArg = std::string;
using EnvArgs = std::vector<EnvArg>;
using EnvironmentCollection = std::map<std::string, EnvArg>;
using EnvironmentValue = std::pair<std::string, EnvArg>;
static const gsl::czstring<> ENVIRONMENT_KEY = "environment";
} // namespace config
} // namespace execHelper
#endif /* ENVIRONMENT_INCLUDE */
|
Formal statement is: lemma prime_nat_iff: "prime (n :: nat) \<longleftrightarrow> (1 < n \<and> (\<forall>m. m dvd n \<longrightarrow> m = 1 \<or> m = n))" Informal statement is: A natural number $n$ is prime if and only if $n > 1$ and the only divisors of $n$ are $1$ and $n$. |
State Before: G : Type ?u.32208
inst✝² : Group G
A : Type ?u.32214
inst✝¹ : AddGroup A
N : Type ?u.32220
inst✝ : Group N
a b k : ℤ
⊢ (fun x => x • a) k = b ↔ b = a * k State After: no goals Tactic: rw [mul_comm, eq_comm, ← smul_eq_mul] |
[STATEMENT]
lemma degree_Poly: "degree (Poly xs) \<le> length xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. degree (Poly xs) \<le> length xs
[PROOF STEP]
by (induct xs) simp_all |
`is_element/total_preord` := (A::set) -> proc(R)
local AA,a,b,ab,U;
global reason;
AA := [seq(seq([a,b],b in A),a in A)];
if not(`is_element/preord`(A)(R)) then
reason := [convert(procname,string),"R is not a preorder",R,reason];
return false;
fi;
if not(`is_total/preord`(A)(R)) then
reason := [convert(procname,string),"R is not a total preorder on A",R,A];
return false;
fi;
return true;
end;
`is_equal/total_preord` := (A::set) -> (R1,R2) -> `is_equal/preord`(A)(R1,R2);
`is_leq/total_preord` := (A::set) -> (R1,R2) -> `is_leq/preord`(A)(R1,R2);
######################################################################
`build/total_preord` := (A::set) -> proc(pi)
local m,r,i,a,b,R;
m := nops(pi);
r := table();
for i from 1 to m do
for a in pi[i] do
r[a] := i;
od;
od;
R := NULL;
for a in A do
for b in A do
if r[a] <= r[b] then R := R,[a,b]; fi;
od;
od;
R := {R};
return R;
end;
######################################################################
`random_element/total_preord` := (A::set) -> proc()
local pi,pi0,pi1,m,r,i,a,b,R;
pi := `random_element/partitions`(A)();
pi0 := map(u -> sort([op(u)]),pi);
pi0 := sort([op(pi0)]);
pi1 := combinat[randperm](pi0);
return `build/total_preord`(A)(pi1);
end;
######################################################################
`list_elements/total_preord` := proc(A::set)
local PI,pi,pi0,pi1,L;
PI := `list_elements/partitions`(A);
L := NULL;
for pi in PI do
pi0 := map(u -> sort([op(u)]),pi);
pi0 := sort([op(pi0)]);
for pi1 in combinat[permute](pi0) do
L := L,`build/total_preord`(A)(pi1);
od;
od;
L := [L];
return L;
end;
######################################################################
`count_elements/total_preord` := proc(A::set)
local n,k;
n := nops(A);
add(k! * Stirling2(n,k),k=1..n);
end;
|
# Rounding
$\newcommand{\RR}{\mathbb{R}}$
$\newcommand{\FF}{\mathbb{F}}$
We have a representation of the uncountable set $\RR$ through the finite set $\FF$. How can we go from a real number to a floating-point number, if the real number is not exactly representable (which is the case for *almost all* real numbers!)? For this purpose we extend the reals and the floats with $\pm \infty$, and we will speak of the extended reals $\RR^* := \RR \cup \{-\infty, +\infty \}$ and the extended floats $\FF^* := \FF \cup \{-\infty, +\infty \}$.
A map $\bigcirc: \RR^* \to \FF^*$ is a *rounding operation* if
- For every $x \in \FF^*$, $\bigcirc(x) = x$.
- If $x, y \in \RR^*$ and $x \le y$, then $\bigcirc(x) \le \bigcirc(y)$.
That is, rounding leaves the numbers representable in the floating-point system invariant, and it preserves order.
We will use two rounding modes: $\bigtriangleup$, which rounds up (towards $+\infty$), and $\bigtriangledown$, which rounds down (towards $-\infty$). They are defined as follows:
- $\bigtriangleup \! (x) := \min \{y \in \FF^*: y \ge x \}$
- $\bigtriangledown(x) := \max \{y \in \FF^*: y \le x \}$
We speak of $x$ rounded up and $x$ rounded down, respectively; these two rounding modes are called *directed* rounding modes.
Note that it is not obvious how to implement these operations on a computer, since *we cannot represent the original real numbers*!
[1] (i) If we had a positive real number $x$ (that is, with infinite precision), how could we find $\bigtriangleup(x)$ and $\bigtriangledown(x)$?
(ii) Find $\bigtriangleup(0.1)$ and $\bigtriangledown(0.1)$ for IEEE floating-point arithmetic. By how much do they differ? What can we say about the *rounding error*?
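One possible way to approach part (ii) is sketched below; it assumes the (pre-1.0) Julia interface `set_rounding(Float64, mode)` that this notebook uses later on, together with the standard mode constants `RoundDown`, `RoundUp` and `RoundNearest`. Since $1$ and $10$ are both exactly representable, dividing them under each directed mode yields the two roundings of the real number $1/10$:
    set_rounding(Float64, RoundDown)
    down = 1/10                  # the quotient rounded towards -Inf, i.e. 1/10 rounded down
    set_rounding(Float64, RoundUp)
    up = 1/10                    # the quotient rounded towards +Inf, i.e. 1/10 rounded up
    set_rounding(Float64, RoundNearest)   # restore the default rounding mode
    up - down                    # width of the enclosure: one unit in the last place
The difference `up - down` is one unit in the last place of `0.1`, so the error committed by any single rounding of $1/10$ is at most that width.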
[2] Do the previous exercise again for $x=1.1$ and for $x=10.1$.
[3] What happens to $\bigtriangleup(x)$ and $\bigtriangledown(x)$ if $x \in \FF^*$?
[4] What is the relationship between $\bigtriangleup(-x)$ and $\bigtriangledown(x)$?
There are other rounding modes besides the ones mentioned above:
- Rounding towards zero (truncation): $\square_z(x) = {\rm sign}(x) \max\{y \in \FF^*: y\leq |x| \}$
- Round to nearest: as its name indicates, we round to the nearest floating-point number, using $\bigtriangleup(x)$ or $\bigtriangledown(x)$ as appropriate. Defining $\mu = (\bigtriangleup(x) + \bigtriangledown(x))/2$, then:
\begin{equation}
\square_n(x) = \left\{
\begin{array}{l l}
\bigtriangledown(x), &x\in[\bigtriangledown(x),\mu)\\
\bigtriangleup(x), &x\in[\mu,\bigtriangleup(x)].\\
\end{array}
\right.
\end{equation}
- Round to nearest *even*: this is similar to the previous mode, but it makes rounding up and rounding down occur with the same probability. (The subtlety in the definition of $\square_n(x)$ is that it involves one closed and one half-open interval.) Its definition involves the parity of the last digit of the floating-point representations of $\bigtriangleup(x)$ and $\bigtriangledown(x)$. This is the most common rounding mode. A small sketch contrasting these modes follows below.
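As a quick illustration (a sketch only, using the standard Julia functions `trunc` and `round`, whose default behaviour is assumed here to be rounding towards zero and round-to-nearest-even respectively, plus the `set_rounding` interface used later in this notebook):
    trunc(-1.7)         # rounding towards zero gives -1.0
    round(2.5)          # round to nearest even gives 2.0: the tie goes to the even neighbour
    round(3.5)          # round to nearest even gives 4.0
    set_rounding(Float64, RoundUp);   1/3    # the same quotient rounded towards +Inf
    set_rounding(Float64, RoundDown); 1/3    # ... and towards -Inf: one ulp smaller
    set_rounding(Float64, RoundNearest)      # restore the default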
## Floating-point arithmetic
How can we do arithmetic in the floating-point world?
[5] Find some examples of pairs of numbers $x , y \in \FF$ such that $x \oplus y \notin \FF$. (Here, $\FF$ denotes the IEEE double-precision floats, and $\oplus$ is some arithmetic operation between $x$ and $y$.)
[6] What can we do about this?
[7] In the reals we have that if $x+y = x+y'$ holds, then $y = y'$. Does this hold for floating-point numbers? If your answer is **no**, give an example.
[8] Analyze the case of iterating the map $f:[0,1] \to [0,1]$ given by $f(x) = 3x \mathrm{\ mod\ } 1$, with the initial condition $x_0 = \frac{1}{10}$:
1. What happens analytically?
2. What happens numerically?
3. What happens if you consider an arbitrary initial condition $x_0$?
[Note: $\mathrm{mod\ } 1$ means that at each step we keep only the fractional part, between $0$ and $1$, of the result.]
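One way the numerical experiment of exercise [8] might be set up is sketched below; the `Rational` initial condition `1//10` is used only to have an exact reference orbit to compare against, and the loop length of 30 is an arbitrary illustrative choice:
    x_float = 1/10                     # Float64 initial condition
    x_exact = 1//10                    # exact rational initial condition
    for i in 1:30
        x_float = mod(3 * x_float, 1)  # floating-point iteration: the error grows by a factor of ~3 per step
        x_exact = mod(3 * x_exact, 1)  # exact iteration: cycles through 3/10, 9/10, 7/10, 1/10, ...
    end
    x_float, float(x_exact)            # the two orbits visibly separate after a few dozen steps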
## Applying rounding to obtain guaranteed results
We are now in a position to start doing useful calculations.
Following Tucker's book, let us consider the infinite sum
$$S = \sum_{n=1}^\infty \frac{1}{n^2}.$$
It is known that $S = \frac{\pi^2}{6}$.
[9] Compute $S$ numerically in a naive way.
To compute $S$ numerically but in a *guaranteed* way, we have two tasks: we must deal with the infinite sum, and then guarantee that the result really does contain the true value.
[10] Let the tail of the sum be $T_N := \sum_{n=N+1}^\infty \frac{1}{n^2}$. Use a geometric argument to show that
$$\int_{N+1}^\infty \frac{1}{x^2} dx < T_N < \int_{N+1}^\infty \frac{1}{(x-1)^2} dx,$$
and thus find bounds for $T_N$.
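For reference, the two integrals evaluate directly (using the antiderivatives $-1/x$ and $-1/(x-1)$), which gives the explicit tail enclosure used in the sketch further below:
$$\int_{N+1}^\infty \frac{dx}{x^2} = \frac{1}{N+1}, \qquad \int_{N+1}^\infty \frac{dx}{(x-1)^2} = \frac{1}{N}, \qquad \text{so} \qquad \frac{1}{N+1} < T_N < \frac{1}{N}.$$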
[11] Use rounding down and rounding up to compute bounds for the initial part $S_N := \sum_{n=1}^N n^{-2}$.
[To change the rounding mode in Julia, we use
set_rounding(Float64, RoundUp)
]
[12] Use your last two results to give *rigorous* (that is, guaranteed) bounds for $S$.
Verify that the true value is indeed contained within your bounds.
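A sketch of how the pieces could be combined is shown below. It assumes the `set_rounding` interface quoted above and the tail bounds $1/(N+1) < T_N < 1/N$ from exercise [10]; the value of $N$ and the order of summation are arbitrary illustrative choices (summing the smallest terms first merely keeps the enclosure tight, while the rounding modes alone guarantee its correctness):
    N = 10^4
    set_rounding(Float64, RoundDown)
    S_lo = 0.0
    for n in N:-1:1
        S_lo += 1/(n^2)        # every partial result is rounded towards -Inf
    end
    S_lo += 1/(N+1)            # add a lower bound for the tail T_N
    set_rounding(Float64, RoundUp)
    S_hi = 0.0
    for n in N:-1:1
        S_hi += 1/(n^2)        # every partial result is rounded towards +Inf
    end
    S_hi += 1/N                # add an upper bound for the tail T_N
    set_rounding(Float64, RoundNearest)
    (S_lo, S_hi)               # a rigorous enclosure of S; pi^2/6 should lie inside it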
[13] Repeat the computation with `BigFloat` to obtain more precision.
[In Julia, to change the precision of `BigFloat`, we use
set_bigfloat_precision(100).
]
|
\makeatletter \@ifundefined{rootpath}{\input{../../setup/preamble.tex}}\makeatother
\worksheetstart{Reflection}{1}{April 24, 2013}{Andreas}{../../}
This chapter presents a reflection on decisions made throughout the report. Common to these decisions is that they arguably had an impact on the conclusion. We will discuss the potential impact of each decision and reason about how making different decisions could have affected the outcome.
%\lone[inline]{Reflection on choice of concurrency models}
%\lone[inline]{Evaluation of implementation effort}
%\lone[inline]{Reflection on choice of distance measure}
%\lone[inline]{Readability and Wriability as requirements. Tightly coupled to implementation}
%\lone[inline]{Count clock cycles since STM can perform extra work, where TL waits (wastes no cycles)}
%\lone[inline]{We wanted to compare the models, but there is variations of the models, which we did not know when we started}
%\lone[inline]{We using models to be more general about the concepts. But we use a specific implementations. There is a dilemma between not judging on the same implementations in concepts and performance, but also wanting to cover more than a specific implementation}
%\lone[i]{Reflect on the choice of using only Actor model and not CSP}
%\toby[i]{Maybe also reflect on our map-reduce choice in relation to the implementation - but I do not think there is that much to reflect on}
%\kasper[inline]{Boxing af integers i Java implementation. int[] halverede tid sammenlignet med generic ArrayList Integer}
\section{Comparing Models}
Choosing to compare concurrency models instead of specific implementations of the models has presented a number of issues. Implementations of the selected concurrency models vary in how they choose to implement the model. Some actor implementations do not support all semantic properties, and the strategies vary greatly among the \ac{STM} implementations. At times it proved difficult to decide what should be included in the descriptions and evaluations.
The performance test is based on a single implementation of each concurrency model. We have attempted to select competitive implementations of the models that give a representative view of their capabilities, but in the end the performance test only compares the selected implementations. We do, however, believe that such a test can give an indication of what to expect from the models. But as we will discuss in \bsref{sec:extended_performance_test}, a range of other tests would give a more complete comparison.
\section{Choice of Concurrency Models}
To select the concurrency models to investigate further, a preliminary investigation was conducted in \bsref{chap:intro}. The investigation covered \ac{TL}, the actor model, \ac{Rx}, \ac{STM} and \ac{CSP}, of which \ac{TL}, the actor model and \ac{STM} were selected for further investigation.
When the concurrency models were initially selected, our estimation was that the \ac{TL} concurrency model and \ac{STM} were somewhat dissimilar. During the course of the project the similarities between the two concurrency models became clearer. From the point of view of the programmer the two models handle concurrency very similarly: both utilise shared memory and require the application of synchronization to critical regions in order to avoid race conditions. How synchronization is achieved is, however, different.
\ac{CSP} is in many ways similar to the actor model, substituting the actor model's message passing with channel-based communication. Based on this, the actor model was preferred as it has good support in a number of languages.
Including a concurrency model with a vastly different focus could have been of interest. As an example, selecting a concurrency model with a focus on supercomputers, such as that of the language X10\cite{tardieu2014x10} described briefly in \bsref{chap:intro}, could provide a new perspective for the report.
\section{Choice of Characteristics}
For investigating the characteristics of the selected concurrency models, a number of characteristics were selected and discussed in \bsref{chap:char}. Implicit or Explicit Concurrency, Fault Restrictive or Expressive Model and Pessimistic or Optimistic Model were selected as they affect the use of concurrency models. Although we chose a wide selection, had other characteristics been chosen, it could have affected the result of the comparison of the characteristics.
Besides these characteristics, a number of characteristics based on existing literature for evaluating programming languages were employed, the main focus being readability and writability. Traditionally these two characteristics are supported by a number of other characteristics. Only the subset applicable to evaluating concurrency models was employed. Data types and syntax design were discarded as they do not fit the purpose of the evaluation. The concurrency models do not encompass specific data types, and the evaluation focuses on the models instead of a particular syntax.
Besides the selected characteristics, it could have been of interest to examine the maintainability of the selected concurrency models. Software maintenance is important as systems can live for many years and must therefore be adapted to changing needs and corrected. Furthermore, the cost of software maintenance can constitute a significant portion of a software solution's total cost\cite[p. 17]{sebestaProLang}.
Evaluating the characteristics of concurrency models instead of specific implementations did present some difficulty. To support the evaluation, an implementation of a concurrent problem such as the dining philosophers problem\cite[p. 673]{hoare1978communicating} or the Santa Claus problem\cite{trono1994new} could have been created. The evaluation of characteristics could then have been based on the implementation, referring to it to clarify decisions. As the goal of this report was to evaluate the models and not a concrete implementation, care should be taken not to focus on the details of the specific concurrency model implementations employed.
\section{Performance Test}\label{sec:reflec_perf_test}
The choice to employ the $k$-means clustering algorithm for testing performance proved to be problematic. Firstly, the work done by the algorithm is considered parallel according to \bsref{def:concurrency}. Parallelism builds upon concurrency, but the goals are different. Evaluating the execution time of an inherently concurrent implementation can, however, be difficult, as the system does not have an overall task for which time can be measured.
Designing test cases that emphasised the performance of the concurrency models proved to be problematic. The exploratory test described in \bsref{sec:performance_test_design} provided some good insights which were used for developing the remaining test cases. It also revealed a very small difference between the performance of the selected concurrency models. As the Map-Reduce design of the implementations allowed for very limited use of synchronization, the concurrency model employed had limited impact on the overall performance. Therefore it was decided to create an additional line of testing where the concurrency models were put under a higher level of stress. The resulting test cases are described in \bsref{sec:performance_sync_intensive_desc}. The exploratory test provided the insights needed for designing these test cases.
The original idea was to have two tests: one scaling the number of vectors and one scaling the number of mappers. Each of these tests was to be run for 10 iterations. As the Map-Reduce design of the implementations allowed for very limited use of synchronization, the concurrency model employed had limited impact on the overall performance. As a result, the performance of the implementations was very similar. To overcome this issue the tests were moved from employing a large dataset over a small number of iterations to employing a smaller dataset and running for a high number of iterations. Reducing the size of the dataset limits the time spent on clustering, and increasing the number of iterations results in the code segments related to the concurrency models being executed more often. As a result the concurrency models had a larger impact on the results, providing a clearer picture of their performance.
Employing an inherently concurrent problem such as the dining philosophers problem\cite[p. 673]{hoare1978communicating} or the Santa Claus problem\cite{trono1994new} could be an alternative to the $k$-means clustering algorithm. Throughput, such as the number of times the philosophers get to eat within a given time period, could have been measured as an alternative to execution time.
Counting the CPU time spent in addition to the real time spent could provide a more accurate measure of the resources used by each concurrency model, considering that \ac{STM} may have to abort and retry transactions, leading to additional CPU time used where \ac{TL} would simply wait. The impact of this depends on the environment in which the program runs. If there is only a single task, all resources should be utilised, and waiting for another thread is considered a waste of time. However, if there are other programs running, hogging the resources for failing transactions might lead to an overall slower result when executing all the programs.
\section{Shared Clustering Implementation}
The choice of using the same code base for the clustering calculations across all concurrency model implementations was made to provide a common ground for the implementations. Utilising the same clustering code for each implementation ensures that the clustering takes an equal amount of time for all implementations. As such, any variations in execution time can be attributed to the concurrency models and not the clustering code.
%Using the same code may pose the threat of making it difficult or impossible to spilt the $k$-means clustering algorithm up differently for each of the models. One split of the algorithm may fit better for a given model than the others.
Because the common cluster code was developed simultaneously with the \ac{TL} implementation, it poses the threat of over-fitting the cluster code to the \ac{TL} concurrency model, that is, forcing the mindset of programming with \ac{TL} upon the other concurrency models through the cluster code. The code was, however, developed to fit the employed Map-Reduce strategy and not the actual \ac{TL} implementation. As such, we are not of the impression that we have over-fitted the common code to any of the implementations, as we were able to model both the actor model and \ac{STM} implementations without any restrictions posed by \ac{TL}. However, as the cluster code is written in Java, we were forced to use the data structures from Java, such as the Java List, in the actor model and \ac{STM} implementations.
Additionally, during the project we changed the distance measure used for the $k$-means clustering algorithm because we wanted a less computationally demanding distance measure. As a result of our common clustering code we were able to implement this distance measure a single time and have it take effect across all implementations quickly.
\worksheetend |
State Before: α : Type u_1
β : Type ?u.33505
γ : Type ?u.33508
dec : DecidableEq α
a b : α
x : List α
⊢ rmatch 1 x = true ↔ x = [] State After: no goals Tactic: induction x <;> simp [rmatch, matchEpsilon, *] |
Require Import Coq.Lists.List.
Require Import Coq.Arith.PeanoNat.
Require Import Coq.Arith.Bool_nat.
Require Import Init.Nat.
Import ListNotations.
Definition vertex := nat.
Definition edge := prod vertex vertex.
(* A finite graph, given by its list of vertices and its list of edges;
   the accessors v_list and e_list are used throughout the rest of the file. *)
Record graph : Set := Graph {
  v_list : list vertex;
  e_list : list edge
}.
Definition cover := list edge.
Definition size (c : cover) := length c.
Definition gt n m := negb (n <=? m).
Definition empty : graph := Graph [] [].
Definition incident (v : vertex) (e : edge) :=
match e with
| (x,y) => x = v \/ y = v
end.
Definition incident_b (v : vertex) (e : edge) :=
match e with
| (x,y) => orb (x =? v) (y =? v)
end.
Definition is_cover (es : cover) (g : graph) :=
  incl es (e_list g) /\
  forall v : vertex, In v (v_list g) ->
  ex (fun e => In e es /\ incident v e).
Fixpoint elem {X : Type} (f : X -> X -> bool) e l :=
match l with
| [] => false
| x::xs => if f e x then true else elem f e xs
end.
Definition incl_b {X : Type} f (l m : list X) := forallb (fun x => elem f x m) l.
Definition is_cover_b (es : cover) (g : graph) :=
andb
(incl_b (fun e1 e2 => match (e1,e2) with ((x1,y1),(x2,y2)) => andb (eqb x1 x2) (eqb y1 y2) end)
es (e_list g))
(forallb (fun v => existsb (incident_b v) es) (v_list g)).
Fixpoint all_subset {X : Type} (l : list X) :=
match l with
| [] => [[]]
| x::xs =>
let subsets := all_subset xs in
subsets ++ (map (fun e => x::e) subsets)
end.
Compute all_subset [1;2;3].
Check option.
Search (False -> _).
Check fold_left.
Definition min (l : list nat) d := fold_left (fun acc x => if x <? acc then x else acc) l d.
Fixpoint min_vc (g : graph) : option nat :=
let covers := filter (fun c => is_cover_b c g) (all_subset (e_list g)) in
match covers with
| [] => None
| x::xs =>
let k := min (map (fun c => length c) covers) (length x) in
Some k
end.
Compute min_vc (Graph [1;2;3;4;5] [(1,2);(3,2);(3,4);(5,4)]).
Theorem min_vc_correct : forall (k : nat) (g : graph),
min_vc g = Some k -> ~ ex (fun c => is_cover c g /\ size c <= k).
Proof.
Admitted. |
Engineering is extensive. It organizes the space around us, from our flats to the general urban infrastructure. This is why students of architectural studies need architecture help in order to excel in this subject. The scope of architectural studies is very vast today, and students need architecture homework help to deal with their homework and assignments. An architect can manage both interior design and the planning of large housing or public facilities, and can address worldwide urban challenges as well.
However, present-day architecture students are frequently involved not only in "grand" engineering but also in "small" structures, for example, the design of interiors, both public and private. Furthermore, they can work as decorators, making interiors look great. In any case, it is difficult for an architecture student to perform well without architecture homework help.
While studying design, students will also study other disciplines, for example, the history of architecture. The history of architecture is an academic discipline that explores the functional, structural and aesthetic development of design in time and space according to social needs.
WHY GET ARCHITECTURE HOMEWORK HELP?
If you choose to pursue this discipline, you might be faced with a ton of homework. If you experience issues with assignments, you can ask for our architecture homework help. Our online architecture homework help will assist you with your tasks whenever you need it. If you select our online architecture homework help, you can also be certain that your personal data will be completely protected. Your personal information will never be shared with a third party, and nobody will ever know that you used our service. Every student can afford our architecture homework help, as we have reasonable prices. We understand that students can't spend a great deal of money on such services – that is why you won't be charged a high price on our site.
You can use our architecture homework help whenever you need it, as we work around the clock. This is particularly helpful since different countries have different time zones. Also, we are here to help students from anywhere in the world.
The architecture degree is a professional study course planned to teach you architectural design, history, and theory and to supply you with all the necessary introductory and construction-material courses. Students who are ready to pursue a degree in design are required to complete a graduate program, and they can benefit from our architecture homework help.
The engineering field is thought to be a great topic for research; it is most appropriate for people who are fascinated by buildings, structures, and other physical components. It inspires them to create something moving.
Engineering students require theoretical and practical skills, as well as architecture homework help, to complete their assignments. So, if you are encountering any issues with your design task, you have come to the ideal place. Here at MyCourseHelp, we have years of experience in offering first-rate answers for your assignments.
HOW CAN ARCHITECTURE HOMEWORK HELP MAKE YOUR TASKS EASY?
The Best Experts – We offer a dependable, skilled, and devoted group of specialists who specifically handle engineering assignments.
Innovation – We compose only 100% original, plagiarism-free papers and error-free assignments.
All-day Support and Live Chat – Our support team is ready to help you day and night with any inquiry relevant to your task. You can also have a live chat with your writer.
100% Confidentiality – We ensure your privacy and no third-party involvement.
Reasonable Prices – We realize that every high school and college student has a restricted budget, so we set up corresponding prices.
MyCourseHelp composes assignments on practically every discipline related to design. Regardless of whether you have a basic design task or a complex case study, report or essay, our specialists make it simple for you. MyCourseHelp has helped numerous students pursue an education in building studies. In this manner, don't stress over your grades, simply order assignments online at MyCourseHelp.
In order to excel in the architecture field, a student has to keep a few points in mind. The assignment should be well planned and researched, and this is where they'll need architecture homework help. Keep your assignments as your first priority, because this is where students will get the chance to show their potential to their teachers. What is important is to focus on the core requirements that will differentiate your assignments from those of other students.
In order to get better marks, what is critical is to follow the learning guidelines prepared by our assignment writers. Minor blunders and less attention to the guidelines can make you lose marks, regardless of all the hard work. Another imperative aspect is referencing and where you source your material from. Simply flipping through a few websites won't do the job for you. Journals and research papers offer substantial and not so easily accessible information that can give you that edge over others.
# functions related to beta distributions
import .RFunctions:
betapdf,
betalogpdf,
betacdf,
betaccdf,
betalogcdf,
betalogccdf,
betainvcdf,
betainvccdf,
betainvlogcdf,
betainvlogccdf
# pdf for numbers with generic types
betapdf(α::Real, β::Real, x::Number) = x^(α - 1) * (1 - x)^(β - 1) / beta(α, β)
# logpdf for numbers with generic types
betalogpdf(α::Real, β::Real, x::Number) = (α - 1) * log(x) + (β - 1) * log1p(-x) - lbeta(α, β)
|
! { dg-do compile }
! Tests patch for PR29407, in which the declaration of 'my' as
! a local variable was ignored, so that the procedure and namelist
! attributes for 'my' clashed..
!
! Contributed by Tobias Burnus <[email protected]>
!
program main
implicit none
contains
subroutine my
end subroutine my
subroutine bar
integer :: my
namelist /ops/ my
end subroutine bar
end program main
|
(* This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.*)
theory TIP_sort_nat_TSortCount
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
datatype Nat = Z | S "Nat"
datatype Tree = TNode "Tree" "Nat" "Tree" | TNil
fun plus :: "Nat => Nat => Nat" where
"plus (Z) y = y"
| "plus (S z) y = S (plus z y)"
fun le :: "Nat => Nat => bool" where
"le (Z) y = True"
| "le (S z) (Z) = False"
| "le (S z) (S x2) = le z x2"
fun flatten :: "Tree => Nat list => Nat list" where
"flatten (TNode q z r) y = flatten q (cons2 z (flatten r y))"
| "flatten (TNil) y = y"
fun count :: "'a => 'a list => Nat" where
"count x (nil2) = Z"
| "count x (cons2 z ys) =
(if (x = z) then plus (S Z) (count x ys) else count x ys)"
fun add :: "Nat => Tree => Tree" where
"add x (TNode q z r) =
(if le x z then TNode (add x q) z r else TNode q z (add x r))"
| "add x (TNil) = TNode TNil x TNil"
fun toTree :: "Nat list => Tree" where
"toTree (nil2) = TNil"
| "toTree (cons2 y xs) = add y (toTree xs)"
fun tsort :: "Nat list => Nat list" where
"tsort x = flatten (toTree x) (nil2)"
theorem property0 :
"((count x (tsort xs)) = (count x xs))"
oops
end
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.tactic.doc_commands
import Mathlib.PostPort
namespace Mathlib
namespace tactic
namespace interactive
/--
This is a "finishing" tactic modification of `simp`. It has two forms.
* `simpa [rules, ...] using e` will simplify the goal and the type of
`e` using `rules`, then try to close the goal using `e`.
Simplifying the type of `e` makes it more likely to match the goal
(which has also been simplified). This construction also tends to be
more robust under changes to the simp lemma set.
* `simpa [rules, ...]` will simplify the goal and the type of a
hypothesis `this` if present in the context, then try to close the goal using
the `assumption` tactic. -/
|
/-
Copyright (c) 2021 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import data.fin.basic
import order.succ_pred.basic
/-!
# Successors and predecessors of naturals
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file, we show that `ℕ` is both an archimedean `succ_order` and an archimedean `pred_order`.
-/
open function order
namespace nat
@[reducible] -- so that Lean reads `nat.succ` through `succ_order.succ`
instance : succ_order ℕ :=
{ succ := succ,
..succ_order.of_succ_le_iff succ (λ a b, iff.rfl) }
@[reducible] -- so that Lean reads `nat.pred` through `pred_order.pred`
instance : pred_order ℕ :=
{ pred := pred,
pred_le := pred_le,
min_of_le_pred := λ a ha, begin
cases a,
{ exact is_min_bot },
{ exact (not_succ_le_self _ ha).elim }
end,
le_pred_of_lt := λ a b h, begin
cases b,
{ exact (a.not_lt_zero h).elim },
{ exact le_of_succ_le_succ h }
end,
le_of_pred_lt := λ a b h, begin
cases a,
{ exact b.zero_le },
{ exact h }
end }
@[simp] lemma succ_eq_succ : order.succ = succ := rfl
@[simp] lemma pred_eq_pred : order.pred = pred := rfl
lemma succ_iterate (a : ℕ) : ∀ n, succ^[n] a = a + n
| 0 := rfl
| (n + 1) := by { rw [function.iterate_succ', add_succ], exact congr_arg _ n.succ_iterate }
lemma pred_iterate (a : ℕ) : ∀ n, pred^[n] a = a - n
| 0 := rfl
| (n + 1) := by { rw [function.iterate_succ', sub_succ], exact congr_arg _ n.pred_iterate }
instance : is_succ_archimedean ℕ :=
⟨λ a b h, ⟨b - a, by rw [succ_eq_succ, succ_iterate, add_tsub_cancel_of_le h]⟩⟩
instance : is_pred_archimedean ℕ :=
⟨λ a b h, ⟨b - a, by rw [pred_eq_pred, pred_iterate, tsub_tsub_cancel_of_le h]⟩⟩
/-! ### Covering relation -/
protected lemma covby_iff_succ_eq {m n : ℕ} : m ⋖ n ↔ m + 1 = n := succ_eq_iff_covby.symm
end nat
@[simp, norm_cast] lemma fin.coe_covby_iff {n : ℕ} {a b : fin n} : (a : ℕ) ⋖ b ↔ a ⋖ b :=
and_congr_right' ⟨λ h c hc, h hc, λ h c ha hb, @h ⟨c, hb.trans b.prop⟩ ha hb⟩
alias fin.coe_covby_iff ↔ _ covby.coe_fin
|
module Libraries.Text.PrettyPrint.Prettyprinter.Doc
import Data.List
import public Data.List1
import Data.Maybe
import Data.SnocList
import Data.String
import public Libraries.Data.String.Extra
%hide Data.String.lines
%hide Data.String.lines'
%hide Data.String.unlines
%hide Data.String.unlines'
%default total
export
textSpaces : Int -> String
textSpaces n = Extra.replicate (integerToNat $ cast n) ' '
||| Maximum number of characters that fit in one line.
public export
data PageWidth : Type where
||| The `Int` is the number of characters, including whitespace, that fit in a line.
||| The `Double` is the ribbon, the fraction of the total page width that can be printed on.
AvailablePerLine : Int -> Double -> PageWidth
||| The layouters should not introduce line breaks.
Unbounded : PageWidth
data FlattenResult : Type -> Type where
Flattened : a -> FlattenResult a
AlreadyFlat : FlattenResult a
NeverFlat : FlattenResult a
Functor FlattenResult where
map f (Flattened a) = Flattened (f a)
map _ AlreadyFlat = AlreadyFlat
map _ NeverFlat = NeverFlat
||| Fusion depth parameter.
public export
data FusionDepth : Type where
||| Do not dive deep into nested documents.
Shallow : FusionDepth
||| Recurse into all parts of the `Doc`. May impact performance.
Deep : FusionDepth
||| This data type represents pretty documents that have
||| been annotated with an arbitrary data type `ann`.
public export
data Doc : Type -> Type where
Empty : Doc ann
Chara : (c : Char) -> Doc ann -- Invariant: not '\n'
Text : (len : Int) -> (text : String) -> Doc ann -- Invariant: at least two characters long and no '\n'
Line : Doc ann
FlatAlt : Lazy (Doc ann) -> Lazy (Doc ann) -> Doc ann
Cat : Doc ann -> Doc ann -> Doc ann
Nest : (i : Int) -> Doc ann -> Doc ann
Union : Lazy (Doc ann) -> Lazy (Doc ann) -> Doc ann -- Invariant: the first line of the first document should be
-- longer than the first lines of the second one
Column : (Int -> Doc ann) -> Doc ann
WithPageWidth : (PageWidth -> Doc ann) -> Doc ann
Nesting : (Int -> Doc ann) -> Doc ann
Annotated : ann -> Doc ann -> Doc ann
export
Semigroup (Doc ann) where
(<+>) = Cat
export
Monoid (Doc ann) where
neutral = Empty
||| Layout a document depending on which column it starts at.
export
column : (Int -> Doc ann) -> Doc ann
column = Column
||| Lays out a document with the current nesting level increased by `i`.
export
nest : Int -> Doc ann -> Doc ann
nest 0 x = x
nest i x = Nest i x
||| Layout a document depending on the current nesting level.
export
nesting : (Int -> Doc ann) -> Doc ann
nesting = Nesting
||| Lays out a document, and makes the column width of it available to a function.
export
width : Doc ann -> (Int -> Doc ann) -> Doc ann
width doc f = column (\colStart => doc <+> column (\colEnd => f (colEnd - colStart)))
||| Layout a document depending on the page width, if one has been specified.
export
pageWidth : (PageWidth -> Doc ann) -> Doc ann
pageWidth = WithPageWidth
||| Lays out a document with the nesting level set to the current column.
export
align : Doc ann -> Doc ann
align d = column (\k => nesting (\i => nest (k - i) d))
||| Lays out a document with a nesting level set to the current column plus `i`.
||| Negative values are allowed, and decrease the nesting level accordingly.
export
hang : Int -> Doc ann -> Doc ann
hang i d = align (nest i d)
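-- Illustrative sketch (not from the original source): in
-- `pretty "prefix" <++> hang 4 (vsep [pretty "foo", pretty "bar"])`
-- the word "foo" starts right after "prefix ", and "bar" is laid out on the next
-- line, indented four columns past the column where "foo" started.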
||| Insert a number of spaces.
export
spaces : Int -> Doc ann
spaces n = if n <= 0
then Empty
else if n == 1
then Chara ' '
else Text n (textSpaces n)
||| Indents a document with `i` spaces, starting from the current cursor position.
export
indent : Int -> Doc ann -> Doc ann
indent i d = hang i (spaces i <+> d)
||| Lays out a document. It then appends spaces until the width is equal to `i`.
||| If the width is already larger, nothing is appended.
export
fill : Int -> Doc ann -> Doc ann
fill n doc = width doc (\w => spaces $ n - w)
infixr 6 <++>
||| Concatenates two documents with a space in between.
export
(<++>) : Doc ann -> Doc ann -> Doc ann
x <++> y = x <+> Chara ' ' <+> y
||| The empty document behaves like `pretty ""`, so it has a height of 1.
export
emptyDoc : Doc ann
emptyDoc = Empty
||| Behaves like `space` if the resulting output fits the page, otherwise like `line`.
export
softline : Doc ann
softline = Union (Chara ' ') Line
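-- Illustrative sketch (not from the original source):
-- `pretty "a" <+> softline <+> pretty "b"` renders as "a b" when it fits the
-- remaining page width, and as "a" and "b" on separate lines otherwise.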
||| Like `softline`, but behaves like `neutral` if the resulting output does not fit
||| on the page.
export
softline' : Doc ann
softline' = Union neutral Line
||| A line break, even when grouped.
export
hardline : Doc ann
hardline = Line
flatten : Doc ann -> Doc ann
flatten Empty = Empty
flatten (Chara x) = Chara x
flatten (Text len x) = Text len x
flatten Line = Empty
flatten (FlatAlt _ y) = flatten y
flatten (Cat x y) = Cat (flatten x) (flatten y)
flatten (Nest i x) = Nest i (flatten x)
flatten (Union x _) = flatten x
flatten (Column f) = Column (\x => flatten $ f x)
flatten (WithPageWidth f) = WithPageWidth (\x => flatten $ f x)
flatten (Nesting f) = Nesting (\x => flatten $ f x)
flatten (Annotated ann x) = Annotated ann (flatten x)
changesUponFlattening : Doc ann -> FlattenResult (Doc ann)
changesUponFlattening Empty = AlreadyFlat
changesUponFlattening (Chara x) = AlreadyFlat
changesUponFlattening (Text x y) = AlreadyFlat
changesUponFlattening Line = NeverFlat
changesUponFlattening (FlatAlt _ y) = Flattened (flatten y)
changesUponFlattening (Cat x y) = case (changesUponFlattening x, changesUponFlattening y) of
(NeverFlat, _) => NeverFlat
(_, NeverFlat) => NeverFlat
(Flattened x', Flattened y') => Flattened (Cat x' y')
(Flattened x', AlreadyFlat) => Flattened (Cat x' y)
(AlreadyFlat , Flattened y') => Flattened (Cat x y')
(AlreadyFlat , AlreadyFlat) => AlreadyFlat
changesUponFlattening (Nest i x) = map (Nest i) (changesUponFlattening x)
changesUponFlattening (Union x _) = Flattened x
changesUponFlattening (Column f) = Flattened (Column (flatten . f))
changesUponFlattening (WithPageWidth f) = Flattened (WithPageWidth (flatten . f))
changesUponFlattening (Nesting f) = Flattened (Nesting (flatten . f))
changesUponFlattening (Annotated ann x) = map (Annotated ann) (changesUponFlattening x)
||| Tries laying out a document into a single line by removing the contained
||| line breaks; if this does not fit the page, or contains a `hardline`, the document
||| is laid out without changes.
export
group : Doc ann -> Doc ann
group (Union x y) = Union x y
group (FlatAlt x y) = case changesUponFlattening y of
Flattened y' => Union y' x
AlreadyFlat => Union y x
NeverFlat => x
group x = case changesUponFlattening x of
Flattened x' => Union x' x
AlreadyFlat => x
NeverFlat => x
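-- Illustrative sketch (not from the original source):
-- `group (pretty "foo" <+> line <+> pretty "bar")` renders as "foo bar" when that
-- fits the remaining page width, and keeps the line break otherwise.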
||| By default renders the first document. When grouped, renders the second, with
||| the first as fallback when there is not enough space.
export
flatAlt : Lazy (Doc ann) -> Lazy (Doc ann) -> Doc ann
flatAlt = FlatAlt
||| Advances to the next line and indents to the current nesting level.
export
line : Doc ann
line = FlatAlt Line (Chara ' ')
||| Like `line`, but behaves like `neutral` if the line break is undone by `group`.
export
line' : Doc ann
line' = FlatAlt Line Empty
||| First lays out the document. It then appends spaces until the width is equal to `i`.
||| If the width is already larger than `i`, the nesting level is increased by `i`
||| and a line is appended.
export
fillBreak : Int -> Doc ann -> Doc ann
fillBreak f x = width x (\w => if w > f
then nest f line'
else spaces $ f - w)
||| Concatenate all documents element-wise with a binary function.
export
concatWith : (Doc ann -> Doc ann -> Doc ann) -> List (Doc ann) -> Doc ann
concatWith f [] = neutral
concatWith f (x :: xs) = foldl f x xs
||| Concatenates all documents horizontally with `(<++>)`.
export
hsep : List (Doc ann) -> Doc ann
hsep = concatWith (<++>)
||| Concatenates all documents above each other. If a `group` undoes the line breaks,
||| the documents are separated with a space instead.
export
vsep : List (Doc ann) -> Doc ann
vsep = concatWith (\x, y => x <+> line <+> y)
||| Concatenates the documents horizontally with `(<++>)` as long as it fits the page,
||| then inserts a line and continues.
export
fillSep : List (Doc ann) -> Doc ann
fillSep = concatWith (\x, y => x <+> softline <+> y)
||| Tries laying out the documents separated with spaces and if this does not fit,
||| separates them with newlines.
export
sep : List (Doc ann) -> Doc ann
sep = group . vsep
||| Concatenates all documents horizontally with `(<+>)`.
export
hcat : List (Doc ann) -> Doc ann
hcat = concatWith (<+>)
||| Vertically concatenates the documents. If it is grouped, the line breaks are removed.
export
vcat : List (Doc ann) -> Doc ann
vcat = concatWith (\x, y => x <+> line' <+> y)
||| Concatenates documents horizontally with `(<+>)` as long as it fits the page, then
||| inserts a line and continues.
export
fillCat : List (Doc ann) -> Doc ann
fillCat = concatWith (\x, y => x <+> softline' <+> y)
||| Tries laying out the documents separated with nothing, and if it does not fit the page,
||| separates them with newlines.
export
cat : List (Doc ann) -> Doc ann
cat = group . vcat
||| Appends `p` to all but the last document.
export
punctuate : Doc ann -> List (Doc ann) -> List (Doc ann)
punctuate _ [] = []
punctuate _ [d] = [d]
punctuate p (d :: ds) = (d <+> p) :: punctuate p ds
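-- Illustrative sketch (not from the original source):
-- `punctuate (pretty ",") [pretty "a", pretty "b", pretty "c"]` yields the list
-- `[a <+> ",", b <+> ",", c]`, which `hsep` lays out as "a, b, c".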
export
plural : (Num amount, Eq amount) => doc -> doc -> amount -> doc
plural one multiple n = if n == 1 then one else multiple
||| Encloses the document between two other documents using `(<+>)`.
export
enclose : Doc ann -> Doc ann -> Doc ann -> Doc ann
enclose l r x = l <+> x <+> r
||| Argument reordering of `enclose`.
||| Example: concatWith (surround (pretty ".")) [pretty "Text", pretty "PrettyPrint", pretty "Doc"]
||| Text.PrettyPrint.Doc
export
surround : Doc ann -> Doc ann -> Doc ann -> Doc ann
surround x l r = l <+> x <+> r
||| Concatenates the documents separated by `s` and encloses the resulting document by `l` and `r`.
export
encloseSep : Doc ann -> Doc ann -> Doc ann -> List (Doc ann) -> Doc ann
encloseSep l r s [] = l <+> r
encloseSep l r s [d] = l <+> d <+> r
encloseSep l r s ds = cat (zipWith (<+>) (l :: replicate (length ds `minus` 1) s) ds) <+> r
unsafeTextWithoutNewLines : String -> Doc ann
unsafeTextWithoutNewLines str = case strM str of
StrNil => Empty
StrCons c cs => if cs == ""
then Chara c
else Text (cast $ length str) str
||| Adds an annotation to a document.
export
annotate : ann -> Doc ann -> Doc ann
annotate = Annotated
||| Changes the annotations of a document. Individual annotations can be removed,
||| changed, or replaced by multiple ones.
export
alterAnnotations : (ann -> List ann') -> Doc ann -> Doc ann'
alterAnnotations re Empty = Empty
alterAnnotations re (Chara c) = Chara c
alterAnnotations re (Text l t) = Text l t
alterAnnotations re Line = Line
alterAnnotations re (FlatAlt x y) = FlatAlt (alterAnnotations re x) (alterAnnotations re y)
alterAnnotations re (Cat x y) = Cat (alterAnnotations re x) (alterAnnotations re y)
alterAnnotations re (Nest i x) = Nest i (alterAnnotations re x)
alterAnnotations re (Union x y) = Union (alterAnnotations re x) (alterAnnotations re y)
alterAnnotations re (Column f) = Column (\x => alterAnnotations re $ f x)
alterAnnotations re (WithPageWidth f) = WithPageWidth (\x => alterAnnotations re $ f x)
alterAnnotations re (Nesting f) = Nesting (\x => alterAnnotations re $ f x)
alterAnnotations re (Annotated ann x) = foldr Annotated (alterAnnotations re x) (re ann)
||| Removes all annotations.
export
unAnnotate : Doc ann -> Doc xxx
unAnnotate = alterAnnotations (const [])
||| Changes the annotations of a document.
export
reAnnotate : (ann -> ann') -> Doc ann -> Doc ann'
reAnnotate re = alterAnnotations (pure . re)
||| Alter the document's annotations.
export
Functor Doc where
map = reAnnotate
||| Overloaded conversion to `Doc`.
public export
interface Pretty a where
pretty : a -> Doc ann
pretty x = prettyPrec Open x
prettyPrec : Prec -> a -> Doc ann
prettyPrec _ x = pretty x
export
Pretty String where
pretty str = let str' = if "\n" `isSuffixOf` str then dropLast 1 str else str in
vsep $ map unsafeTextWithoutNewLines $ forget $ lines str'
public export
FromString (Doc ann) where
fromString = pretty
||| Variant of `encloseSep` with brackets and comma as separator.
export
list : List (Doc ann) -> Doc ann
list = group . encloseSep (flatAlt (pretty "[ ") (pretty "["))
(flatAlt (pretty " ]") (pretty "]"))
(pretty ", ")
||| Variant of `encloseSep` with parentheses and comma as separator.
export
tupled : List (Doc ann) -> Doc ann
tupled = group . encloseSep (flatAlt (pretty "( ") (pretty "("))
(flatAlt (pretty " )") (pretty ")"))
(pretty ", ")
export
Pretty a => Pretty (List a) where
pretty = align . list . map pretty
export
Pretty a => Pretty (List1 a) where
pretty = pretty . forget
export
[prettyListMaybe] Pretty a => Pretty (List (Maybe a)) where
pretty = pretty . catMaybes
where catMaybes : List (Maybe a) -> List a
catMaybes [] = []
catMaybes (Nothing :: xs) = catMaybes xs
catMaybes ((Just x) :: xs) = x :: catMaybes xs
export
Pretty () where
pretty _ = pretty "()"
export
Pretty Bool where
pretty True = pretty "True"
pretty False = pretty "False"
export
Pretty Char where
pretty '\n' = line
pretty c = Chara c
export Pretty Nat where pretty = unsafeTextWithoutNewLines . show
export Pretty Int where pretty = unsafeTextWithoutNewLines . show
export Pretty Integer where pretty = unsafeTextWithoutNewLines . show
export Pretty Double where pretty = unsafeTextWithoutNewLines . show
export Pretty Bits8 where pretty = unsafeTextWithoutNewLines . show
export Pretty Bits16 where pretty = unsafeTextWithoutNewLines . show
export Pretty Bits32 where pretty = unsafeTextWithoutNewLines . show
export Pretty Bits64 where pretty = unsafeTextWithoutNewLines . show
export
(Pretty a, Pretty b) => Pretty (a, b) where
pretty (x, y) = tupled [pretty x, pretty y]
export
Pretty a => Pretty (Maybe a) where
pretty = maybe neutral pretty
||| Combines text nodes so they can be rendered more efficiently.
export
fuse : FusionDepth -> Doc ann -> Doc ann
fuse depth (Cat Empty x) = fuse depth x
fuse depth (Cat x Empty) = fuse depth x
fuse depth (Cat (Chara c1) (Chara c2)) = Text 2 (strCons c1 (strCons c2 ""))
fuse depth (Cat (Text lt t) (Chara c)) = Text (lt + 1) (t ++ singleton c)
fuse depth (Cat (Chara c) (Text lt t)) = Text (lt + 1) (strCons c t)
fuse depth (Cat (Text l1 t1) (Text l2 t2)) = Text (l1 + l2) (t1 ++ t2)
fuse depth (Cat (Chara x) (Cat (Chara y) z)) =
let sub = Text 2 (strCons x (strCons y "")) in
fuse depth $ assert_smaller (Cat (Chara x) (Cat (Chara y) z)) (Cat sub z)
fuse depth (Cat (Text lx x) (Cat (Chara y) z)) =
let sub = Text (lx + 1) (x ++ singleton y) in
fuse depth $ assert_smaller (Cat (Text lx x) (Cat (Chara y) z)) (Cat sub z)
fuse depth (Cat (Chara x) (Cat (Text ly y) z)) =
let sub = Text (ly + 1) (strCons x y) in
fuse depth $ assert_smaller (Cat (Chara x) (Cat (Text ly y) z)) (Cat sub z)
fuse depth (Cat (Text lx x) (Cat (Text ly y) z)) =
let sub = Text (lx + ly) (x ++ y) in
fuse depth $ assert_smaller (Cat (Text lx x) (Cat (Text ly y) z)) (Cat sub z)
fuse depth (Cat (Cat x (Chara y)) z) =
let sub = fuse depth (Cat (Chara y) z) in
assert_total $ fuse depth (Cat x sub)
fuse depth (Cat (Cat x (Text ly y)) z) =
let sub = fuse depth (Cat (Text ly y) z) in
assert_total $ fuse depth (Cat x sub)
fuse depth (Cat x y) = Cat (fuse depth x) (fuse depth y)
fuse depth (Nest i (Nest j x)) = fuse depth $ assert_smaller (Nest i (Nest j x)) (Nest (i + j) x)
fuse depth (Nest _ Empty) = Empty
fuse depth (Nest _ (Text lx x)) = Text lx x
fuse depth (Nest _ (Chara x)) = Chara x
fuse depth (Nest 0 x) = fuse depth x
fuse depth (Nest i x) = Nest i (fuse depth x)
fuse depth (Annotated ann x) = Annotated ann (fuse depth x)
fuse depth (FlatAlt x y) = FlatAlt (fuse depth x) (fuse depth y)
fuse depth (Union x y) = Union (fuse depth x) (fuse depth y)
fuse Shallow (Column f) = Column f
fuse depth (Column f) = Column (\x => fuse depth $ f x)
fuse Shallow (WithPageWidth f) = WithPageWidth f
fuse depth (WithPageWidth f) = WithPageWidth (\x => fuse depth $ f x)
fuse Shallow (Nesting f) = Nesting f
fuse depth (Nesting f) = Nesting (\x => fuse depth $ f x)
fuse depth x = x
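-- Illustrative sketch (not from the original source):
-- `fuse Shallow (pretty "http" <+> pretty "://")` collapses the two adjacent text
-- nodes into a single `Text 7 "http://"` node, which later renders in one step.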
||| This data type represents laid out documents and is used by the display functions.
public export
data SimpleDocStream : Type -> Type where
SEmpty : SimpleDocStream ann
SChar : (c : Char) -> (rest : Lazy (SimpleDocStream ann)) -> SimpleDocStream ann
SText : (len : Int) -> (text : String) -> (rest : Lazy (SimpleDocStream ann)) -> SimpleDocStream ann
SLine : (i : Int) -> (rest : SimpleDocStream ann) -> SimpleDocStream ann
SAnnPush : ann -> (rest : SimpleDocStream ann) -> SimpleDocStream ann
SAnnPop : (rest : SimpleDocStream ann) -> SimpleDocStream ann
internalError : SimpleDocStream ann
internalError = let msg = "<internal pretty printing error>" in
SText (cast $ length msg) msg SEmpty
data AnnotationRemoval = Remove | DontRemove
||| Changes the annotation of a document to a different annotation or none.
export
alterAnnotationsS : (ann -> Maybe ann') -> SimpleDocStream ann -> SimpleDocStream ann'
alterAnnotationsS re = fromMaybe internalError . go []
where
go : List AnnotationRemoval -> SimpleDocStream ann -> Maybe (SimpleDocStream ann')
go stack SEmpty = pure SEmpty
go stack (SChar c rest) = SChar c . delay <$> go stack rest
go stack (SText l t rest) = SText l t . delay <$> go stack rest
go stack (SLine l rest) = SLine l <$> go stack rest
go stack (SAnnPush ann rest) = case re ann of
Nothing => go (Remove :: stack) rest
Just ann' => SAnnPush ann' <$> go (DontRemove :: stack) rest
go stack (SAnnPop rest) = case stack of
[] => Nothing
DontRemove :: stack' => SAnnPop <$> go stack' rest
Remove :: stack' => go stack' rest
||| Removes all annotations.
export
unAnnotateS : SimpleDocStream ann -> SimpleDocStream xxx
unAnnotateS SEmpty = SEmpty
unAnnotateS (SChar c rest) = SChar c (unAnnotateS rest)
unAnnotateS (SText l t rest) = SText l t (unAnnotateS rest)
unAnnotateS (SLine l rest) = SLine l (unAnnotateS rest)
unAnnotateS (SAnnPush ann rest) = unAnnotateS rest
unAnnotateS (SAnnPop rest) = unAnnotateS rest
||| Changes the annotation of a document.
export
reAnnotateS : (ann -> ann') -> SimpleDocStream ann -> SimpleDocStream ann'
reAnnotateS re SEmpty = SEmpty
reAnnotateS re (SChar c rest) = SChar c (reAnnotateS re rest)
reAnnotateS re (SText l t rest) = SText l t (reAnnotateS re rest)
reAnnotateS re (SLine l rest) = SLine l (reAnnotateS re rest)
reAnnotateS re (SAnnPush ann rest) = SAnnPush (re ann) (reAnnotateS re rest)
reAnnotateS re (SAnnPop rest) = SAnnPop (reAnnotateS re rest)
export
Functor SimpleDocStream where
map = reAnnotateS
||| Collects all annotations from a document.
export
collectAnnotations : Monoid m => (ann -> m) -> SimpleDocStream ann -> m
collectAnnotations f SEmpty = neutral
collectAnnotations f (SChar c rest) = collectAnnotations f rest
collectAnnotations f (SText l t rest) = collectAnnotations f rest
collectAnnotations f (SLine l rest) = collectAnnotations f rest
collectAnnotations f (SAnnPush ann rest) = f ann <+> collectAnnotations f rest
collectAnnotations f (SAnnPop rest) = collectAnnotations f rest
||| Transform a document based on its annotations.
export
traverse : Applicative f => (ann -> f ann') -> SimpleDocStream ann -> f (SimpleDocStream ann')
traverse f SEmpty = pure SEmpty
traverse f (SChar c rest) = SChar c . delay <$> traverse f rest
traverse f (SText l t rest) = SText l t . delay <$> traverse f rest
traverse f (SLine l rest) = SLine l <$> traverse f rest
traverse f (SAnnPush ann rest) = SAnnPush <$> f ann <*> traverse f rest
traverse f (SAnnPop rest) = SAnnPop <$> traverse f rest
data WhitespaceStrippingState = AnnotationLevel Int | RecordedWithespace (List Int) Int
dropWhileEnd : (a -> Bool) -> List a -> List a
dropWhileEnd p = foldr (\x, xs => if p x && isNil xs then [] else x :: xs) []
||| Removes all trailing space characters.
export
removeTrailingWhitespace : SimpleDocStream ann -> SimpleDocStream ann
removeTrailingWhitespace = fromMaybe internalError . go (RecordedWithespace [] 0)
where
prependEmptyLines : List Int -> SimpleDocStream ann -> SimpleDocStream ann
prependEmptyLines is sds0 = foldr (\_, sds => SLine 0 sds) sds0 is
commitWhitespace : List Int -> Int -> SimpleDocStream ann -> SimpleDocStream ann
commitWhitespace [] 0 sds = sds
commitWhitespace [] 1 sds = SChar ' ' sds
commitWhitespace [] n sds = SText n (textSpaces n) sds
commitWhitespace (i :: is) n sds = prependEmptyLines is (SLine (i + n) sds)
go : WhitespaceStrippingState -> SimpleDocStream ann -> Maybe (SimpleDocStream ann)
go (AnnotationLevel _) SEmpty = pure SEmpty
go l@(AnnotationLevel _) (SChar c rest) = SChar c . delay <$> go l rest
go l@(AnnotationLevel _) (SText lt text rest) = SText lt text . delay <$> go l rest
go l@(AnnotationLevel _) (SLine i rest) = SLine i <$> go l rest
go (AnnotationLevel l) (SAnnPush ann rest) = SAnnPush ann <$> go (AnnotationLevel (l + 1)) rest
go (AnnotationLevel l) (SAnnPop rest) =
if l > 1
then SAnnPop <$> go (AnnotationLevel (l - 1)) rest
else SAnnPop <$> go (RecordedWithespace [] 0) rest
go (RecordedWithespace _ _) SEmpty = pure SEmpty
go (RecordedWithespace lines spaces) (SChar ' ' rest) = go (RecordedWithespace lines (spaces + 1)) rest
go (RecordedWithespace lines spaces) (SChar c rest) =
do rest' <- go (RecordedWithespace [] 0) rest
pure $ commitWhitespace lines spaces (SChar c rest')
go (RecordedWithespace lines spaces) (SText l text rest) =
let stripped = pack $ dropWhileEnd (== ' ') $ unpack text
strippedLength = cast $ length stripped
trailingLength = l - strippedLength in
if strippedLength == 0
then go (RecordedWithespace lines (spaces + l)) rest
else do rest' <- go (RecordedWithespace [] trailingLength) rest
pure $ commitWhitespace lines spaces (SText strippedLength stripped rest')
go (RecordedWithespace lines spaces) (SLine i rest) = go (RecordedWithespace (i :: lines) 0) rest
go (RecordedWithespace lines spaces) (SAnnPush ann rest) =
do rest' <- go (AnnotationLevel 1) rest
pure $ commitWhitespace lines spaces (SAnnPush ann rest')
go (RecordedWithespace lines spaces) (SAnnPop _) = Nothing
public export
FittingPredicate : Type -> Type
FittingPredicate ann = Int -> Int -> Maybe Int -> SimpleDocStream ann -> Bool
data LayoutPipeline ann = Nil | Cons Int (Doc ann) (LayoutPipeline ann) | UndoAnn (LayoutPipeline ann)
export
defaultPageWidth : PageWidth
defaultPageWidth = AvailablePerLine 80 1
round : Double -> Int
round x = if x > 0
then if x - floor x < 0.5 then cast $ floor x else cast $ ceiling x
else if ceiling x - x < 0.5 then cast $ ceiling x else cast $ floor x
||| The remaining width on the current line.
remainingWidth : Int -> Double -> Int -> Int -> Int
remainingWidth lineLength ribbonFraction lineIndent currentColumn =
let columnsLeftInLine = lineLength - currentColumn
ribbonWidth = (max 0 . min lineLength . round) (cast lineLength * ribbonFraction)
columnsLeftInRibbon = lineIndent + ribbonWidth - currentColumn in
min columnsLeftInLine columnsLeftInRibbon
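-- Illustrative worked example (not from the original source): with
-- lineLength = 80, ribbonFraction = 0.4, lineIndent = 0 and currentColumn = 10,
-- the ribbon width is round (80 * 0.4) = 32, so the result is
-- min (80 - 10) (0 + 32 - 10) = 22 characters left on the current line.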
public export
record LayoutOptions where
constructor MkLayoutOptions
layoutPageWidth : PageWidth
export
defaultLayoutOptions : LayoutOptions
defaultLayoutOptions = MkLayoutOptions defaultPageWidth
||| The Wadler/Leijen layout algorithm.
export
layoutWadlerLeijen : FittingPredicate ann -> PageWidth -> Doc ann -> SimpleDocStream ann
layoutWadlerLeijen fits pageWidth_ doc = best 0 0 (Cons 0 doc Nil)
where
initialIndentation : SimpleDocStream ann -> Maybe Int
initialIndentation (SLine i _) = Just i
initialIndentation (SAnnPush _ s) = initialIndentation s
initialIndentation (SAnnPop s) = initialIndentation s
initialIndentation _ = Nothing
selectNicer : Int -> Int -> SimpleDocStream ann -> Lazy (SimpleDocStream ann) -> SimpleDocStream ann
selectNicer lineIndent currentColumn x y =
if fits lineIndent currentColumn (initialIndentation y) x then x else y
best : Int -> Int -> LayoutPipeline ann -> SimpleDocStream ann
best _ _ Nil = SEmpty
best nl cc (UndoAnn ds) = SAnnPop (best nl cc ds)
best nl cc (Cons i Empty ds) = best nl cc ds
best nl cc (Cons i (Chara c) ds) = SChar c (best nl (cc + 1) ds)
best nl cc (Cons i (Text l t) ds) = SText l t (best nl (cc + l) ds)
best nl cc (Cons i Line ds) = let x = best i i ds
i' = case x of
SEmpty => 0
SLine _ _ => 0
_ => i in
SLine i' x
best nl cc c@(Cons i (FlatAlt x y) ds) = best nl cc $ assert_smaller c (Cons i x ds)
best nl cc (Cons i (Cat x y) ds) = assert_total $ best nl cc (Cons i x (Cons i y ds))
best nl cc c@(Cons i (Nest j x) ds) = best nl cc $ assert_smaller c (Cons (i + j) x ds)
best nl cc c@(Cons i (Union x y) ds) = let x' = best nl cc $ assert_smaller c (Cons i x ds)
y' = delay $ best nl cc $ assert_smaller c (Cons i y ds) in
selectNicer nl cc x' y'
best nl cc c@(Cons i (Column f) ds) = best nl cc $ assert_smaller c (Cons i (f cc) ds)
best nl cc c@(Cons i (WithPageWidth f) ds) = best nl cc $ assert_smaller c (Cons i (f pageWidth_) ds)
best nl cc c@(Cons i (Nesting f) ds) = best nl cc $ assert_smaller c (Cons i (f i) ds)
best nl cc c@(Cons i (Annotated ann x) ds) = SAnnPush ann $ best nl cc $ assert_smaller c (Cons i x (UndoAnn ds))
||| Layout a document with unbounded page width.
export
layoutUnbounded : Doc ann -> SimpleDocStream ann
layoutUnbounded = layoutWadlerLeijen (\_, _, _, sdoc => True) Unbounded
fits : Int -> SimpleDocStream ann -> Bool
fits w s = if w < 0 then False
else case s of
SEmpty => True
SChar _ x => fits (w - 1) x
SText l _ x => fits (w - l) x
SLine i x => True
SAnnPush _ x => fits w x
SAnnPop x => fits w x
||| The default layout algorithm.
export
layoutPretty : LayoutOptions -> Doc ann -> SimpleDocStream ann
layoutPretty (MkLayoutOptions pageWidth_@(AvailablePerLine lineLength ribbonFraction)) =
layoutWadlerLeijen (\lineIndent, currentColumn, _, sdoc =>
fits (remainingWidth lineLength ribbonFraction lineIndent currentColumn) sdoc) pageWidth_
layoutPretty (MkLayoutOptions Unbounded) = layoutUnbounded
||| Layout algorithm with more lookahead than layoutPretty.
export
layoutSmart : LayoutOptions -> Doc ann -> SimpleDocStream ann
layoutSmart (MkLayoutOptions pageWidth_@(AvailablePerLine lineLength ribbonFraction)) =
layoutWadlerLeijen fits pageWidth_
where
fits : Int -> Int -> Maybe Int -> SimpleDocStream ann -> Bool
fits lineIndent currentColumn initialIndentY sdoc = go availableWidth sdoc
where
availableWidth : Int
availableWidth = remainingWidth lineLength ribbonFraction lineIndent currentColumn
minNestingLevel : Int
minNestingLevel = case initialIndentY of
Just i => min i currentColumn
Nothing => currentColumn
go : Int -> SimpleDocStream ann -> Bool
go w s = if w < 0
then False
else case s of
SEmpty => True
SChar _ x => go (w - 1) $ assert_smaller s x
SText l _ x => go (w - l) $ assert_smaller s x
SLine i x => if minNestingLevel < i
then go (lineLength - i) $ assert_smaller s x
else True
SAnnPush _ x => go w x
SAnnPop x => go w x
layoutSmart (MkLayoutOptions Unbounded) = layoutUnbounded
||| Lays out the document without adding any indentation. This layouter is very fast.
export
layoutCompact : Doc ann -> SimpleDocStream ann
layoutCompact doc = scan 0 [doc]
where
scan : Int -> List (Doc ann) -> SimpleDocStream ann
scan _ [] = SEmpty
scan col (Empty :: ds) = scan col ds
scan col ((Chara c) :: ds) = SChar c (scan (col + 1) ds)
scan col ((Text l t) :: ds) = SText l t (scan (col + l) ds)
scan col s@((FlatAlt x _) :: ds) = scan col $ assert_smaller s (x :: ds)
scan col (Line :: ds) = SLine 0 (scan 0 ds)
scan col s@((Cat x y) :: ds) = scan col $ assert_smaller s (x :: y :: ds)
scan col s@((Nest _ x) :: ds) = scan col $ assert_smaller s (x :: ds)
scan col s@((Union _ y) :: ds) = scan col $ assert_smaller s (y :: ds)
scan col s@((Column f) :: ds) = scan col $ assert_smaller s (f col :: ds)
scan col s@((WithPageWidth f) :: ds) = scan col $ assert_smaller s (f Unbounded :: ds)
scan col s@((Nesting f) :: ds) = scan col $ assert_smaller s (f 0 :: ds)
scan col s@((Annotated _ x) :: ds) = scan col $ assert_smaller s (x :: ds)
------------------------------------------------------------------------
-- Turn the document into a string
------------------------------------------------------------------------
export
renderShow : SimpleDocStream ann -> (String -> String)
renderShow SEmpty = id
renderShow (SChar c x) = (strCons c) . renderShow x
renderShow (SText _ t x) = (t ++) . renderShow x
renderShow (SLine i x) = ((strCons '\n' $ textSpaces i) ++) . renderShow x
renderShow (SAnnPush _ x) = renderShow x
renderShow (SAnnPop x) = renderShow x
export
Show (Doc ann) where
show doc = renderShow (layoutPretty defaultLayoutOptions doc) ""
------------------------------------------------------------------------
-- Turn the document into a string, and a list of annotation spans
------------------------------------------------------------------------
public export
record Span (a : Type) where
constructor MkSpan
start : Nat
length : Nat
property : a
export
Functor Span where
map f = { property $= f }
export
Foldable Span where
foldr c n span = c span.property n
export
Traversable Span where
traverse f (MkSpan start width prop)
= MkSpan start width <$> f prop
export
Show a => Show (Span a) where
show (MkSpan start width prop)
= concat {t = List} [ "[", show start, "-", show width, "]"
, show prop
]
export
displaySpans : SimpleDocStream a -> (String, List (Span a))
displaySpans p =
let (bits, anns) = go Z [<] [<] [] p in
(concat bits, anns)
where
go : (index : Nat) ->
(doc : SnocList String) ->
(spans : SnocList (Span a)) ->
(ann : List (Nat, a)) -> -- starting index, < current
SimpleDocStream a ->
(List String, List (Span a))
go index doc spans ann SEmpty = (doc <>> [], spans <>> [])
go index doc spans ann (SChar c rest)
= go (S index) (doc :< cast c) spans ann rest
go index doc spans ann (SText len text rest)
= go (integerToNat (cast len) + index) (doc :< text) spans ann rest
go index doc spans ann (SLine i rest)
= let text = strCons '\n' (textSpaces i) in
go (S (integerToNat $ cast i) + index) (doc :< text) spans ann rest
go index doc spans ann (SAnnPush a rest)
= go index doc spans ((index, a) :: ann) rest
go index doc spans ((start, a) :: ann) (SAnnPop rest)
= let span = MkSpan start (minus index start) a in
go index doc (spans :< span) ann rest
go index doc spans [] (SAnnPop rest)
= go index doc spans [] rest
|
(* Title: HOL/Imperative_HOL/ex/Sorted_List.thy
Author: Lukas Bulwahn, TU Muenchen
*)
section \<open>Sorted lists as representation of finite sets\<close>
theory Sorted_List
imports MainRLT
begin
text \<open>Merge function for two distinct sorted lists, producing a combined distinct sorted list\<close>
fun merge :: "('a::linorder) list \<Rightarrow> 'a list \<Rightarrow> 'a list"
where
"merge (x#xs) (y#ys) =
(if x < y then x # merge xs (y#ys) else (if x > y then y # merge (x#xs) ys else x # merge xs ys))"
| "merge xs [] = xs"
| "merge [] ys = ys"
text \<open>The function package does not automatically derive the following, more general rewrite rule:\<close>
lemma merge_Nil[simp]: "merge [] ys = ys"
by (cases ys) auto
lemma set_merge[simp]: "set (merge xs ys) = set xs \<union> set ys"
by (induct xs ys rule: merge.induct, auto)
lemma sorted_merge[simp]:
"List.sorted (merge xs ys) = (List.sorted xs \<and> List.sorted ys)"
by (induct xs ys rule: merge.induct, auto)
lemma distinct_merge[simp]: "\<lbrakk> distinct xs; distinct ys; List.sorted xs; List.sorted ys \<rbrakk> \<Longrightarrow> distinct (merge xs ys)"
by (induct xs ys rule: merge.induct, auto)
text \<open>The remove function removes an element from a sorted list\<close>
primrec remove :: "('a :: linorder) \<Rightarrow> 'a list \<Rightarrow> 'a list"
where
"remove a [] = []"
| "remove a (x#xs) = (if a > x then (x # remove a xs) else (if a = x then xs else x#xs))"
lemma remove': "sorted xs \<and> distinct xs \<Longrightarrow> sorted (remove a xs) \<and> distinct (remove a xs) \<and> set (remove a xs) = set xs - {a}"
apply (induct xs)
apply (auto)
done
lemma set_remove[simp]: "\<lbrakk> sorted xs; distinct xs \<rbrakk> \<Longrightarrow> set (remove a xs) = set xs - {a}"
using remove' by auto
lemma sorted_remove[simp]: "\<lbrakk> sorted xs; distinct xs \<rbrakk> \<Longrightarrow> sorted (remove a xs)"
using remove' by auto
lemma distinct_remove[simp]: "\<lbrakk> sorted xs; distinct xs \<rbrakk> \<Longrightarrow> distinct (remove a xs)"
using remove' by auto
lemma remove_insort_cancel: "remove a (insort a xs) = xs"
apply (induct xs)
apply simp
apply auto
done
lemma remove_insort_commute: "\<lbrakk> a \<noteq> b; sorted xs \<rbrakk> \<Longrightarrow> remove b (insort a xs) = insort a (remove b xs)"
apply (induct xs)
apply auto
apply (case_tac xs)
apply auto
done
lemma notinset_remove: "x \<notin> set xs \<Longrightarrow> remove x xs = xs"
apply (induct xs)
apply auto
done
lemma remove1_eq_remove:
"sorted xs \<Longrightarrow> distinct xs \<Longrightarrow> remove1 x xs = remove x xs"
apply (induct xs)
apply (auto)
apply (subgoal_tac "x \<notin> set xs")
apply (simp add: notinset_remove)
apply fastforce
done
lemma sorted_remove1:
"sorted xs \<Longrightarrow> sorted (remove1 x xs)"
apply (induct xs)
apply (auto)
done
subsection \<open>Efficient member function for sorted lists\<close>
primrec smember :: "'a list \<Rightarrow> 'a::linorder \<Rightarrow> bool" where
"smember [] x \<longleftrightarrow> False"
| "smember (y#ys) x \<longleftrightarrow> x = y \<or> (x > y \<and> smember ys x)"
lemma "sorted xs \<Longrightarrow> smember xs x \<longleftrightarrow> (x \<in> set xs)"
by (induct xs) (auto)
end
|
[STATEMENT]
lemma emeasure_HLD_nxt:
assumes [measurable]: "Measurable.pred S P"
shows "emeasure (T s) {\<omega>\<in>space (T s). (X \<cdot> P) \<omega>} =
(\<integral>\<^sup>+x. emeasure (T x) {\<omega>\<in>space (T x). P \<omega>} * indicator X x \<partial>K s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. emeasure (T s) {\<omega> \<in> space (T s). HLD X \<omega> \<and> nxt P \<omega>} = \<integral>\<^sup>+x\<in>X. emeasure (T x) {\<omega> \<in> space (T x). P \<omega>}\<partial>measure_pmf (K s)
[PROOF STEP]
by (subst emeasure_Collect_T)
(auto intro!: nn_integral_cong_AE simp: AE_measure_pmf_iff split: split_indicator) |
data D : _ where
D : _
|
State Before: R : Type u_1
inst✝ : Semiring R
f : R[X]
a : ℕ
h : a ∈ support (eraseLead f)
⊢ a < natDegree f
State After: R : Type u_1
inst✝ : Semiring R
f : R[X]
a : ℕ
h : a ≠ natDegree f ∧ a ∈ support f
⊢ a < natDegree f
Tactic: rw [eraseLead_support, mem_erase] at h
State Before: R : Type u_1
inst✝ : Semiring R
f : R[X]
a : ℕ
h : a ≠ natDegree f ∧ a ∈ support f
⊢ a < natDegree f
State After: no goals
Tactic: exact (le_natDegree_of_mem_supp a h.2).lt_of_ne h.1 |
-- --------------------------------------------------------------- [ Model.idr ]
-- Module : Model.idr
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
module Text.Markup.Edda.Model.Processed
import Data.AVL.Dict
import Text.Markup.Edda.Model.Common
%default total
%access public export
data Edda : EddaTy -> Type where
Text : String -> Edda INLINE
Sans : String -> Edda INLINE
Scap : String -> Edda INLINE
Mono : String -> Edda INLINE
Verb : String -> Edda INLINE
Code : String -> Edda INLINE
Math : String -> Edda INLINE
Emph : List (Edda INLINE) -> Edda INLINE
Bold : List (Edda INLINE) -> Edda INLINE
Strike : List (Edda INLINE) -> Edda INLINE
Uline : List (Edda INLINE) -> Edda INLINE
Quote : QuoteTy -> List (Edda INLINE) -> Edda INLINE
Parens : ParenTy -> List (Edda INLINE) -> Edda INLINE
Ref : String -> Edda INLINE
Cite : CiteSty -> String -> Edda INLINE
Hyper : String -> List (Edda INLINE) -> Edda INLINE
FNote : String -> List (Edda INLINE) -> Edda INLINE
Space : Edda INLINE
Newline : Edda INLINE
Tab : Edda INLINE
Colon : Edda INLINE
Semi : Edda INLINE
FSlash : Edda INLINE
BSlash : Edda INLINE
Apostrophe : Edda INLINE
SMark : Edda INLINE
Hyphen : Edda INLINE
Comma : Edda INLINE
Plus : Edda INLINE
Bang : Edda INLINE
Period : Edda INLINE
QMark : Edda INLINE
Hash : Edda INLINE
Equals : Edda INLINE
Dollar : Edda INLINE
Pipe : Edda INLINE
Ellipsis : Edda INLINE
EmDash : Edda INLINE
EnDash : Edda INLINE
LAngle : Edda INLINE
RAngle : Edda INLINE
LBrace : Edda INLINE
RBrace : Edda INLINE
LParen : Edda INLINE
RParen : Edda INLINE
LBrack : Edda INLINE
RBrack : Edda INLINE
MiscPunc : Char -> Edda INLINE
-- ------------------------------------------------------------------ [ Blocks ]
HRule : Edda BLOCK
Empty : Edda BLOCK
Figure : (label : Maybe String)
-> (caption : List (Edda INLINE))
-> (attrs : Dict String String)
-> (url : Edda INLINE)
-> Edda BLOCK
DList : (kvpairs : List (List (Edda INLINE), List (Edda INLINE)))
-> Edda BLOCK
Section : (depth : Nat)
-> (label : Maybe String)
-> (title : List (Edda INLINE))
-> (attrs : Dict String String)
-> (body : List (Edda BLOCK))
-> Edda BLOCK
OList : List (List (Edda INLINE)) -> Edda BLOCK
BList : List (List (Edda INLINE)) -> Edda BLOCK
Comment : String -> Edda BLOCK
Equation : (label : Maybe String) -> (eq : String) -> Edda BLOCK
Literal : Maybe String
-> List (Edda INLINE)
-> String
-> Edda BLOCK
Listing : Maybe String
-> (List (Edda INLINE))
-> (Maybe String)
-> (Maybe String)
-> Dict String String
-> String
-> Edda BLOCK
Para : List (Edda INLINE) -> Edda BLOCK
Quotation : Maybe String -> List (Edda INLINE)-> Edda BLOCK
Theorem : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Corollary : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Lemma : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Proposition : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Proof : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Definition : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Exercise : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Note : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Remark : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Problem : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Question : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Solution : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Example : Maybe String -> List (Edda INLINE) -> List (Edda INLINE) -> Edda BLOCK
Snippet : (snippet : List $ Edda ty)
-> (prf : ValidSnippet ty)
-> Edda SNIPPET
Doc : (title : List (Edda INLINE))
-> (attrs : Dict String String)
-> (body : List (Edda BLOCK))
-> Edda DOC
-- --------------------------------------------------------------------- [ EOF ]
|
-- Properties of function composition
-- ==========================================
import tactic
open function
variables {X Y Z W : Type}
-- ----------------------------------------------------
-- Ex. 1. Prove that
-- id ∘ f = f
-- ----------------------------------------------------
-- 1st proof
example
(f : X → Y)
: id ∘ f = f :=
begin
ext,
calc (id ∘ f) x = id (f x) : by rw comp_app
... = f x : by rw id.def,
end
-- 2nd proof
example
(f : X → Y)
: id ∘ f = f :=
begin
ext,
rw comp_app,
rw id.def,
end
-- 3rd proof
example
(f : X → Y)
: id ∘ f = f :=
begin
ext,
rw [comp_app, id.def],
end
-- 4th proof
example
(f : X → Y)
: id ∘ f = f :=
begin
ext,
calc (id ∘ f) x = id (f x) : rfl
... = f x : rfl,
end
-- 5th proof
example
(f : X → Y)
: id ∘ f = f :=
rfl
-- 6th proof
example
(f : X → Y)
: id ∘ f = f :=
-- by library_search
left_id f
-- 7th proof
example
(f : X → Y)
: id ∘ f = f :=
comp.left_id f
-- ----------------------------------------------------
-- Ex. 2. Prove that
-- f ∘ id = f
-- ----------------------------------------------------
-- 1st proof
example
(f : X → Y)
: f ∘ id = f :=
begin
ext,
calc (f ∘ id) x = f (id x) : by rw comp_app
... = f x : by rw id.def,
end
-- 2nd proof
example
(f : X → Y)
: f ∘ id = f :=
begin
ext,
rw comp_app,
rw id.def,
end
-- 3rd proof
example
(f : X → Y)
: f ∘ id = f :=
begin
ext,
rw [comp_app, id.def],
end
-- 4th proof
example
(f : X → Y)
: f ∘ id = f :=
begin
ext,
calc (f ∘ id) x = f (id x) : rfl
... = f x : rfl,
end
-- 5th proof
example
(f : X → Y)
: f ∘ id = f :=
rfl
-- 6th proof
example
(f : X → Y)
: f ∘ id = f :=
-- by library_search
right_id f
-- 7th proof
example
(f : X → Y)
: f ∘ id = f :=
comp.right_id f
-- ----------------------------------------------------
-- Ex. 3. Prove that
-- (f ∘ g) ∘ h = f ∘ (g ∘ h)
-- ----------------------------------------------------
-- 1st proof
example
(f : Z → W)
(g : Y → Z)
(h : X → Y)
: (f ∘ g) ∘ h = f ∘ (g ∘ h) :=
begin
ext,
calc ((f ∘ g) ∘ h) x
= (f ∘ g) (h x) : by rw comp_app
... = f (g (h x)) : by rw comp_app
... = f ((g ∘ h) x) : by rw comp_app
... = (f ∘ (g ∘ h)) x : by rw comp_app
end
-- 2nd proof
example
(f : Z → W)
(g : Y → Z)
(h : X → Y)
: (f ∘ g) ∘ h = f ∘ (g ∘ h) :=
begin
ext,
rw comp_app,
end
-- 3rd proof
example
(f : Z → W)
(g : Y → Z)
(h : X → Y)
: (f ∘ g) ∘ h = f ∘ (g ∘ h) :=
begin
ext,
calc ((f ∘ g) ∘ h) x
= (f ∘ g) (h x) : rfl
... = f (g (h x)) : rfl
... = f ((g ∘ h) x) : rfl
... = (f ∘ (g ∘ h)) x : rfl
end
-- 4th proof
example
(f : Z → W)
(g : Y → Z)
(h : X → Y)
: (f ∘ g) ∘ h = f ∘ (g ∘ h) :=
rfl
-- 5th proof
example
(f : Z → W)
(g : Y → Z)
(h : X → Y)
: (f ∘ g) ∘ h = f ∘ (g ∘ h) :=
comp.assoc f g h
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj5eqsynthconj6 : forall (lv0 : natural), (@eq natural (plus lv0 Zero) (lv0)).
Admitted.
QuickChick conj5eqsynthconj6.
|
""" Adopted from: https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py """
import numpy as np
import random
class ReplayBuffer(object):
def __init__(self, size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done):
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha >= 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
p_total = self._it_sum.sum(0, len(self._storage) - 1)
every_range_len = p_total / batch_size
for i in range(batch_size):
mass = random.random() * every_range_len + i * every_range_len
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
idexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
        return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
        Parameters
        ----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max);
            together with `neutral_element` it must form a monoid over the set of
            possible values for array elements (i.e. the operation is associative)
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
        prefixsum: float
            upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
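    # Illustrative worked example (not from the original source): for a SumSegmentTree
    # holding the values [0.1, 0.4, 0.3, 0.2], find_prefixsum_idx(0.45) returns 1,
    # because 0.1 <= 0.45 while 0.1 + 0.4 > 0.45.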
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
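# Minimal usage sketch (not part of the original module): the transition values below
# are made up purely for illustration, and it assumes `sample` returns the importance
# weights and indexes documented in its docstring.
if __name__ == "__main__":
    buffer = PrioritizedReplayBuffer(size=8, alpha=0.6)
    for step in range(5):
        # (obs_t, action, reward, obs_tp1, done)
        buffer.add(np.array([step]), step % 2, float(step), np.array([step + 1]), False)
    obs_t, acts, rews, obs_tp1, dones, weights, idxes = buffer.sample(batch_size=2, beta=0.4)
    # Feed updated priorities (e.g. absolute TD errors) back into the buffer.
    buffer.update_priorities(idxes, np.abs(rews) + 1e-6)
    print(obs_t.shape, weights)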
|