import data.fintype.card
import tactic.zify
import tactic.ring
import tactic.linarith
import defs
open sum
variables {α β α' β' : Type} {γ : β → Type}
def propagate_aux (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool) × bool)
(x : β → ℕ → bool) : ℕ → (α → bool) × bool
| 0 := next_bit init_carry (λ i, x i 0)
| (n+1) :=
next_bit (propagate_aux n).1 (λ i, x i (n+1))
def propagate (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool) × bool)
(x : β → ℕ → bool) (i : ℕ) : bool :=
(propagate_aux init_carry next_bit x i).2
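-- `propagate` runs a Mealy-style machine: `α` indexes the carry (state) bits,
-- `β` indexes the input streams, and output bit `i` depends on input bits
-- `0..i` through the carry. A minimal sketch (added for illustration, not part
-- of the original development): the running parity of one input stream,
-- realized with a single `unit` carry cell.
def parity_example : (unit → ℕ → bool) → ℕ → bool :=
propagate (λ _, ff) (λ carry bits,
  (λ _, bxor (carry ()) (bits ()), bxor (carry ()) (bits ())))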
@[simp] def propagate_carry (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool))
(x : β → ℕ → bool) : ℕ → (α → bool)
| 0 := next_bit init_carry (λ i, x i 0)
| (n+1) := next_bit (propagate_carry n) (λ i, x i (n+1))
@[simp] def propagate_carry2 (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool))
(x : β → ℕ → bool) : ℕ → (α → bool)
| 0 := init_carry
| (n+1) := next_bit (propagate_carry2 n) (λ i, x i n)
lemma propagate_carry2_succ (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool))
(x : β → ℕ → bool) (n : ℕ) :
propagate_carry2 init_carry next_bit x (n+1) =
propagate_carry init_carry next_bit x n :=
by induction n; simp *
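-- Note the index shift: `propagate_carry` consumes input bit `n` already at
-- step `n`, whereas `propagate_carry2` starts from the bare initial carry and
-- consumes bit `n` only at step `n+1`; the lemma above records exactly this.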
@[simp] lemma propagate_aux_fst_eq_carry (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool) × bool)
(x : β → ℕ → bool) : ∀ n : ℕ,
(propagate_aux init_carry next_bit x n).1 =
propagate_carry init_carry (λ c b, (next_bit c b).1) x n
| 0 := rfl
| (n+1) := by rw [propagate_aux, propagate_carry, propagate_aux_fst_eq_carry]
@[simp] lemma propagate_zero (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool) × bool)
(x : β → ℕ → bool) :
propagate init_carry next_bit x 0 = (next_bit init_carry (λ i, x i 0)).2 :=
rfl
lemma propagate_succ (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool) × bool)
(x : β → ℕ → bool) (i : ℕ) :
propagate init_carry next_bit x (i+1) = (next_bit
(propagate_carry init_carry (λ c b, (next_bit c b).1) x i)
(λ j, x j (i+1))).2 :=
by rw [← propagate_aux_fst_eq_carry]; refl
lemma propagate_succ2 (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool) × bool)
(x : β → ℕ → bool) (i : ℕ) :
propagate init_carry next_bit x (i+1) = (next_bit
(propagate_carry2 init_carry (λ c b, (next_bit c b).1) x (i+1))
(λ j, x j (i+1))).2 :=
by rw [propagate_carry2_succ, ← propagate_aux_fst_eq_carry]; refl
lemma propagate_carry_propagate {δ : β → Type*} {β' : Type}
(f : Π a, δ a → β') : Π (n : ℕ) (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool))
(init_carry_x : Π a, γ a → bool)
(next_bit_x : Π a (carry : γ a → bool) (bits : δ a → bool),
(γ a → bool) × bool)
(x : β' → ℕ → bool),
propagate_carry init_carry next_bit (λ a, propagate (init_carry_x a)
(next_bit_x a) (λ d, x (f a d))) n =
propagate_carry
(λ a : α ⊕ (Σ a, γ a), sum.elim init_carry (λ b : Σ a, γ a,
init_carry_x b.1 b.2) a)
(λ (carry : (α ⊕ (Σ a, γ a)) → bool) (bits : β' → bool),
-- first compute (propagate (init_carry_x a) (next_bit_x a) (λ d, x (f a d)) n)
let f : Π (a : β), (γ a → bool) × bool := λ a, next_bit_x a (λ d,
carry (inr ⟨a, d⟩)) (λ d, bits (f a d)) in
let g : (α → bool) := (next_bit (carry ∘ inl) (λ a, (f a).2)) in
sum.elim g (λ x, (f x.1).1 x.2)
)
x n ∘ inl
| 0 init_carry next_bit init_carry_x next_bit_x x := rfl
| (n+1) init_carry next_bit init_carry_x next_bit_x x := begin
have := propagate_carry_propagate n,
clear_aux_decl,
simp only [propagate_carry, propagate_succ, elim_inl] at *,
conv_lhs { simp only [this] },
clear this,
ext,
congr,
ext,
congr,
induction n with n ih,
{ simp },
{ simp [ih] }
end
lemma propagate_propagate {δ : β → Type*} {β' : Type}
(f : Π a, δ a → β') : Π (n : ℕ) (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool) × bool)
(init_carry_x : Π a, γ a → bool)
(next_bit_x : Π a (carry : γ a → bool) (bits : δ a → bool),
(γ a → bool) × bool)
(x : β' → ℕ → bool),
propagate init_carry next_bit (λ a, propagate (init_carry_x a)
(next_bit_x a) (λ d, x (f a d))) n =
propagate
(λ a : α ⊕ (Σ a, γ a), sum.elim init_carry (λ b : Σ a, γ a,
init_carry_x b.1 b.2) a)
(λ (carry : (α ⊕ (Σ a, γ a)) → bool) (bits : β' → bool),
-- first compute (propagate (init_carry_x a) (next_bit_x a) (λ d, x (f a d)) n)
let f : Π (a : β), (γ a → bool) × bool := λ a, next_bit_x a (λ d,
carry (inr ⟨a, d⟩)) (λ d, bits (f a d)) in
let g : (α → bool) × bool := (next_bit (carry ∘ inl) (λ a, (f a).2)) in
(sum.elim g.1 (λ x, (f x.1).1 x.2), g.2)
)
x n
| 0 init_carry next_bit init_carry_x next_bit_x x := rfl
| (n+1) init_carry next_bit init_carry_x next_bit_x x := begin
simp only [propagate_succ],
clear_aux_decl,
rw [propagate_carry_propagate],
congr,
ext,
congr,
induction n with n ih,
{ simp },
{ simp [ih] }
end
lemma propagate_carry_change_vars {β' : Type}
(init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool))
(x : β' → ℕ → bool) (i : ℕ)
(change_vars : β → β') :
propagate_carry init_carry next_bit (λ b, x (change_vars b)) i =
propagate_carry init_carry (λ (carry : α → bool) (bits : β' → bool),
next_bit carry (λ b, bits (change_vars b))) x i :=
begin
induction i,
{ simp },
{ simp * }
end
lemma propagate_change_vars {β' : Type}
(init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool) × bool)
(x : β' → ℕ → bool) (i : ℕ)
(change_vars : β → β') :
propagate init_carry next_bit (λ b, x (change_vars b)) i =
propagate init_carry (λ (carry : α → bool) (bits : β' → bool),
next_bit carry (λ b, bits (change_vars b))) x i :=
begin
induction i with i ih,
{ refl },
{ simp only [propagate_succ, propagate_carry_change_vars, ih] }
end
open term
@[simp] def arity : term → ℕ
| (var n) := n+1
| zero := 0
| one := 0
| neg_one := 0
| (and t₁ t₂) := max (arity t₁) (arity t₂)
| (or t₁ t₂) := max (arity t₁) (arity t₂)
| (xor t₁ t₂) := max (arity t₁) (arity t₂)
| (not t) := arity t
| (ls t) := arity t
| (add t₁ t₂) := max (arity t₁) (arity t₂)
| (sub t₁ t₂) := max (arity t₁) (arity t₂)
| (neg t) := arity t
| (incr t) := arity t
| (decr t) := arity t
@[simp] def term.eval_fin : Π (t : term) (vars : fin (arity t) → ℕ → bool), ℕ → bool
| (var n) vars := vars (fin.last n)
| zero vars := zero_seq
| one vars := one_seq
| neg_one vars := neg_one_seq
| (and t₁ t₂) vars :=
and_seq (term.eval_fin t₁
(λ i, vars (fin.cast_le (by simp [arity]) i)))
(term.eval_fin t₂
(λ i, vars (fin.cast_le (by simp [arity]) i)))
| (or t₁ t₂) vars :=
or_seq (term.eval_fin t₁
(λ i, vars (fin.cast_le (by simp [arity]) i)))
(term.eval_fin t₂
(λ i, vars (fin.cast_le (by simp [arity]) i)))
| (xor t₁ t₂) vars :=
xor_seq (term.eval_fin t₁
(λ i, vars (fin.cast_le (by simp [arity]) i)))
(term.eval_fin t₂
(λ i, vars (fin.cast_le (by simp [arity]) i)))
| (not t) vars := not_seq (term.eval_fin t vars)
| (ls t) vars := ls_seq (term.eval_fin t vars)
| (add t₁ t₂) vars :=
add_seq (term.eval_fin t₁
(λ i, vars (fin.cast_le (by simp [arity]) i)))
(term.eval_fin t₂
(λ i, vars (fin.cast_le (by simp [arity]) i)))
| (sub t₁ t₂) vars :=
sub_seq (term.eval_fin t₁
(λ i, vars (fin.cast_le (by simp [arity]) i)))
(term.eval_fin t₂
(λ i, vars (fin.cast_le (by simp [arity]) i)))
| (neg t) vars := neg_seq (term.eval_fin t vars)
| (incr t) vars := incr_seq (term.eval_fin t vars)
| (decr t) vars := decr_seq (term.eval_fin t vars)
lemma eval_fin_eq_eval (t : term) (vars : ℕ → ℕ → bool) :
term.eval_fin t (λ i, vars i) = term.eval t vars :=
begin
induction t;
dsimp [term.eval_fin, term.eval, arity] at *; simp *
end
lemma id_eq_propagate (x : ℕ → bool) :
x = propagate empty.elim (λ _ (y : unit → bool), (empty.elim, y ())) (λ _, x) :=
by ext n; cases n; refl
lemma zero_eq_propagate :
zero_seq = propagate empty.elim (λ (_ _ : empty → bool), (empty.elim, ff)) empty.elim :=
by ext n; cases n; refl
lemma one_eq_propagate :
one_seq = propagate (λ _ : unit, tt) (λ f (_ : empty → bool), (λ _, ff, f ())) empty.elim :=
begin
ext n,
cases n with n,
{ refl },
{ cases n,
{ simp [one_seq, propagate_succ] },
{ simp [one_seq, propagate_succ] } }
end
lemma and_eq_propagate (x y : ℕ → bool) :
and_seq x y = propagate empty.elim
(λ _ (y : bool → bool), (empty.elim, y tt && y ff)) (λ b, cond b x y) :=
by ext n; cases n; simp [propagate, propagate_aux, and_seq]
lemma or_eq_propagate (x y : ℕ → bool) :
or_seq x y = propagate empty.elim
(λ _ (y : bool → bool), (empty.elim, y tt || y ff)) (λ b, cond b x y) :=
by ext n; cases n; simp [propagate, propagate_aux, or_seq]
lemma xor_eq_propagate (x y : ℕ → bool) :
xor_seq x y = propagate empty.elim
(λ _ (y : bool → bool), (empty.elim, bxor (y tt) (y ff))) (λ b, cond b x y) :=
by ext n; cases n; simp [propagate, propagate_aux, xor_seq]
lemma not_eq_propagate (x : ℕ → bool) :
not_seq x = propagate empty.elim (λ _ (y : unit → bool), (empty.elim, bnot (y ()))) (λ _, x) :=
by ext n; cases n; simp [propagate, propagate_aux, not_seq]
lemma ls_eq_propagate (x : ℕ → bool) :
ls_seq x = propagate (λ _ : unit, ff) (λ (carry x : unit → bool), (x, carry ())) (λ _, x) :=
begin
ext n,
cases n with n,
{ refl },
{ cases n,
{ simp [ls_seq, propagate_succ] },
{ simp [ls_seq, propagate_succ] } }
end
lemma add_seq_aux_eq_propagate_carry (x y : ℕ → bool) (n : ℕ) :
(add_seq_aux x y n).2 = propagate_carry (λ _, ff)
(λ (carry : unit → bool) (bits : bool → bool),
λ _, (bits tt && bits ff) || (bits ff && carry ()) || (bits tt && carry ()))
(λ b, cond b x y) n () :=
begin
induction n,
{ simp [add_seq_aux] },
{ simp [add_seq_aux, *] }
end
lemma add_eq_propagate (x y : ℕ → bool) :
add_seq x y = propagate (λ _, ff)
(λ (carry : unit → bool) (bits : bool → bool),
(λ _, (bits tt && bits ff) || (bits ff && carry ()) || (bits tt && carry ()),
bxor (bits tt) (bxor (bits ff) (carry ()))))
(λ b, cond b x y) :=
begin
ext n,
cases n with n,
{ simp [add_seq, add_seq_aux] },
{ cases n,
{ simp [add_seq, add_seq_aux, propagate_succ] },
{ simp [add_seq, add_seq_aux, add_seq_aux_eq_propagate_carry,
propagate_succ] } }
end
lemma sub_seq_aux_eq_propagate_carry (x y : ℕ → bool) (n : ℕ) :
(sub_seq_aux x y n).2 = propagate_carry (λ _, ff)
(λ (carry : unit → bool) (bits : bool → bool),
λ _, (bnot (bits tt) && (bits ff)) ||
(bnot (bxor (bits tt) (bits ff))) && carry ())
(λ b, cond b x y) n () :=
begin
induction n,
{ simp [sub_seq_aux] },
{ simp [sub_seq_aux, *] }
end
lemma sub_eq_propagate (x y : ℕ → bool) :
sub_seq x y = propagate (λ _, ff)
(λ (carry : unit → bool) (bits : bool → bool),
(λ _, (bnot (bits tt) && (bits ff)) ||
((bnot (bxor (bits tt) (bits ff))) && carry ()),
bxor (bits tt) (bxor (bits ff) (carry ()))))
(λ b, cond b x y) :=
begin
ext n,
cases n with n,
{ simp [sub_seq, sub_seq_aux] },
{ cases n,
{ simp [sub_seq, sub_seq_aux, propagate_succ] },
{ simp [sub_seq, sub_seq_aux, sub_seq_aux_eq_propagate_carry,
propagate_succ] } }
end
lemma neg_seq_aux_eq_propagate_carry (x : ℕ → bool) (n : ℕ) :
(neg_seq_aux x n).2 = propagate_carry (λ _, tt)
(λ (carry : unit → bool) (bits : unit → bool),
λ _, (bnot (bits ())) && (carry ()))
(λ _, x) n () :=
begin
induction n,
{ simp [neg_seq_aux] },
{ simp [neg_seq_aux, *] }
end
lemma neg_eq_propagate (x : ℕ → bool) :
neg_seq x = propagate (λ _, tt)
(λ (carry : unit → bool) (bits : unit → bool),
(λ _, (bnot (bits ())) && (carry ()), bxor (bnot (bits ())) (carry ())))
(λ _, x) :=
begin
ext n,
cases n with n,
{ simp [neg_seq, neg_seq_aux] },
{ cases n,
{ simp [neg_seq, neg_seq_aux, propagate_succ] },
{ simp [neg_seq, neg_seq_aux, neg_seq_aux_eq_propagate_carry,
propagate_succ] } }
end
lemma incr_seq_aux_eq_propagate_carry (x : ℕ → bool) (n : ℕ) :
(incr_seq_aux x n).2 = propagate_carry (λ _, tt)
(λ (carry : unit → bool) (bits : unit → bool),
λ _, (bits ()) && carry ())
(λ _, x) n () :=
begin
induction n,
{ simp [incr_seq_aux] },
{ simp [incr_seq_aux, *] }
end
lemma incr_eq_propagate (x : ℕ → bool) :
incr_seq x = propagate (λ _, tt)
(λ (carry : unit → bool) (bits : unit → bool),
(λ _, (bits ()) && carry (), bxor (bits ()) (carry ())))
(λ _, x) :=
begin
ext n,
cases n with n,
{ simp [incr_seq, incr_seq_aux] },
{ cases n,
{ simp [incr_seq, incr_seq_aux, propagate_succ] },
{ simp [incr_seq, incr_seq_aux, incr_seq_aux_eq_propagate_carry,
propagate_succ] } }
end
lemma decr_seq_aux_eq_propagate_carry (x : ℕ → bool) (n : ℕ) :
(decr_seq_aux x n).2 = propagate_carry (λ _, tt)
(λ (carry : unit → bool) (bits : unit → bool),
λ _, (bnot (bits ())) && carry ())
(λ _, x) n () :=
begin
induction n,
{ simp [decr_seq_aux] },
{ simp [decr_seq_aux, *] }
end
lemma decr_eq_propagate (x : ℕ → bool) :
decr_seq x = propagate (λ _, tt)
(λ (carry : unit → bool) (bits : unit → bool),
(λ _, (bnot (bits ())) && carry (), bxor (bits ()) (carry ())))
(λ _, x) :=
begin
ext n,
cases n with n,
{ simp [decr_seq, decr_seq_aux] },
{ cases n,
{ simp [decr_seq, decr_seq_aux, propagate_succ] },
{ simp [decr_seq, decr_seq_aux, decr_seq_aux_eq_propagate_carry,
propagate_succ] } }
end
structure propagate_struc (arity : Type) : Type 1 :=
( α : Type )
[ i : fintype α ]
( init_carry : α → bool )
( next_bit : Π (carry : α → bool) (bits : arity → bool),
(α → bool) × bool )
attribute [instance] propagate_struc.i
namespace propagate_struc
variables {arity : Type} (p : propagate_struc arity)
def eval : (arity → ℕ → bool) → ℕ → bool :=
propagate p.init_carry p.next_bit
def change_vars {arity2 : Type} (change_vars : arity → arity2) :
propagate_struc arity2 :=
{ α := p.α,
i := p.i,
init_carry := p.init_carry,
next_bit := λ carry bits, p.next_bit carry (λ i, bits (change_vars i)) }
def compose [fintype arity]
(new_arity : Type)
(q_arity : arity → Type)
(vars : Π (a : arity), q_arity a → new_arity)
(q : Π (a : arity), propagate_struc (q_arity a)) :
propagate_struc (new_arity) :=
{ α := p.α ⊕ (Σ a, (q a).α),
i := by letI := p.i; apply_instance,
init_carry := sum.elim p.init_carry (λ x, (q x.1).init_carry x.2),
next_bit := λ carry bits,
let f : Π (a : arity), ((q a).α → bool) × bool := λ a, (q a).next_bit (λ d,
carry (inr ⟨a, d⟩)) (λ d, bits (vars a d)) in
let g : (p.α → bool) × bool := (p.next_bit (carry ∘ inl) (λ a, (f a).2)) in
(sum.elim g.1 (λ x, (f x.1).1 x.2), g.2) }
lemma eval_compose [fintype arity]
(new_arity : Type)
(q_arity : arity → Type)
(vars : Π (a : arity), q_arity a → new_arity)
(q : Π (a : arity), propagate_struc (q_arity a))
(x : new_arity → ℕ → bool):
(p.compose new_arity q_arity vars q).eval x =
p.eval (λ a, (q a).eval (λ i, x (vars _ i))) :=
begin
ext n,
simp only [eval, compose, propagate_propagate]
end
def and : propagate_struc bool :=
{ α := empty,
i := by apply_instance,
init_carry := empty.elim,
next_bit := λ carry bits, (empty.elim, bits tt && bits ff) }
@[simp] lemma eval_and (x : bool → ℕ → bool) : and.eval x = and_seq (x tt) (x ff) :=
by ext n; cases n; simp [and, and_seq, eval, propagate_succ]
def or : propagate_struc bool :=
{ α := empty,
i := by apply_instance,
init_carry := empty.elim,
next_bit := λ carry bits, (empty.elim, bits tt || bits ff) }
@[simp] lemma eval_or (x : bool → ℕ → bool) : or.eval x = or_seq (x tt) (x ff) :=
by ext n; cases n; simp [or, or_seq, eval, propagate_succ]
def xor : propagate_struc bool :=
{ α := empty,
i := by apply_instance,
init_carry := empty.elim,
next_bit := λ carry bits, (empty.elim, bxor (bits tt) (bits ff)) }
@[simp] lemma eval_xor (x : bool → ℕ → bool) : xor.eval x = xor_seq (x tt) (x ff) :=
by ext n; cases n; simp [xor, xor_seq, eval, propagate_succ]
def add : propagate_struc bool :=
{ α := unit,
i := by apply_instance,
init_carry := λ _, ff,
next_bit := λ (carry : unit → bool) (bits : bool → bool),
(λ _, (bits tt && bits ff) || (bits ff && carry ()) || (bits tt && carry ()),
bxor (bits tt) (bxor (bits ff) (carry ()))) }
@[simp] lemma eval_add (x : bool → ℕ → bool) : add.eval x = add_seq (x tt) (x ff) :=
begin
dsimp [add, eval],
rw [add_eq_propagate],
congr,
funext b,
cases b; refl
end
def sub : propagate_struc bool :=
{ α := unit,
i := by apply_instance,
init_carry := λ _, ff,
next_bit := λ (carry : unit → bool) (bits : bool → bool),
(λ _, (bnot (bits tt) && (bits ff)) ||
((bnot (bxor (bits tt) (bits ff))) && carry ()),
bxor (bits tt) (bxor (bits ff) (carry ()))) }
@[simp] lemma eval_sub (x : bool → ℕ → bool) : sub.eval x = sub_seq (x tt) (x ff) :=
begin
dsimp [sub, eval],
rw [sub_eq_propagate],
congr,
funext b,
cases b; refl
end
def neg : propagate_struc unit :=
{ α := unit,
i := by apply_instance,
init_carry := λ _, tt,
next_bit := λ (carry : unit → bool) (bits : unit → bool),
(λ _, (bnot (bits ())) && (carry ()), bxor (bnot (bits ())) (carry ())) }
@[simp] lemma eval_neg (x : unit → ℕ → bool) : neg.eval x = neg_seq (x ()) :=
begin
dsimp [neg, eval],
rw [neg_eq_propagate],
congr,
funext b,
cases b; refl
end
def not : propagate_struc unit :=
{ α := empty,
i := by apply_instance,
init_carry := empty.elim,
next_bit := λ carry bits, (empty.elim, bnot (bits ())) }
@[simp] lemma eval_not (x : unit → ℕ → bool) : not.eval x = not_seq (x ()) :=
by ext n; cases n; simp [not, not_seq, eval, propagate_succ]
def zero : propagate_struc (fin 0) :=
{ α := empty,
i := by apply_instance,
init_carry := empty.elim,
next_bit := λ carry bits, (empty.elim, ff) }
@[simp] lemma eval_zero (x : fin 0 → ℕ → bool) : zero.eval x = zero_seq :=
by ext n; cases n; simp [zero, zero_seq, eval, propagate_succ]
def one : propagate_struc (fin 0) :=
{ α := unit,
i := by apply_instance,
init_carry := λ _, tt,
next_bit := λ carry bits, (λ _, ff, carry ()) }
@[simp] lemma eval_one (x : fin 0 → ℕ → bool) : one.eval x = one_seq :=
by ext n; cases n; simp [one, one_seq, eval, propagate_succ2]
def neg_one : propagate_struc (fin 0) :=
{ α := empty,
i := by apply_instance,
init_carry := empty.elim,
next_bit := λ carry bits, (empty.elim, tt) }
@[simp] lemma eval_neg_one (x : fin 0 → ℕ → bool) : neg_one.eval x = neg_one_seq :=
by ext n; cases n; simp [neg_one, neg_one_seq, eval, propagate_succ2]
def ls : propagate_struc unit :=
{ α := unit,
i := by apply_instance,
init_carry := λ _, ff,
next_bit := λ carry bits, (bits, carry ()) }
@[simp] lemma eval_ls (x : unit → ℕ → bool) : ls.eval x = ls_seq (x ()) :=
by ext n; cases n; simp [ls, ls_seq, eval, propagate_succ2]
def var (n : ℕ) : propagate_struc (fin (n+1)) :=
{ α := empty,
i := by apply_instance,
init_carry := empty.elim,
next_bit := λ carry bits, (empty.elim, bits (fin.last n)) }
@[simp] lemma eval_var (n : ℕ) (x : fin (n+1) → ℕ → bool) : (var n).eval x = x (fin.last n) :=
by ext m; cases m; simp [var, eval, propagate_succ]
def incr : propagate_struc unit :=
{ α := unit,
i := by apply_instance,
init_carry := λ _, tt,
next_bit := λ carry bits, (λ _, bits () && carry (), bxor (bits ()) (carry ())) }
@[simp] lemma eval_incr (x : unit → ℕ → bool) : incr.eval x = incr_seq (x ()) :=
begin
dsimp [incr, eval],
rw [incr_eq_propagate],
congr,
funext b,
cases b; refl
end
def decr : propagate_struc unit :=
{ α := unit,
i := by apply_instance,
init_carry := λ _, tt,
next_bit := λ carry bits, (λ _, bnot (bits ()) && carry (), bxor (bits ()) (carry ())) }
@[simp] lemma eval_decr (x : unit → ℕ → bool) : decr.eval x = decr_seq (x ()) :=
begin
dsimp [decr, eval],
rw [decr_eq_propagate],
congr,
funext b,
cases b; refl
end
end propagate_struc
structure propagate_solution (t : term) extends propagate_struc (fin (arity t)) :=
( good : t.eval_fin = to_propagate_struc.eval )
def compose_unary
(p : propagate_struc unit)
{t : term}
(q : propagate_solution t) :
propagate_struc (fin (arity t)) :=
p.compose
(fin (arity t))
_
(λ _ , id)
(λ _, q.to_propagate_struc)
def compose_binary
(p : propagate_struc bool)
{t₁ t₂ : term}
(q₁ : propagate_solution t₁)
(q₂ : propagate_solution t₂) :
propagate_struc (fin (max (arity t₁) (arity t₂))) :=
p.compose (fin (max (arity t₁) (arity t₂)))
(λ b, fin (cond b (arity t₁) (arity t₂)))
(λ b i, fin.cast_le (by cases b; simp) i)
(λ b, bool.rec q₂.to_propagate_struc q₁.to_propagate_struc b)
@[simp] lemma compose_unary_eval
(p : propagate_struc unit)
{t : term}
(q : propagate_solution t)
(x : fin (arity t) → ℕ → bool) :
(compose_unary p q).eval x = p.eval (λ _, t.eval_fin x) :=
begin
rw [compose_unary, propagate_struc.eval_compose, q.good],
refl
end
@[simp] lemma compose_binary_eval
(p : propagate_struc bool)
{t₁ t₂ : term}
(q₁ : propagate_solution t₁)
(q₂ : propagate_solution t₂)
(x : fin (max (arity t₁) (arity t₂)) → ℕ → bool) :
(compose_binary p q₁ q₂).eval x = p.eval
(λ b, cond b (t₁.eval_fin (λ i, x (fin.cast_le (by simp) i)))
(t₂.eval_fin (λ i, x (fin.cast_le (by simp) i)))) :=
begin
rw [compose_binary, propagate_struc.eval_compose, q₁.good, q₂.good],
congr,
ext b,
cases b; refl
end
instance {α β : Type*} [fintype α] [fintype β] (b : bool) :
fintype (cond b α β) :=
by cases b; dsimp; apply_instance
lemma cond_propagate {α α' β β' : Type}
(init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool),
(α → bool) × bool)
(init_carry' : α' → bool)
(next_bit' : Π (carry : α' → bool) (bits : β' → bool),
(α' → bool) × bool)
{γ : Type} (fβ : β → γ) (fβ' : β' → γ)
(x : γ → ℕ → bool) (b : bool) :
cond b (propagate init_carry next_bit (λ b, (x (fβ b))))
(propagate init_carry' next_bit' (λ b, (x (fβ' b)))) =
propagate (show cond b α α' → bool, from bool.rec init_carry' init_carry b)
(show Π (carry : cond b α α' → bool) (bits : cond b β β' → bool),
(cond b α α' → bool) × bool,
from bool.rec next_bit' next_bit b)
(show cond b β β' → ℕ → bool, from bool.rec (λ b, (x (fβ' b))) (λ b, (x (fβ b))) b) :=
by cases b; refl
def term_eval_eq_propagate : Π (t : term),
propagate_solution t
| (var n) :=
{ to_propagate_struc := propagate_struc.var n,
good := by ext; simp [term.eval_fin] }
| zero :=
{ to_propagate_struc := propagate_struc.zero,
good := by ext; simp [term.eval_fin] }
| one :=
{ to_propagate_struc := propagate_struc.one,
good := by ext; simp [term.eval_fin] }
| neg_one :=
{ to_propagate_struc := propagate_struc.neg_one,
good := by ext; simp [term.eval_fin] }
| (and t₁ t₂) :=
let q₁ := term_eval_eq_propagate t₁ in
let q₂ := term_eval_eq_propagate t₂ in
{ to_propagate_struc := compose_binary propagate_struc.and q₁ q₂,
good := by ext; simp; refl }
| (or t₁ t₂) :=
let q₁ := term_eval_eq_propagate t₁ in
let q₂ := term_eval_eq_propagate t₂ in
{ to_propagate_struc := compose_binary propagate_struc.or q₁ q₂,
good := by ext; simp; refl }
| (xor t₁ t₂) :=
let q₁ := term_eval_eq_propagate t₁ in
let q₂ := term_eval_eq_propagate t₂ in
{ to_propagate_struc := compose_binary propagate_struc.xor q₁ q₂,
good := by ext; simp; refl }
| (ls t) :=
let q := term_eval_eq_propagate t in
{ to_propagate_struc := by dsimp [arity]; exact compose_unary propagate_struc.ls q,
good := by ext; simp; refl }
| (not t) :=
let q := term_eval_eq_propagate t in
{ to_propagate_struc := by dsimp [arity]; exact compose_unary propagate_struc.not q,
good := by ext; simp; refl }
| (add t₁ t₂) :=
let q₁ := term_eval_eq_propagate t₁ in
let q₂ := term_eval_eq_propagate t₂ in
{ to_propagate_struc := compose_binary propagate_struc.add q₁ q₂,
good := by ext; simp; refl }
| (sub t₁ t₂) :=
let q₁ := term_eval_eq_propagate t₁ in
let q₂ := term_eval_eq_propagate t₂ in
{ to_propagate_struc := compose_binary propagate_struc.sub q₁ q₂,
good := by ext; simp; refl }
| (neg t) :=
let q := term_eval_eq_propagate t in
{ to_propagate_struc := by dsimp [arity]; exact compose_unary propagate_struc.neg q,
good := by ext; simp; refl }
| (incr t) :=
let q := term_eval_eq_propagate t in
{ to_propagate_struc := by dsimp [arity]; exact compose_unary propagate_struc.incr q,
good := by ext; simp; refl }
| (decr t) :=
let q := term_eval_eq_propagate t in
{ to_propagate_struc := by dsimp [arity]; exact compose_unary propagate_struc.decr q,
good := by ext; simp; refl }
variables
(init_carry : α → bool)
(next_carry : Π (carry : α → bool) (bits : β → bool), (α → bool))
(next_bit : Π (carry : α → bool) (bits : β → bool), (α → bool) × bool)
variables [fintype α] [fintype α']
open fintype
lemma exists_repeat_carry (seq : β → ℕ → bool) :
∃ n m : fin (2 ^ (card α) + 1),
propagate_carry2 init_carry next_carry seq n =
propagate_carry2 init_carry next_carry seq m ∧
n < m :=
begin
by_contra h,
letI : decidable_eq α := classical.dec_eq α,
push_neg at h,
have := λ a b hab, (le_antisymm (h a b hab) (h b a hab.symm)).symm,
simpa using fintype.card_le_of_injective _ this
end
lemma propagate_carry2_eq_of_seq_eq_lt (seq₁ seq₂ : β → ℕ → bool)
(init_carry : α → bool)
(next_carry : Π (carry : α → bool) (bits : β → bool), (α → bool))
(i : ℕ) (h : ∀ (b) j < i, seq₁ b j = seq₂ b j) :
propagate_carry2 init_carry next_carry seq₁ i =
propagate_carry2 init_carry next_carry seq₂ i :=
begin
induction i with i ih,
{ simp [propagate_carry2] },
{ simp [propagate_carry2, h _ i (nat.lt_succ_self i)],
rw ih,
exact λ b j hj, h b j (nat.lt_succ_of_lt hj) }
end
lemma propagate_eq_of_seq_eq_le (seq₁ seq₂ : β → ℕ → bool)
(init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool), (α → bool) × bool)
(i : ℕ) (h : ∀ (b) j ≤ i, seq₁ b j = seq₂ b j) :
propagate init_carry next_bit seq₁ i =
propagate init_carry next_bit seq₂ i :=
begin
cases i,
{ simp [propagate_zero, h _ 0 (le_refl _)] },
{ simp only [propagate_succ2, propagate_succ2, h _ _ (le_refl _)],
congr' 2,
apply propagate_carry2_eq_of_seq_eq_lt,
exact λ b j hj, h b j (le_of_lt hj) }
end
lemma propagate_carry2_eq_of_carry_eq (seq₁ seq₂ : β → ℕ → bool)
(m n : ℕ)
(h₁ : propagate_carry2 init_carry
(λ carry bits, (next_bit carry bits).1) seq₁ m =
propagate_carry2 init_carry
(λ carry bits, (next_bit carry bits).1) seq₂ n) (x : ℕ)
(h₃ : ∀ y b, y ≤ x → seq₁ b (m + y) = seq₂ b (n + y)) :
propagate_carry2 init_carry
(λ carry bits, (next_bit carry bits).1) seq₁ (m + x) =
propagate_carry2 init_carry
(λ carry bits, (next_bit carry bits).1) seq₂ (n + x) :=
begin
induction x with x ih generalizing seq₁ seq₂,
{ simp * at * },
{ simp only [h₃ x _ (nat.le_succ _),
nat.add_succ, propagate_carry2, add_zero] at *,
rw [ih],
assumption,
exact λ y b h, h₃ y b (nat.le_succ_of_le h) }
end
lemma propagate_eq_of_carry_eq (seq₁ seq₂ : β → ℕ → bool)
(m n : ℕ)
(h₁ : propagate_carry2 init_carry
(λ carry bits, (next_bit carry bits).1) seq₁ m =
propagate_carry2 init_carry
(λ carry bits, (next_bit carry bits).1) seq₂ n) (x : ℕ)
(h₃ : ∀ y b, y ≤ x → seq₁ b (m + y) = seq₂ b (n + y)) :
propagate init_carry next_bit seq₁ (m + x) =
propagate init_carry next_bit seq₂ (n + x) :=
begin
cases x,
{ cases m,
{ cases n,
{ simp [h₃ 0 _ (le_refl _), propagate_carry2, *] at * },
{ simp [*, h₃ 0 _ (le_refl _), propagate_succ2] at *,
rw [← h₁] } },
{ cases n,
{ simp [*, propagate_succ2] at *,
simp [← h₃ 0 _ rfl] },
{ rw [propagate_succ2, h₁, propagate_succ2],
have := h₃ 0,
simp * at * } } },
{ simp only [nat.add_succ, propagate_succ2, add_zero],
simp [← nat.add_succ, h₃ _ _ (le_refl _)],
congr' 2,
apply propagate_carry2_eq_of_carry_eq,
assumption,
assumption }
end
lemma propagate_carry_propagate_carry_add (x : β → ℕ → bool) :
∀ (init_carry : α → bool)
(next_carry : Π (carry : α → bool) (bits : β → bool), (α → bool)),
∀ n i : ℕ,
propagate_carry2 (propagate_carry2 init_carry next_carry x n)
next_carry (λ b k, x b (k + n)) i =
propagate_carry2 init_carry next_carry x (i + n)
| init_carry next_carry 0 0 := by simp [propagate_carry2]
| init_carry next_carry (n+1) 0 :=
by simp [propagate_carry, propagate_carry2_succ]
| init_carry next_carry n (i+1) := begin
rw [propagate_carry2, add_assoc,
propagate_carry_propagate_carry_add],
simp only [nat.one_add, nat.add_one, nat.succ_add, nat.add_succ,
add_zero, propagate_carry2, zero_add]
end
lemma exists_repeat : ∀ (seq : β → ℕ → bool)
(n : ℕ),
∃ (m < 2 ^ (card α)) (seq2 : β → ℕ → bool),
propagate init_carry next_bit seq2 m = propagate init_carry next_bit seq n
| seq n :=
begin
by_cases hn2 : n < 2 ^ card α,
{ exact ⟨n, hn2, seq, rfl⟩ },
{ rcases exists_repeat_carry (propagate_carry2 init_carry (λ c b, (next_bit c b).1) seq
(n - 2 ^ card α))
(λ carry bits, (next_bit carry bits).1)
(λ b i, seq b (i + (n - 2^ (card α)))) with ⟨a, b, h₁, h₂⟩,
simp only [propagate_carry_propagate_carry_add] at h₁,
rcases have wf : n - (b - a) < n,
from nat.sub_lt (lt_of_lt_of_le (pow_pos two_pos _) (le_of_not_gt hn2))
(nat.sub_pos_of_lt h₂),
exists_repeat (λ c i, if i < a + (n - 2 ^ card α) then seq c i else
seq c (i + (b - a))) (n - (b - a)) with ⟨m, hmle, seq2, hm⟩,
use [m, hmle, seq2],
rw [hm], clear hm,
have h1 : n - (b - a) = (a + (n - 2 ^ (card α))) + (2 ^ card α - b),
{ zify,
rw [nat.cast_sub, nat.cast_sub, nat.cast_sub, nat.cast_sub],
ring,
exact nat.le_of_lt_succ b.2,
simp * at *,
exact le_of_lt h₂,
exact le_trans (nat.sub_le _ _) (le_trans (nat.le_of_lt_succ b.2)
(by simp * at *)) },
rw h1,
have h2 : n = (b + (n - 2 ^ card α)) + (2 ^ card α - b),
{ zify,
rw [nat.cast_sub, nat.cast_sub],
ring,
exact nat.le_of_lt_succ b.2,
simp * at *, },
conv_rhs { rw h2 },
refine propagate_eq_of_carry_eq _ _ _ _ _ _ _ _ _,
{ have h : ↑b + (n - 2 ^ card α) = (a + (n - 2 ^ card α)) + (b - a),
{ zify,
rw [nat.cast_sub, nat.cast_sub],
ring,
exact le_of_lt h₂,
simp * at * },
rw [← h₁],
apply propagate_carry2_eq_of_seq_eq_lt,
simp { contextual := tt } },
{ intros y c hc,
simp only [add_lt_iff_neg_left, not_lt_zero', if_false],
congr' 1,
zify,
rw [nat.cast_sub, nat.cast_sub],
ring,
exact le_of_lt h₂,
simp * at * } },
end
using_well_founded { rel_tac := λ _ _, `[exact ⟨_, measure_wf psigma.snd⟩] }
lemma propagate_eq_zero_iff (init_carry : α → bool)
(next_bit : Π (carry : α → bool) (bits : β → bool), (α → bool) × bool) :
(∀ seq, propagate init_carry next_bit seq = zero_seq) ↔
(∀ seq, ∀ i < 2 ^ (card α), propagate init_carry next_bit seq i = ff) :=
begin
split,
{ intros h i _,
simp [h, zero_seq] },
{ intros h seq,
funext i,
rcases exists_repeat init_carry next_bit seq i with ⟨j, hj, seq2, hseq2⟩,
rw [← hseq2, h seq2 j hj, zero_seq] }
end
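-- This is the finiteness principle underlying the decision procedure: a stream
-- produced by a machine with carry type `α` is identically zero iff its first
-- `2 ^ card α` bits are zero, so universally quantified bitstream identities
-- can be checked on finitely many indices.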
lemma eq_iff_xor_seq_eq_zero (seq₁ seq₂ : ℕ → bool) :
(∀ i, seq₁ i = seq₂ i) ↔ (∀ i, xor_seq seq₁ seq₂ i = zero_seq i) :=
begin
simp [function.funext_iff, xor_seq, zero_seq],
split,
{ intros, simp * },
{ intros h a,
specialize h a,
cases (seq₁ a); cases (seq₂ a); simp * at *, }
end
lemma eval_eq_iff_xor_seq_eq_zero (t₁ t₂ : term) :
t₁.eval = t₂.eval ↔ (t₁.xor t₂).eval_fin = λ _, zero_seq :=
begin
simp only [function.funext_iff, term.eval, term.eval_fin,
← eq_iff_xor_seq_eq_zero, ← eval_fin_eq_eval],
split,
{ intros h seq n,
have := h (λ j, if hj : j < (arity (t₁.xor t₂)) then seq ⟨j, hj⟩ else λ _, ff) n,
simp at this,
convert this },
{ intros h seq m,
exact h (λ j, seq j) _ }
end
/-
Copyright (c) 2019 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Johannes Hölzl
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.linear_algebra.finsupp
import Mathlib.linear_algebra.direct_sum.tensor_product
import Mathlib.PostPort
universes u u_1 v u_2 u_3 u_4 u_5
namespace Mathlib
/-!
# Results on direct sums and finitely supported functions.
1. The linear equivalence between finitely supported functions `ι →₀ M` and
the direct sum of copies of `M` indexed by `ι`.
2. The tensor product of `ι →₀ M` and `κ →₀ N` is linearly equivalent to `(ι × κ) →₀ (M ⊗ N)`.
-/
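/- As a concrete reading of result 1 (an informal sketch, not part of the
original file): for `ι = fin 2` and `M = R`, the equivalence identifies a
finitely supported function `f : fin 2 →₀ R` with the direct-sum element whose
components are `f 0` and `f 1`; `finsupp_lequiv_direct_sum_single` below pins
this down on the generators `finsupp.single i m`. -/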
/-- The finitely supported functions ι →₀ M are in linear equivalence with the direct sum of
copies of M indexed by ι. -/
def finsupp_lequiv_direct_sum (R : Type u) (M : Type v) [ring R] [add_comm_group M] [module R M]
(ι : Type u_1) [DecidableEq ι] : linear_equiv R (ι →₀ M) (direct_sum ι fun (i : ι) => M) :=
linear_equiv.of_linear
(coe_fn finsupp.lsum
((fun (this : ι → linear_map R M (direct_sum ι fun (i : ι) => M)) => this)
(direct_sum.lof R ι fun (i : ι) => M)))
(direct_sum.to_module R ι (ι →₀ M) finsupp.lsingle) sorry sorry
@[simp] theorem finsupp_lequiv_direct_sum_single (R : Type u) (M : Type v) [ring R]
[add_comm_group M] [module R M] (ι : Type u_1) [DecidableEq ι] (i : ι) (m : M) :
coe_fn (finsupp_lequiv_direct_sum R M ι) (finsupp.single i m) =
coe_fn (direct_sum.lof R ι (fun (i : ι) => M) i) m :=
finsupp.sum_single_index
(linear_map.map_zero
((fun (this : ι → linear_map R M (direct_sum ι fun (i : ι) => M)) => this)
(direct_sum.lof R ι fun (i : ι) => M) i))
@[simp] theorem finsupp_lequiv_direct_sum_symm_lof (R : Type u) (M : Type v) [ring R]
[add_comm_group M] [module R M] (ι : Type u_1) [DecidableEq ι] (i : ι) (m : M) :
coe_fn (linear_equiv.symm (finsupp_lequiv_direct_sum R M ι))
(coe_fn (direct_sum.lof R ι (fun (i : ι) => M) i) m) =
finsupp.single i m :=
direct_sum.to_module_lof R i m
/-- The tensor product of ι →₀ M and κ →₀ N is linearly equivalent to (ι × κ) →₀ (M ⊗ N). -/
def finsupp_tensor_finsupp (R : Type u_1) (M : Type u_2) (N : Type u_3) (ι : Type u_4)
(κ : Type u_5) [comm_ring R] [add_comm_group M] [module R M] [add_comm_group N] [module R N] :
linear_equiv R (tensor_product R (ι →₀ M) (κ →₀ N)) (ι × κ →₀ tensor_product R M N) :=
linear_equiv.trans
(tensor_product.congr (finsupp_lequiv_direct_sum R M ι) (finsupp_lequiv_direct_sum R N κ))
(linear_equiv.trans (tensor_product.direct_sum R ι κ (fun (i : ι) => M) fun (i : κ) => N)
(linear_equiv.symm (finsupp_lequiv_direct_sum R (tensor_product R M N) (ι × κ))))
@[simp] theorem finsupp_tensor_finsupp_single (R : Type u_1) (M : Type u_2) (N : Type u_3)
(ι : Type u_4) (κ : Type u_5) [comm_ring R] [add_comm_group M] [module R M] [add_comm_group N]
[module R N] (i : ι) (m : M) (k : κ) (n : N) :
coe_fn (finsupp_tensor_finsupp R M N ι κ)
(tensor_product.tmul R (finsupp.single i m) (finsupp.single k n)) =
finsupp.single (i, k) (tensor_product.tmul R m n) :=
sorry
@[simp] theorem finsupp_tensor_finsupp_symm_single (R : Type u_1) (M : Type u_2) (N : Type u_3)
(ι : Type u_4) (κ : Type u_5) [comm_ring R] [add_comm_group M] [module R M] [add_comm_group N]
[module R N] (i : ι × κ) (m : M) (n : N) :
coe_fn (linear_equiv.symm (finsupp_tensor_finsupp R M N ι κ))
(finsupp.single i (tensor_product.tmul R m n)) =
tensor_product.tmul R (finsupp.single (prod.fst i) m) (finsupp.single (prod.snd i) n) :=
sorry
end Mathlib
State Before: b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr : 0 ≤ r
⊢ ⌊logb (↑b) r⌋ = Int.log b r State After: case inl
b✝ x y : ℝ
b : ℕ
hb : 1 < b
hr : 0 ≤ 0
⊢ ⌊logb (↑b) 0⌋ = Int.log b 0
case inr
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
⊢ ⌊logb (↑b) r⌋ = Int.log b r Tactic: obtain rfl | hr := hr.eq_or_lt State Before: case inr
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
⊢ ⌊logb (↑b) r⌋ = Int.log b r State After: case inr
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ⌊logb (↑b) r⌋ = Int.log b r Tactic: have hb1' : 1 < (b : ℝ) := Nat.one_lt_cast.mpr hb State Before: case inr
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ⌊logb (↑b) r⌋ = Int.log b r State After: case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ⌊logb (↑b) r⌋ ≤ Int.log b r
case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ Int.log b r ≤ ⌊logb (↑b) r⌋ Tactic: apply le_antisymm State Before: case inl
b✝ x y : ℝ
b : ℕ
hb : 1 < b
hr : 0 ≤ 0
⊢ ⌊logb (↑b) 0⌋ = Int.log b 0 State After: no goals Tactic: rw [logb_zero, Int.log_zero_right, Int.floor_zero] State Before: case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ⌊logb (↑b) r⌋ ≤ Int.log b r State After: case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ↑b ^ ↑⌊logb (↑b) r⌋ ≤ r Tactic: rw [← Int.zpow_le_iff_le_log hb hr, ← rpow_int_cast b] State Before: case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ↑b ^ ↑⌊logb (↑b) r⌋ ≤ r State After: case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ↑b ^ ↑⌊logb (↑b) r⌋ ≤ ↑b ^ logb (↑b) r Tactic: refine' le_of_le_of_eq _ (rpow_logb (zero_lt_one.trans hb1') hb1'.ne' hr) State Before: case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ↑b ^ ↑⌊logb (↑b) r⌋ ≤ ↑b ^ logb (↑b) r State After: no goals Tactic: exact rpow_le_rpow_of_exponent_le hb1'.le (Int.floor_le _) State Before: case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ Int.log b r ≤ ⌊logb (↑b) r⌋ State After: case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ↑b ^ Int.log b r ≤ r Tactic: rw [Int.le_floor, le_logb_iff_rpow_le hb1' hr, rpow_int_cast] State Before: case inr.a
b✝ x y : ℝ
b : ℕ
r : ℝ
hb : 1 < b
hr✝ : 0 ≤ r
hr : 0 < r
hb1' : 1 < ↑b
⊢ ↑b ^ Int.log b r ≤ r State After: no goals Tactic: exact Int.zpow_log_le_self hb hr
(* Source: https://github.com/wjzz/PumpingLemma/blob/master/Dfa.v *)
(* In this file we try to formalize some notions from the
   field of formal languages, more precisely
   Deterministic Finite-State Automata.
*)
(** This file has been modified so that only simple tactics are used.
    The proofs are divided into their individual subproofs, made visible by
    the bullet characters -, +, *. To nest more than 3 levels deep, the same
    characters are reused, but surrounded by curly braces.
*)
Load DFA_cons.
Section Transitions.
Fixpoint word_replicate (n : nat) (l : list Sigma) : list Sigma :=
match n with
| O => nil
| S n' => l ++ word_replicate n' l
end.
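(* Two computational sanity checks (added as a sketch, assuming [Sigma] is in
   scope from DFA_cons; both hold by simplification): replicating zero times
   yields [nil], and replicating once yields the word itself, up to the
   trailing [nil]. *)
Example word_replicate_zero : forall l : list Sigma, word_replicate 0 l = nil.
Proof. intros. reflexivity. Qed.
Example word_replicate_one : forall l : list Sigma, word_replicate 1 l = l ++ nil.
Proof. intros. reflexivity. Qed.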
(* If there is a loop in the automaton, it can be used to pump up the word
   at that position while staying in the same state. *)
Theorem delta_hat_cons_loop: forall n : nat, forall q : Q, forall xs : list Sigma,
delta_hat_cons q xs = q -> delta_hat_cons q (word_replicate n xs) = q.
Proof.
induction n as [|n'].
- intros q xs H.
simpl.
reflexivity.
(* n = S n; *)
- intros q xs H.
simpl.
rewrite delta_hat_cons_app.
rewrite H.
apply IHn'.
assumption.
Qed.
Fixpoint inits {X : Type} (l : list X) : list (list X) :=
match l with
| nil => nil :: nil
| x :: xs => nil :: map (cons x) (inits xs)
end.
Eval compute in (inits (1 :: 2 :: nil)).
Eval compute in (inits nil : list (list nat)).
Theorem inits_len : forall X : Type, forall l : list X,
length (inits l) = S (length l).
Proof.
induction l.
- simpl.
reflexivity.
(* l = cons _ _ *)
- simpl.
rewrite map_length.
congruence.
Qed.
Theorem inits_dec_1 :
forall X : Type,
forall l : list X,
forall y : list X,
forall xs ys : list (list X),
inits l = xs ++ (y :: ys) ->
exists zs : list X, l = y ++ zs.
Proof.
intros X l.
induction l as [|h l'].
(* l ist nil *)
- intros y xs ys H.
(* y muss nil sein *)
simpl in H.
assert (xs = nil) as Hxs.
+ destruct xs.
* { reflexivity. }
* { destruct xs.
- simpl in H.
inversion H.
- simpl in H.
inversion H.
}
+ subst xs.
simpl in H.
inversion H.
exists nil.
simpl.
reflexivity.
(* l ist h :: l' *)
- intros y xs ys H.
simpl in H.
destruct xs.
+ simpl in H.
inversion H.
exists (h :: l').
simpl.
reflexivity.
+ simpl in H.
inversion H.
apply map_dec_2 in H2.
destruct H2 as [xss].
destruct H0 as [yss].
destruct H0.
destruct H2.
destruct yss as [|y0 yss].
* { inversion H3. }
* { simpl in H3.
apply IHl' in H0.
destruct H0 as [zs].
inversion H3.
simpl.
exists zs.
subst.
reflexivity.
}
Qed.
Theorem inits_dec_2 :
forall X : Type,
forall l : list X,
forall y z : list X,
forall xs ys zs : list (list X),
inits l = xs ++ (y :: ys) ++ (z :: zs) ->
exists ds : list X, z = y ++ ds /\ length ds > 0.
Proof.
intros X l.
induction l as [|h l'].
- intros y z xs ys zs H.
(* Impossible case: inits nil has 1 element, while
   the list on the RHS has >= 2 elements *)
destruct xs.
+ simpl in H.
inversion H.
subst y.
simpl in *.
inversion H.
destruct ys.
* { simpl in *.
inversion H1.
}
* { simpl in *.
inversion H1.
}
+ simpl in *.
inversion H.
subst l.
inversion H.
destruct xs.
* { simpl in *.
inversion H2.
}
* { simpl in *.
inversion H1.
}
(* l = h :: l' *)
(* we need more info *)
- destruct l' as [|h' ls].
+ intros y z xs ys zs H.
simpl in *.
destruct xs.
* { simpl in *.
inversion H.
subst y.
inversion H.
destruct ys.
- simpl in *.
inversion H1.
exists (h :: nil).
split.
+ reflexivity.
+ simpl.
unfold gt.
unfold lt.
apply le_n.
- inversion H1.
simpl in *.
clear H2.
clear H.
destruct ys.
* { simpl in *.
inversion H4.
}
* { simpl in *.
inversion H4.
} }
* { inversion H.
destruct xs.
- simpl in *.
inversion H2.
destruct ys.
+ simpl in *.
inversion H4.
+ simpl in *.
inversion H4.
- inversion H2.
destruct xs.
+ simpl in H4.
inversion H4.
+ simpl in H4.
inversion H4.
}
(* l = h :: h' :: ls *)
(* finally we can use the ind. hyp. *)
+ intros y z xs ys zs H.
remember (h' :: ls) as l.
simpl in *.
destruct xs.
* { simpl in *.
inversion H.
exists z.
split.
- simpl.
reflexivity.
- apply map_dec_2 in H2.
destruct H2 as [xss].
destruct H0 as [yss].
destruct H0.
destruct H2.
destruct yss.
+ simpl in *.
inversion H3.
+ simpl in *.
inversion H3.
simpl.
unfold gt.
unfold lt.
apply le_n_S.
apply le_0_n.
}
* { simpl in *.
inversion H.
subst l0.
clear H.
assert (map (cons h) (inits l) = xs ++ (y :: ys) ++ (z :: zs)).
- simpl.
assumption.
- apply map_dec_3 in H.
destruct H as [xss].
destruct H as [yss].
destruct H as [zss].
destruct H.
destruct H0.
destruct H1.
destruct yss.
+ simpl in *.
inversion H1.
+ simpl in *.
inversion H1.
destruct zss.
* { simpl in *.
inversion H3.
}
* { simpl in *.
inversion H3.
apply IHl' in H.
destruct H as [ds].
destruct H.
exists ds.
split.
- subst.
reflexivity.
- assumption.
} }
Qed.
Theorem inits_dec :
forall X : Type,
forall l : list X,
forall b c : list X,
forall ass bs cs : list (list X),
inits l = ass ++ (b :: bs) ++ (c :: cs) ->
(exists ds : list X, c = b ++ ds /\ length ds > 0) /\
exists es : list X, l = c ++ es.
Proof.
intros X l b c ass bs cs H.
remember H as H2.
clear HeqH2.
apply inits_dec_2 in H.
destruct H as [ds].
split.
- exists ds.
destruct H.
split.
+ apply H.
+ apply H0.
- rewrite app_assoc in H2.
apply inits_dec_1 in H2.
assumption.
Qed.
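(* Intuition for [inits_dec] on a concrete list:
   inits (1 :: 2 :: 3 :: nil) computes to
   nil :: (1 :: nil) :: (1 :: 2 :: nil) :: (1 :: 2 :: 3 :: nil) :: nil,
   so any later entry [c] properly extends any earlier entry [b], and every
   entry is a prefix of the full list. *)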
Definition prefixes (q : Q) (l : list Sigma) : list Q :=
map (delta_hat_cons q) (inits l).
Lemma prefixes_len : forall l : list Sigma, forall q : Q,
length (prefixes q l) = S (length l).
Proof.
intros.
unfold prefixes.
rewrite map_length.
rewrite inits_len.
reflexivity.
Qed.
(** The Pumping Lemma: *)
Theorem pumping_lemma : forall w : list Sigma,
accepted_word_cons w -> Q_size <= length w ->
exists xs : list Sigma,
exists ys : list Sigma,
exists zs : list Sigma,
length ys > 0 /\
(* length ys < Q_size -> *)
w = xs ++ ys ++ zs /\
forall n : nat,
accepted_word (xs ++ (word_replicate n ys) ++ zs).
Proof.
intros w acc len_w.
(* Let's look at which state the DFA is in after reading
   epsilon, w0, w0w1, ..., w. *)
set (pref := prefixes q0 w).
assert (Hpref : Q_size < length pref).
- unfold pref.
rewrite prefixes_len.
unfold lt.
apply le_n_S.
apply len_w.
- assert (HRep : repeats pref).
+ apply pigeon_hole_Repeats.
apply Hpref.
+ set (Hx := repeats_decomp Q pref HRep).
destruct Hx.
destruct H.
destruct H.
destruct H.
unfold pref in H.
unfold prefixes in H.
set (Hx := map_dec_3 (list Sigma) Q (ext q0)
(inits w) x0 (x :: x1) (x :: x2) H).
destruct Hx.
destruct H0.
destruct H0.
destruct H0.
destruct H1.
destruct H2.
(* x4 and x5 cannot be nil *)
destruct x4 as [|y x4].
* { inversion H2. }
* { destruct x5 as [|y2 x5].
- inversion H3.
- set (Hx := inits_dec _ w y y2 x3 x4 x5 H0).
destruct Hx.
destruct H4.
destruct H5.
destruct H4.
exists y.
exists x6.
exists x7.
split.
+ apply H6.
+ split.
* { subst y2.
rewrite H5.
rewrite app_assoc.
reflexivity.
}
* { unfold accepted_word in *.
intros n.
assert (ext q0 (y ++ (word_replicate n x6) ++ x7) = ext q0 w).
- rewrite ext_app.
rewrite ext_app.
simpl in H2.
inversion H2.
simpl in H3.
inversion H3.
assert (ext (ext q0 y) x6 = ext q0 y).
+ pattern (ext q0 y) at 2.
rewrite H8.
rewrite <- H10.
rewrite H4.
rewrite ext_app.
reflexivity.
+ rewrite ext_loop.
rewrite H5.
rewrite ext_app.
congruence.
rewrite H7.
reflexivity.
- rewrite H7.
apply acc.
} }
Qed.
wiki:WikiPedia:Victor_H._Fazio Vic Fazio served as congressman for California's Third District for 20 years, from 1979 to 1999. He currently sits on the board of wiki:Wikipedia:Northrop_Grumman Northrop Grumman, a defense conglomerate which makes, among other things, wiki:Wikipedia:Nimitz_class_aircraft_carrier really big aircraft carriers, wiki:Wikipedia:Nuclear_submarine nuclear submarines, wiki:Wikipedia:B2_Spirit stealth bombers, and wiki:Wikipedia:Strategic_Defense_Initiative space lasers. He also has a wildlife area (the Vic Fazio Yolo Wildlife Area) and a highway (Highway 113) named after him.
myTestRule {
#Input parameter is:
# Property list
#Create key-value string
msiString2KeyValPair(*Str,*KVpair);
#Write out string
writeLine("stdout","Initial property list is");
msiPrintKeyValPair("stdout",*KVpair);
#Clear properties list
msiPropertiesClear(*KVpair);
#Verify property was cleared
writeLine("stdout","Changed property list is null");
msiPrintKeyValPair("stdout",*KVpair);
}
INPUT *Str="key1=value1"
OUTPUT ruleExecOut
[STATEMENT]
lemma brc_cond_abs: "brc_cond \<Sigma> \<longleftrightarrow> (brc_\<alpha> \<Sigma>)\<in>br'_cond"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. brc_cond \<Sigma> = (brc_\<alpha> \<Sigma> \<in> br'_cond)
[PROOF STEP]
apply (cases \<Sigma>)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a b c. \<Sigma> = (a, b, c) \<Longrightarrow> brc_cond \<Sigma> = (brc_\<alpha> \<Sigma> \<in> br'_cond)
[PROOF STEP]
apply (simp add: brc_cond_def br'_cond_def brc_\<alpha>_def)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
(** * Poly: Polymorphism and Higher-Order Functions *)
(* Final reminder: do not place solutions to the exercises anywhere
   they can be publicly accessed. Thank you!! *)
(* To get Coq to stop emitting some annoying warnings: *)
Set Warnings "-notation-overridden,-parsing".
Require Export Lists.
(** * Polymorphism *)
(** In this chapter we continue developing basic concepts of functional
    programming. The critical new ideas are _polymorphism_ (abstracting
    functions over the types of the data they manipulate) and _higher-order
    functions_ (treating functions as data). We begin with polymorphism. *)
(* ================================================================= *)
(** ** Polymorphic Lists *)
(** For the last couple of chapters, we've been working just with lists of
    numbers. Obviously, interesting programs also need to be able to
    manipulate lists with elements of other types: lists of strings, lists
    of booleans, lists of lists, etc. We _could_ define a new inductive
    datatype for each of these, for example... *)
Inductive boollist : Type :=
| bool_nil : boollist
| bool_cons : bool -> boollist -> boollist.
(** ... but defining a new type each time would quickly become tedious,
    partly because we have to make up different constructor names for each
    datatype, but mostly because we would also need to define new versions
    of all our list manipulating functions ([length], [rev], etc.) for each
    new datatype definition. *)
(** To avoid all this repetition, Coq supports _polymorphic_ inductive type
    definitions. For example, here is a _polymorphic list_ datatype. *)
Inductive list (X:Type) : Type :=
| nil : list X
| cons : X -> list X -> list X.
(** This is exactly like the definition of [natlist] from the previous
    chapter, except that the [nat] argument to the [cons] constructor has
    been replaced by an arbitrary type [X], a binding for [X] has been added
    to the header, and the occurrences of [natlist] in the types of the
    constructors have been replaced by [list X]. (We can re-use the
    constructor names [nil] and [cons] because the earlier definition of
    [natlist] was inside a [Module] definition that is now out of scope.)
    What sort of thing is [list] itself? One good way to think about it is
    that [list] is a _function_ from [Type]s to [Inductive] definitions; or,
    to put it another way, [list] is a function from [Type]s to [Type]s.
    For any particular type [X], the type [list X] is the [Inductive]ly
    defined set of lists whose elements are of type [X]. *)
Check list.
(* ===> list : Type -> Type *)
(** The parameter [X] in the definition of [list] becomes a parameter to the
    constructors [nil] and [cons] -- that is, [nil] and [cons] are
    polymorphic constructors, and we must supply the type of the list they
    are building as an argument. For example, [nil nat] is the empty list of
    type [nat]. *)
Check (nil nat).
(* ===> nil nat : list nat *)
(** Similarly, [cons nat] adds an element of type [nat] to a list of type
    [list nat]. Here is an example of forming a list containing just the
    natural number 3.
*)
Check (cons nat 3 (nil nat)).
(* ===> cons nat 3 (nil nat) : list nat *)
(** What is the type of [nil]? We can read off the type [list X] from the
    definition, but this does not say what is bound to [X], the argument of
    [list]. [Type -> list X] does not explain the meaning of [X].
    [(X : Type) -> list X] comes closer. Coq's notation for this situation
    is [forall X : Type, list X]. *)
Check nil.
(* ===> nil : forall X : Type, list X *)
(** Similarly, the type of [cons] from the definition reads as
    [X -> list X -> list X], but explaining the meaning of [X] with Coq's
    notation gives [forall X, X -> list X -> list X].
*)
Check cons.
(* ===> cons : forall X : Type, X -> list X -> list X *)
(** (A side note on notation: in .v files, the "forall" quantifier is
    spelled out in letters. In the HTML files generated from them, and in
    the way various IDEs display .v files (with certain settings of their
    display controls), [forall] is usually typeset as the familiar
    mathematical "upside down A," but you will still see the spelled-out
    "forall" in a few places. This is just a quirk of typesetting: there is
    no difference in meaning.) *)
(** Having to supply a type argument for each use of a list constructor may
    seem like an awkward burden, but we will soon see ways of reducing that
    burden. *)
Check (cons nat 2 (cons nat 1 (nil nat))).
(** (We have written [nil] and [cons] explicitly here because we have not
    yet defined the [ [] ] and [::] notations for the new version of lists.
    We will do that in a bit.) *)
(** We can now go back and make polymorphic versions of all the
    list-processing functions that we wrote before. Here is [repeat], for
    example: *)
Fixpoint repeat (X : Type) (x : X) (count : nat) : list X :=
match count with
| 0 => nil X
| S count' => cons X x (repeat X x count')
end.
(** As with [nil] and [cons], we can use [repeat] by applying it first to a
    type and then to an element of this type (and a number):
*)
Example test_repeat1 :
repeat nat 4 2 = cons nat 4 (cons nat 4 (nil nat)).
Proof. reflexivity. Qed.
(** To use [repeat] to build other kinds of lists, we simply instantiate it
    with an appropriate type parameter: *)
Example test_repeat2 :
repeat bool false 1 = cons bool false (nil bool).
Proof. reflexivity. Qed.
Module MumbleGrumble.
(** **** Exercise: 2 stars (mumble_grumble) *)
(** Consider the following two inductively defined types. *)
Inductive mumble : Type :=
| a : mumble
| b : mumble -> nat -> mumble
| c : mumble.
Inductive grumble (X:Type) : Type :=
| d : mumble -> grumble X
| e : X -> grumble X.
(** Which of the following are well-typed elements of [grumble X] for some
    type [X]?
      - [d (b a 5)]
      - [d mumble (b a 5)]
      - [d bool (b a 5)]
      - [e bool true]
      - [e mumble (b c 0)]
      - [e bool (b c 0)]
      - [c]
(* FILL IN HERE *)
*)
(** [] *)
End MumbleGrumble.
(* ----------------------------------------------------------------- *)
(** *** Type Annotation Inference *)
(** Let's write the definition of [repeat] again, but this time we won't
    specify the types of any of the arguments. Will Coq still accept this
    definition? *)
Fixpoint repeat' X x count : list X :=
match count with
| 0 => nil X
| S count' => cons X x (repeat' X x count')
end.
(** Indeed it will. Let's see what type Coq has assigned to [repeat']: *)
Check repeat'.
(* ===> forall X : Type, X -> nat -> list X *)
Check repeat.
(* ===> forall X : Type, X -> nat -> list X *)
(** It has exactly the same type as [repeat]. Coq was able to use _type
    inference_ to deduce what the types of [X], [x], and [count] must be,
    based on how they are used. For example, since [X] is used as an
    argument to [cons], it must be a [Type], since [cons] expects a [Type]
    as its first argument; and matching [count] with [0] and [S] means it
    must be a [nat]; and so on.
    This powerful facility means we don't always have to write explicit
    type annotations everywhere. Of course, explicit type annotations are
    still quite useful as documentation and for checking that we wrote what
    we intended. You should try to find a balance in your own code between
    too many type annotations (which can clutter and distract) and too few
    (which forces readers to perform type inference in their heads in order
    to understand your code).
*)
(* ----------------------------------------------------------------- *)
(** *** Type Argument Synthesis *)
(** To use a polymorphic function, we need to pass it type arguments along
    with its other arguments. For example, the recursive call in the body of
    the [repeat] function above must pass along the type [X]. But since the
    second argument to [repeat] is an element of type [X], it seems entirely
    obvious that the first argument can only be [X] -- so why should we have
    to write this type argument explicitly?
    Fortunately, Coq permits us to avoid this kind of redundancy. In place
    of any type argument we can write the "implicit argument" [_], which can
    be read as "Please figure out for yourself what goes here." More
    precisely, when Coq encounters a [_], it will _unify_ all locally
    available information -- the type of the function being applied, the
    types of the other arguments, and the type expected by the context in
    which the application appears -- to determine what concrete type should
    replace the [_].
    This type argument synthesis may sound similar to type annotation
    inference; indeed, the two procedures rely on the same mechanisms.
    Instead of simply omitting the types of some arguments to a function,
    like
    repeat' X x count : list X :=
    we can also replace the types with [_]
    repeat' (X : _) (x : _) (count : _) : list X :=
    to tell Coq to attempt to infer the missing information.
    Using implicit arguments, the [repeat] function can be written like
    this: *)
Fixpoint repeat'' X x count : list X :=
match count with
| 0 => nil _
| S count' => cons _ x (repeat'' _ x count')
end.
(** In this instance, we don't save much by writing [_] instead of [X]. But
    in many cases the difference in both keystrokes and readability is
    nontrivial. For example, suppose we want to write down a list containing
    the numbers [1], [2], and [3]. Instead of writing this... *)
Definition list123 :=
cons nat 1 (cons nat 2 (cons nat 3 (nil nat))).
(** ...we can use argument synthesis to write this: *)
Definition list123' :=
cons _ 1 (cons _ 2 (cons _ 3 (nil _))).
(* ----------------------------------------------------------------- *)
(** *** Implicit Arguments *)
(** We can go further and even avoid writing [_]'s in most cases by telling
    Coq _always_ to infer the type argument(s) of a given function.
    The [Arguments] directive below specifies the name of a function and
    then lists its argument names, with curly braces around any arguments to
    be treated as implicit. (If some arguments of a definition don't have
    names, as is often the case for constructors, they can be marked with
    the wildcard pattern [_].) *)
Arguments nil {X}.
Arguments cons {X} _ _.
Arguments repeat {X} x count.
(** Now we don't have to supply type arguments at all: *)
Definition list123'' := cons 1 (cons 2 (cons 3 nil)).
(** Alternatively, we can declare an argument to be implicit when defining
    the function itself, by surrounding it in curly braces instead of
    parens. For example:
*)
Fixpoint repeat''' {X : Type} (x : X) (count : nat) : list X :=
match count with
| 0 => nil
| S count' => cons x (repeat''' x count')
end.
(** (Note that we didn't even have to provide a type argument to the
    recursive call to [repeat''']; indeed, providing one would be invalid!)
    We will use the latter style whenever possible, but we will continue to
    use explicit [Arguments] declarations for [Inductive] constructors. The
    reason is that marking the parameter of an inductive type as implicit
    causes the type itself to become implicit. For instance, suppose we
    declare the [list] type as follows:
*)
Inductive list' {X:Type} : Type :=
| nil' : list'
| cons' : X -> list' -> list'.
(** Because [X] is declared implicit for the _entire_ inductive definition
    including [list'] itself, we now have to write just [list'] -- rather
    than [list' nat] or [list' bool] or whatever -- whether we are talking
    about lists of numbers, booleans, or anything else. This is a step too
    far.
*)
(** Let's finish by re-implementing a couple of other standard list
    functions on our new polymorphic lists... *)
Fixpoint app {X : Type} (l1 l2 : list X)
: (list X) :=
match l1 with
| nil => l2
| cons h t => cons h (app t l2)
end.
Fixpoint rev {X:Type} (l:list X) : list X :=
match l with
| nil => nil
| cons h t => app (rev t) (cons h nil)
end.
Fixpoint length {X : Type} (l : list X) : nat :=
match l with
| nil => 0
| cons _ l' => S (length l')
end.
Example test_rev1 :
rev (cons 1 (cons 2 nil)) = (cons 2 (cons 1 nil)).
Proof. reflexivity. Qed.
Example test_rev2:
rev (cons true nil) = cons true nil.
Proof. reflexivity. Qed.
Example test_length1: length (cons 1 (cons 2 (cons 3 nil))) = 3.
Proof. reflexivity. Qed.
(* ----------------------------------------------------------------- *)
(** *** Supplying Type Arguments Explicitly *)
(** One small problem with declaring arguments [Implicit] is that,
    occasionally, Coq does not have enough local information to determine a
    type argument; in such cases, we need to tell Coq that we want to supply
    the argument explicitly just this once. For example, suppose we write
    this: *)
Fail Definition mynil := nil.
(** (The [Fail] qualifier that appears before [Definition] can be used with
    _any_ command; it says that the command really does fail when executed.
    If the command does fail, Coq prints the corresponding error message,
    but then continues processing the rest of the file.)
    Here, Coq gives us an error because it doesn't know what type argument
    to supply to [nil]. We can help it by providing an explicit type
    declaration, so that Coq has more information available when it gets to
    the "application" of [nil]:
*)
Definition mynil : list nat := nil.
(** Alternatively, we can force the implicit arguments to be explicit by
    prefixing the function name with [@]. *)
Check @nil.
Definition mynil' := @nil nat.
(** Using argument synthesis and implicit arguments, we can define
    convenient notation for lists, as before. Since we have made the
    constructor type arguments implicit, Coq will know to automatically
    infer them whenever we use the notations. *)
Notation "x :: y" := (cons x y)
(at level 60, right associativity).
Notation "[ ]" := nil.
Notation "[ x ; .. ; y ]" := (cons x .. (cons y []) ..).
Notation "x ++ y" := (app x y)
(at level 60, right associativity).
(** Now lists can be written just the way we'd hope: *)
Definition list123''' := [1; 2; 3].
(* ----------------------------------------------------------------- *)
(** *** Exercises *)
(** **** Exercise: 2 stars, optional (poly_exercises) *)
(** Here are a few simple exercises, just like the ones in the [Lists]
    chapter, for practice with polymorphism. Complete the proofs below. *)
Theorem app_nil_r : forall (X:Type), forall l:list X,
l ++ [] = l.
Proof.
(* FILL IN HERE *) Admitted.
Theorem app_assoc : forall A (l m n:list A),
l ++ m ++ n = (l ++ m) ++ n.
Proof.
(* FILL IN HERE *) Admitted.
Lemma app_length : forall (X:Type) (l1 l2 : list X),
length (l1 ++ l2) = length l1 + length l2.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, optional (more_poly_exercises) *)
(** Here are some slightly more interesting ones... *)
Theorem rev_app_distr: forall X (l1 l2 : list X),
rev (l1 ++ l2) = rev l2 ++ rev l1.
Proof.
(* FILL IN HERE *) Admitted.
Theorem rev_involutive : forall X : Type, forall l : list X,
rev (rev l) = l.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ================================================================= *)
(** ** Polymorphic Pairs *)
(** Following the same pattern, the type definition we gave in the last
    chapter for pairs of numbers can be generalized to _polymorphic pairs_,
    often called _products_: *)
Inductive prod (X Y : Type) : Type :=
| pair : X -> Y -> prod X Y.
Arguments pair {X} {Y} _ _.
(** As with lists, we make the type arguments implicit and define the
    familiar concrete notation. *)
Notation "( x , y )" := (pair x y).
(** We can also use the [Notation] mechanism to define the standard notation for product _types_: *)
Notation "X * Y" := (prod X Y) : type_scope.
(** (The annotation [: type_scope] tells Coq that this abbreviation should
    only be used when parsing types. This avoids a clash with the
    multiplication symbol.) *)
(** It is easy at first to get [(x,y)] and [X*Y] confused. Remember that
    [(x,y)] is a _value_ built from two other values, while [X*Y] is a
    _type_ built from two other types. If [x] has type [X] and [y] has type
    [Y], then [(x,y)] has type [X*Y]. *)
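(** For example, pairing a number with a boolean gives a value of a product
    type built from [nat] and [bool]: *)
Check (3, true).
(* ===> (3, true) : nat * bool *)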
(** The first and second projection functions now look pretty much as they
    would in any functional programming language. *)
Definition fst {X Y : Type} (p : X * Y) : X :=
match p with
| (x, y) => x
end.
Definition snd {X Y : Type} (p : X * Y) : Y :=
match p with
| (x, y) => y
end.
(** The following function takes two lists and combines them into a list of
    pairs. In other functional languages it is often called [zip]; we call
    it [combine] for consistency with Coq's standard library. *)
Fixpoint combine {X Y : Type} (lx : list X) (ly : list Y)
: list (X*Y) :=
match lx, ly with
| [], _ => []
| _, [] => []
| x :: tx, y :: ty => (x, y) :: (combine tx ty)
end.
(** **** Exercise: 1 star, optional (combine_checks) *)
(** Try answering the following questions on paper and checking your
    answers in Coq:
    - What is the type of [combine] (i.e., what does [Check @combine]
      print?)
    - What does
        Compute (combine [1;2] [false;false;true;true]).
      print?
*)
(** [] *)
(** **** Exercise: 2 stars, recommended (split) *)
(** 함수 [split]은 [combine]의 오른쪽 역 함수이다. 쌍들의 리스트를
받아 리스트들의 쌍을 리턴한다. 많은 함수형 언어에서 [unzip]이라
부른다.
아래에서 [split]의 정의를 채우시오. 반드시 주어진 단위 테스트를
통과하도록 확인하시오. *)
Fixpoint split {X Y : Type} (l : list (X*Y))
: (list X) * (list Y)
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example test_split:
split [(1,false);(2,false)] = ([1;2],[false;false]).
Proof.
  (* FILL IN HERE *) Admitted.
(** [] *)
(* ================================================================= *)
(** ** Polymorphic Options *)
(** One last polymorphic type for now: _polymorphic options_,
    which generalize [natoption] from the previous chapter: *)
Inductive option (X:Type) : Type :=
| Some : X -> option X
| None : option X.
Arguments Some {X} _.
Arguments None {X}.
(** We can now rewrite the [nth_error] function so that it works
    with any type of lists. *)
Fixpoint nth_error {X : Type} (l : list X) (n : nat)
: option X :=
match l with
| [] => None
| a :: l' => if beq_nat n O then Some a else nth_error l' (pred n)
end.
Example test_nth_error1 : nth_error [4;5;6;7] 0 = Some 4.
Proof. reflexivity. Qed.
Example test_nth_error2 : nth_error [[1];[2]] 1 = Some [2].
Proof. reflexivity. Qed.
Example test_nth_error3 : nth_error [true] 2 = None.
Proof. reflexivity. Qed.
(** **** Exercise: 1 star, optional (hd_error_poly) *)
(** Complete the definition of a polymorphic version of the
    [hd_error] function from the last chapter.  Be sure that it
    passes the unit tests below. *)
Definition hd_error {X : Type} (l : list X) : option X
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
(** Once again, to force the implicit arguments to be explicit, we
    can use [@] before the name of the function. *)
Check @hd_error.
Example test_hd_error1 : hd_error [1;2] = Some 1.
  (* FILL IN HERE *) Admitted.
Example test_hd_error2 : hd_error [[1];[2]] = Some [1].
  (* FILL IN HERE *) Admitted.
(** [] *)
(* ################################################################# *)
(** * Functions as Data *)
(** Like many other modern programming languages, including all
    functional languages (ML, Haskell, Scheme, Scala, Clojure, etc.),
    Coq treats functions as first-class citizens, allowing them to be
    passed as arguments to other functions, returned as results,
    stored in data structures, etc. *)
(* ================================================================= *)
(** ** Higher-Order Functions *)
(** Functions that manipulate other functions are often called
    _higher-order_ functions.  Here's a simple one: *)
Definition doit3times {X:Type} (f:X->X) (n:X) : X :=
f (f (f n)).
(** The argument [f] here is itself a function (from [X] to [X]);
    the body of [doit3times] applies [f] three times to some value
    [n]. *)
Check @doit3times.
(* ===> doit3times : forall X : Type, (X -> X) -> X -> X *)
Example test_doit3times: doit3times minustwo 9 = 3.
Proof. reflexivity. Qed.
Example test_doit3times': doit3times negb true = false.
Proof. reflexivity. Qed.
(* ================================================================= *)
(** ** Filter *)
(** Here is a more useful higher-order function, taking a list of
    elements of some type [X] and a _predicate_ on [X] (a function
    from [X] to [bool]) and "filtering" the list, returning a new
    list containing just those elements for which the predicate
    returns [true]. *)
Fixpoint filter {X:Type} (test: X->bool) (l:list X)
: (list X) :=
match l with
| [] => []
| h :: t => if test h then h :: (filter test t)
else filter test t
end.
(** For example, if we apply [filter] to the predicate [evenb] and
    a list of numbers [l], it returns a list containing just the
    even members of [l]. *)
Example test_filter1: filter evenb [1;2;3;4] = [2;4].
Proof. reflexivity. Qed.
Definition length_is_1 {X : Type} (l : list X) : bool :=
beq_nat (length l) 1.
Example test_filter2:
filter length_is_1
[ [1; 2]; [3]; [4]; [5;6;7]; []; [8] ]
= [ [3]; [4]; [8] ].
Proof. reflexivity. Qed.
(** We can use [filter] to give a concise version of the
    [countoddmembers] function from the [Lists] chapter. *)
Definition countoddmembers' (l:list nat) : nat :=
length (filter oddb l).
Example test_countoddmembers'1: countoddmembers' [1;0;3;1;4;5] = 4.
Proof. reflexivity. Qed.
Example test_countoddmembers'2: countoddmembers' [0;2;4] = 0.
Proof. reflexivity. Qed.
Example test_countoddmembers'3: countoddmembers' nil = 0.
Proof. reflexivity. Qed.
(* ================================================================= *)
(** ** Anonymous Functions *)
(** It is arguably a little sad, in the example just above, to be
    forced to define the function [length_is_1] and give it a name
    just to be able to pass it as an argument to [filter], since we
    will probably never use it again.  Moreover, this is not an
    isolated example: when using higher-order functions, we often
    want to pass as arguments "one-off" functions that we will never
    use again; having to give each of these functions a name would
    be tedious.
    Fortunately, there is a better way: we can construct a function
    "on the fly" without declaring it at the top level or giving it
    a name. *)
Example test_anon_fun':
doit3times (fun n => n * n) 2 = 256.
Proof. reflexivity. Qed.
(** The expression [(fun n => n * n)] can be read as "the function
    that, given a number [n], yields [n * n]." *)
(** Here is the [filter] example, rewritten to use an anonymous
    function. *)
Example test_filter2':
filter (fun l => beq_nat (length l) 1)
[ [1; 2]; [3]; [4]; [5;6;7]; []; [8] ]
= [ [3]; [4]; [8] ].
Proof. reflexivity. Qed.
(** **** Exercise: 2 stars (filter_even_gt7) *)
(** Use [filter] (instead of [Fixpoint]) to write a Coq function
    [filter_even_gt7] that takes a list of natural numbers as input
    and returns a list of just those that are even and greater than
    7. *)
Definition filter_even_gt7 (l : list nat) : list nat
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example test_filter_even_gt7_1 :
filter_even_gt7 [1;2;6;9;10;3;12;8] = [10;12;8].
  (* FILL IN HERE *) Admitted.
Example test_filter_even_gt7_2 :
filter_even_gt7 [5;2;6;19;129] = [].
  (* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 3 stars (partition) *)
(** Use [filter] to write a Coq function [partition]:
      partition : forall X : Type,
                  (X -> bool) -> list X -> list X * list X
    Given a set [X], a test function of type [X -> bool], and a
    [list X], [partition] should return a pair of lists.  The first
    member of the pair is the sublist of the original list
    containing the elements that satisfy the test, and the second is
    the sublist containing those that fail the test.  The order of
    elements in the two sublists should be the same as their order
    in the original list. *)
Definition partition {X : Type}
(test : X -> bool)
(l : list X)
: list X * list X
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example test_partition1: partition oddb [1;2;3;4;5] = ([1;3;5], [2;4]).
  (* FILL IN HERE *) Admitted.
Example test_partition2: partition (fun x => false) [5;9;0] = ([], [5;9;0]).
  (* FILL IN HERE *) Admitted.
(** [] *)
(* ================================================================= *)
(** ** Map *)
(** Another handy higher-order function is called [map]. *)
Fixpoint map {X Y:Type} (f:X->Y) (l:list X) : (list Y) :=
match l with
| [] => []
| h :: t => (f h) :: (map f t)
end.
(** It takes a function [f] and a list [ l = [n1, n2, n3, ...] ]
    and returns the list [ [f n1, f n2, f n3,...] ], where [f] has
    been applied to each element of [l] in turn.  For example: *)
Example test_map1: map (fun x => plus 3 x) [2;0;2] = [5;3;5].
Proof. reflexivity. Qed.
(** The element types of the input and output lists need not be
    the same, since [map] takes _two_ type arguments, [X] and [Y];
    it can thus be applied to a list of numbers and a function from
    numbers to booleans to yield a list of booleans: *)
Example test_map2:
map oddb [2;1;2;5] = [false;true;false;true].
Proof. reflexivity. Qed.
(** It can even be applied to a list of numbers and a function
    from numbers to _lists_ of booleans to yield a _list of lists_
    of booleans: *)
Example test_map3:
map (fun n => [evenb n;oddb n]) [2;1;2;5]
= [[true;false];[false;true];[true;false];[false;true]].
Proof. reflexivity. Qed.
(* ----------------------------------------------------------------- *)
(** *** Exercises *)
(** **** Exercise: 3 stars (map_rev) *)
(** Show that [map] and [rev] commute.  You may need to define an
    auxiliary lemma. *)
Theorem map_rev : forall (X Y : Type) (f : X -> Y) (l : list X),
map f (rev l) = rev (map f l).
Proof.
  (* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, recommended (flat_map) *)
(** The function [map] maps a [list X] to a [list Y] using a
    function of type [X -> Y].  We can define a similar function,
    [flat_map], which maps a [list X] to a [list Y] using a function
    [f] of type [X -> list Y].  Your definition should work by
    'flattening' the results of [f], like so:
        flat_map (fun n => [n;n+1;n+2]) [1;5;10]
      = [1; 2; 3; 5; 6; 7; 10; 11; 12]. *)
Fixpoint flat_map {X Y:Type} (f:X -> list Y) (l:list X)
: (list Y)
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example test_flat_map1:
flat_map (fun n => [n;n;n]) [1;5;4]
= [1; 1; 1; 5; 5; 5; 4; 4; 4].
  (* FILL IN HERE *) Admitted.
(** [] *)
(** Lists are not the only inductive type for which we can write a
    [map] function.  Here is the definition of [map] for the
    [option] type: *)
Definition option_map {X Y : Type} (f : X -> Y) (xo : option X)
: option Y :=
match xo with
| None => None
| Some x => Some (f x)
end.
(** **** Exercise: 2 stars, optional (implicit_args) *)
(** The definitions and uses of [filter] and [map] use implicit
    arguments in many places.  Replace the curly braces around the
    implicit arguments with parentheses, and then fill in explicit
    type parameters where necessary and use Coq to check that you've
    done so correctly.  (This exercise is not to be turned in; it is
    probably easiest to do it on a _copy_ of this file that you can
    throw away afterwards.) *)
(** [] *)
(* ================================================================= *)
(** ** Fold *)
(** An even more powerful higher-order function is called [fold].
    This function is the inspiration for the "[reduce]" operation
    that lies at the heart of Google's map/reduce distributed
    programming framework. *)
Fixpoint fold {X Y:Type} (f: X->Y->Y) (l:list X) (b:Y)
: Y :=
match l with
| nil => b
| h :: t => f h (fold f t b)
end.
(** Intuitively, the behavior of the [fold] operation is to insert
    a given binary operator [f] between every pair of elements in a
    given list.  For example, [ fold plus [1;2;3;4] ] intuitively
    means [1+2+3+4].  To make this precise, we also need a "starting
    element" that serves as the initial second input to [f].  So,
    for example,
       fold plus [1;2;3;4] 0
    yields
       1 + (2 + (3 + (4 + 0))).
    Some more examples: *)
Check (fold andb).
(* ===> fold andb : list bool -> bool -> bool *)
Example fold_example1 :
fold mult [1;2;3;4] 1 = 24.
Proof. reflexivity. Qed.
Example fold_example2 :
fold andb [true;true;false;true] true = false.
Proof. reflexivity. Qed.
Example fold_example3 :
fold app [[1];[];[2;3];[4]] [] = [1;2;3;4].
Proof. reflexivity. Qed.
(** **** Exercise: 1 star, advanced (fold_types_different) *)
(** Observe that the type of [fold] is parameterized by _two_ type
    variables, [X] and [Y], and the argument [f] is a binary
    operator that takes an [X] and a [Y] and returns a [Y].  Can you
    think of a situation in which it would be useful for [X] and [Y]
    to be different? *)
(* FILL IN HERE *)
(** [] *)
(* ================================================================= *)
(** ** Functions That Construct Functions *)
(** Most of the higher-order functions we have talked about so far
    take functions as arguments.  Let's look at some examples that
    involve _returning_ functions as the results of other functions.
    To begin, here is a function that takes a value [x] (drawn from
    some type [X]) and returns a function from [nat] to [X] that
    yields [x] whenever it is called, ignoring its [nat] argument. *)
Definition constfun {X: Type} (x: X) : nat->X :=
fun (k:nat) => x.
Definition ftrue := constfun true.
Example constfun_example1 : ftrue 0 = true.
Proof. reflexivity. Qed.
Example constfun_example2 : (constfun 5) 99 = 5.
Proof. reflexivity. Qed.
(** In fact, the multiple-argument functions we have already seen
    are also examples of passing functions as data.  To see why,
    recall the type of [plus]. *)
Check plus.
(* ==> nat -> nat -> nat *)
(** Each [->] in this expression is actually a _binary_ operator
    on types.  This operator is _right-associative_, so the type of
    [plus] is really shorthand for [nat -> (nat -> nat)].  That is,
    "[plus] is a one-argument function that takes a [nat] and
    returns another one-argument function that takes a [nat] and
    returns a [nat]."  In the examples above, we have always applied
    [plus] to both of its arguments at once, but if we like we can
    supply just the first.  This is called _partial application_. *)
Definition plus3 := plus 3.
Check plus3.
Example test_plus3 : plus3 4 = 7.
Proof. reflexivity. Qed.
Example test_plus3' : doit3times plus3 0 = 9.
Proof. reflexivity. Qed.
Example test_plus3'' : doit3times (plus 3) 0 = 9.
Proof. reflexivity. Qed.
(* ################################################################# *)
(** * Additional Exercises *)
Module Exercises.
(** **** Exercise: 2 stars (fold_length) *)
(** Many common functions on lists can be implemented in terms of
    [fold].  For example, here is an alternative definition of
    [length]: *)
Definition fold_length {X : Type} (l : list X) : nat :=
fold (fun _ n => S n) l 0.
Example test_fold_length1 : fold_length [4;7;0] = 3.
Proof. reflexivity. Qed.
(** Prove the correctness of [fold_length]. *)
Theorem fold_length_correct : forall X (l : list X),
fold_length l = length l.
  (* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 3 stars (fold_map) *)
(** We can also define [map] in terms of [fold].  Finish
    [fold_map] below. *)
Definition fold_map {X Y:Type} (f : X -> Y) (l : list X) : list Y
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
(** Write down a theorem [fold_map_correct] in Coq stating that
    [fold_map] is correct, and prove it. *)
(* FILL IN HERE *)
(** [] *)
(** **** Exercise: 2 stars, advanced (currying) *)
(** In Coq, a function [f : A -> B -> C] really has the type
    [A -> (B -> C)].  That is, if you give [f] a value of type [A],
    it will give you a function [f' : B -> C].  If you then give
    [f'] a value of type [B], it will return a value of type [C].
    This allows for partial application, as in [plus3].  Processing
    a list of arguments with functions that return functions is
    called _currying_, in honor of the logician Haskell Curry.
    Conversely, we can reinterpret the type [A -> B -> C] as
    [(A * B) -> C].  This is called _uncurrying_.  With an uncurried
    binary function, both arguments must be given at once as a pair;
    there is no partial application. *)
(** We can define currying as follows: *)
Definition prod_curry {X Y Z : Type}
(f : X * Y -> Z) (x : X) (y : Y) : Z := f (x, y).
(** As an exercise, define its inverse, [prod_uncurry].  Then
    prove the theorems below showing that the two are inverses. *)
Definition prod_uncurry {X Y Z : Type}
(f : X -> Y -> Z) (p : X * Y) : Z
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
(** As a (trivial) example of the usefulness of currying, we can
    use it to shorten one of the examples that we saw above: *)
Example test_map1': map (plus 3) [2;0;2] = [5;3;5].
Proof. reflexivity. Qed.
(** Thought exercise: before running the following commands, can
    you calculate the types of [prod_curry] and [prod_uncurry]? *)
Check @prod_curry.
Check @prod_uncurry.
Theorem uncurry_curry : forall (X Y Z : Type)
(f : X -> Y -> Z)
x y,
prod_curry (prod_uncurry f) x y = f x y.
Proof.
  (* FILL IN HERE *) Admitted.
Theorem curry_uncurry : forall (X Y Z : Type)
(f : (X * Y) -> Z) (p : X * Y),
prod_uncurry (prod_curry f) p = f p.
Proof.
  (* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, advanced (nth_error_informal) *)
(** Recall the definition of the [nth_error] function:
     Fixpoint nth_error {X : Type} (l : list X) (n : nat) : option X :=
       match l with
       | [] => None
       | a :: l' => if beq_nat n O then Some a else nth_error l' (pred n)
       end.
    Write an informal proof of the following theorem:
     forall X n l, length l = n -> @nth_error X l n = None
    (* FILL IN HERE *)
*)
(** [] *)
(** **** Exercise: 4 stars, advanced (church_numerals) *)
(** This exercise explores an alternative way of defining natural
    numbers, using the so-called _Church numerals_, named after the
    mathematician Alonzo Church.  We can represent a natural number
    [n] as a function that takes a function [f] as a parameter and
    returns [f] iterated [n] times. *)
Module Church.
Definition nat := forall X : Type, (X -> X) -> X -> X.
(** Let's see how to write some numbers with this notation.
    Iterating a function once should be the same as just applying
    it.  Thus: *)
Definition one : nat :=
fun (X : Type) (f : X -> X) (x : X) => f x.
(** Similarly, [two] should apply [f] twice to its argument: *)
Definition two : nat :=
fun (X : Type) (f : X -> X) (x : X) => f (f x).
(** Defining [zero] is somewhat trickier: how can we "apply a
    function zero times"?  The answer is actually simple: just
    return the argument untouched. *)
Definition zero : nat :=
fun (X : Type) (f : X -> X) (x : X) => x.
(** More generally, a number [n] can be written as [fun X f x =>
    f (f ... (f x) ...)], with [n] occurrences of [f].  Notice in
    particular how the [doit3times] function we've defined
    previously is actually just the Church representation of [3]. *)
Definition three : nat := @doit3times.
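(** As a quick sanity check (an editorial addition, not part of the
    original text), we can "run" a Church numeral by iterating a
    concrete function: applying [negb] three times to [true] flips
    it an odd number of times. *)
Example three_negb : three bool negb true = false.
Proof. reflexivity. Qed.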
(** Complete the definitions of the following functions.  Make
    sure that the corresponding unit tests pass by proving them with
    [reflexivity]. *)
(** Successor of a natural number: *)
Definition succ (n : nat) : nat
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example succ_1 : succ zero = one.
Proof. (* FILL IN HERE *) Admitted.
Example succ_2 : succ one = two.
Proof. (* FILL IN HERE *) Admitted.
Example succ_3 : succ two = three.
Proof. (* FILL IN HERE *) Admitted.
(** Addition of two natural numbers: *)
Definition plus (n m : nat) : nat
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example plus_1 : plus zero one = one.
Proof. (* FILL IN HERE *) Admitted.
Example plus_2 : plus two three = plus three two.
Proof. (* FILL IN HERE *) Admitted.
Example plus_3 :
plus (plus two two) three = plus one (plus three three).
Proof. (* FILL IN HERE *) Admitted.
(** Multiplication: *)
Definition mult (n m : nat) : nat
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example mult_1 : mult one one = one.
Proof. (* FILL IN HERE *) Admitted.
Example mult_2 : mult zero (plus three three) = zero.
Proof. (* FILL IN HERE *) Admitted.
Example mult_3 : mult two three = plus three three.
Proof. (* FILL IN HERE *) Admitted.
(** Exponentiation: *)
(** (_Hint_: Polymorphism plays a crucial role here.  However,
    choosing the right type to iterate over can be tricky.  If you
    hit a "Universe inconsistency" error, try iterating over a
    different type: [nat] itself is usually problematic.) *)
Definition exp (n m : nat) : nat
  (* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example exp_1 : exp two two = plus two two.
Proof. (* FILL IN HERE *) Admitted.
Example exp_2 : exp three two = plus (mult two (mult two two)) one.
Proof. (* FILL IN HERE *) Admitted.
Example exp_3 : exp three zero = one.
Proof. (* FILL IN HERE *) Admitted.
End Church.
(** [] *)
End Exercises.
(** $Date: 2017-09-06 11:44:36 -0400 (Wed, 06 Sep 2017) $ *)
I love Matthew’s account of the wise men — the Magi — who traveled far, facing peril and making sacrifices, all to bring precious gifts to the newborn King Jesus.
When I saw the majestic Bactrian camels in Mongolia, I knew I had to try to recreate that scene. We were in the Bayankhongor area, on the edge of the Gobi Desert, so I turned to the best possible helpers — World Vision staff, who seem to know everyone in their community.
They introduced me to a local man who had a large herd of camels and who was happy to help us. He warned me, though, that the camels weren’t tame, making it difficult to separate three of them from all the others.
Finally, with the help of camel herders, we lined up three unruly camels next to a ger, or yurt, and hung some battery-operated Christmas lights around the door.
And then their patience with me paid off. The setting sun filled big clouds on the horizon with brilliant color. I switched to a 180-degree fisheye lens to gather it all in.
Some gifts are material, like the animals and gers in the World Vision Gift Catalog. Others are time and hospitality, like the camel herders’ gift to me. But ultimately, every good and perfect gift is from God, and every gift given in love is an act of worship.
import Qpf.Qpf.Multivariate.Basic
import Qpf.Qpf.Multivariate.ofPolynomial
import Qpf.PFunctor.Multivariate.Constructions.Basic
import Qpf.Macro.Tactic.FinDestr
namespace MvQPF
namespace Prod
open PFin2 (fz fs)
def P : MvPFunctor 2
:= .mk' [
![1, 1]
]
def P' : MvPFunctor 2
:= ⟨PFin2 1,
fun | _ => ![PFin2 1, PFin2 1]
⟩
def Pfin : MvPFunctor 2
:= ⟨Fin 1,
fun | _ => ![Fin 1, Fin 1]
⟩
-- `Nat` lives in `Type`, so both functors are fine
#check (P.Obj ![Nat, Nat] : Type)
#check (Pfin.Obj ![Nat, Nat] : Type)
-- Now assume some `X` that lives in a higher universe, say `Type 1`
variable (X : Type 1)
-- `P` is able to adjust
#check (P.Obj ![X, X] : Type 1)
-- `Pfin` is not
-- #check Pfin.Obj ![X, X]
-- application type mismatch
-- Vec.append1 Vec.nil X
-- argument
-- X
-- has type
-- Type 1 : Type 2
-- but is expected to have type
-- Type : Type 1
-- #check P.Obj ![Nat, X]
abbrev QpfProd' := P.Obj
abbrev QpfProd := QpfProd'.curried
/--
An uncurried version of the root `Prod`
-/
abbrev Prod' : TypeFun 2
:= @TypeFun.ofCurried 2 Prod
/--
Constructor for `QpfProd'`
-/
def mk (a : Γ 1) (b : Γ 0) : QpfProd' Γ
:= ⟨
fz,
fun
| 1, _ => a
| 0, _ => b
⟩
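/-- Convert an ordinary pair into its representation in the polynomial functor `QpfProd'`. -/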
def box : Prod' Γ → QpfProd' Γ
| ⟨a, b⟩ => mk a b
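/-- Recover the ordinary pair from its `QpfProd'` representation. -/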
def unbox : QpfProd' Γ → Prod' Γ
| ⟨fz, f⟩ => (f 1 fz, f 0 fz)
theorem unbox_box_id (x : Prod' Γ) :
unbox (box x) = x :=
by
rfl
theorem box_unbox_id (x : QpfProd' Γ) :
box (unbox x) = x :=
by
rcases x with ⟨i, f⟩;
fin_destr i;
simp[box, unbox, mk];
apply congrArg;
fin_destr
<;> rfl
instance : MvQPF Prod' := .ofPolynomial P box unbox box_unbox_id
end Prod
export Prod (QpfProd QpfProd')
end MvQPF
program main
IMPLICIT NONE
double precision :: a
double precision :: b
a=3.0
b=4.0
print*, a
END PROGRAM main
State Before:
  α : Type u_1
  l l₁ r₁ l₂ r₂ : List α
  ⊢ l₂ ++ l₁ ++ l ++ (r₁ ++ r₂) = l₂ ++ (l₁ ++ l ++ r₁) ++ r₂
State After: no goals
Tactic: simp only [append_assoc]
## Import libraries
```python
from matplotlib import pyplot as plt
import numpy as np
%matplotlib inline
```
## Define function
The goal is to create a plot of the following function
\begin{equation}
f(x)=0.2+0.4x^2+0.3x\cdot\sin(15x)+0.05\cos(50x)
\end{equation}
```python
x = np.linspace(0, 1, 100)
y = 0.2+0.4*x**2+0.3*x*np.sin(15*x)+0.05*np.cos(50*x)
```
## Produce figure
```python
plt.figure(figsize=(6, 6))
plt.plot(x, y)
plt.show()
```
function [ALLEEG_out cfg] = pop_pre_prepData(ALLEEG,typeproc,varargin)
%
% Preprocess EEG dataset(s) for connectivity analysis. See [1] for
% mathematical details on preprocessing steps.
%
%
% Input:
%
% ALLEEG: Array of EEGLAB datasets to preprocess.
% typeproc: 0 to launch the configuration GUI. Two string values are
%           also handled below: 'nogui' applies the (stored or
%           supplied) configuration without rendering the GUI, and
%           'cfg_only' only returns the argument specification
%           structure.
%
% Optional:
%
% <'Name',value> pairs as defined in pre_prepData()
%
% Output:
%
% ALLEEG: Preprocessed EEG structure(s)
% cfg: Argument specification structure.
%
%
% See Also: pre_prepData()
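%
% Example (a minimal usage sketch, not from the original help text; it
% relies only on the typeproc values handled in the code below):
%
%   % launch the preprocessing GUI for all loaded datasets
%   [ALLEEG cfg] = pop_pre_prepData(ALLEEG,0);
%
%   % re-apply the stored configuration non-interactively
%   [ALLEEG cfg] = pop_pre_prepData(ALLEEG,'nogui');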
%
% References:
%
% [1] Mullen T (2010) The Source Information Flow Toolbox (SIFT):
% Theoretical Handbook and User Manual. Section 6.5.1
% Available at: http://www.sccn.ucsd.edu/wiki/Sift
%
% Author: Tim Mullen 2009, SCCN/INC, UCSD.
% Email: [email protected]
%
% Revised Jan 2010.
% This function is part of the Source Information Flow Toolbox (SIFT)
%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program; if not, write to the Free Software
% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
if nargin<2
typeproc = 0;
end
% set default output
ALLEEG_out = ALLEEG;
cfg = [];
% generate splash screen
% initialize SIFT, etc
StartSIFT(~strcmpi(typeproc,'nogui'),true);
fcnName = strrep(mfilename,'pop_','');
fcnHandle = str2func(fcnName);
% check if we've applied SIFT to this dataset before
res = hlp_checkeegset(ALLEEG,{'cat'});
if isempty(res) && isfield(ALLEEG(1).CAT.configs,fcnName)
% get default configuration (from prior use) and merge with varargin
varargin = [hlp_struct2varargin(ALLEEG(1).CAT.configs.(fcnName)) varargin];
end
if strcmpi(typeproc,'nogui')
% get the default config from function and overload supplied args
cfg = arg_tovals(arg_report('rich',fcnHandle,[{'EEG',ALLEEG(1)},varargin]),false);
else
% render the GUI
[PGh figh] = feval(['gui_' fcnName],ALLEEG(1),varargin{:});
if isempty(PGh)
% user chose to cancel
return;
end
% get the specification of the PropertyGrid
ps = PGh.GetPropertySpecification;
cfg = arg_tovals(ps,false);
end
drawnow;
if strcmpi(typeproc,'cfg_only')
return;
end
% initialize progress bar
if cfg.verb==2 && length(ALLEEG)>1
waitbarTitle = 'Preprocessing datasets';
multiWaitbar(waitbarTitle,'Reset');
multiWaitbar(waitbarTitle,'ResetCancel',true);
multiWaitbar(waitbarTitle,...
'Color', [0.8 0.0 0.1], ...
'CanCancel','on', ...
'CancelFcn',@(a,b)disp('[Cancel requested. Please wait...]'));
end
% re-initialize output
clear ALLEEG_out;
% preprocess datasets
for cnd=1:length(ALLEEG)
% execute the low-level function
[ALLEEG_out(cnd)] = feval(fcnHandle,'EEG',ALLEEG(cnd),cfg);
if ~isempty(cfg)
% store the configuration structure
ALLEEG_out(cnd).CAT.configs.(fcnName) = cfg;
end
if cfg.verb==2 && length(ALLEEG)>1
% update waitbar
drawnow;
cancel = multiWaitbar(waitbarTitle,cnd/length(ALLEEG));
if cancel && hlp_confirmWaitbarCancel(waitbarTitle)
% restore original dataset
ALLEEG_out = ALLEEG;
break;
end
end
end
% cleanup progress bar
if cfg.verb==2 && length(ALLEEG)>1
multiWaitbar(waitbarTitle,'Close');
end
/-
Copyright (c) 2021 Paula Neeley. All rights reserved.
Author: Paula Neeley
Following the textbook "Dynamic Epistemic Logic" by
Hans van Ditmarsch, Wiebe van der Hoek, and Barteld Kooi
-/
import del.semantics.translationdefs tactic.linarith
variables {agents : Type}
---------------------- Generic helper lemmas ----------------------
lemma comp_gt_zero {φ : formPA agents} : complexity φ > 0 :=
begin
induction φ,
repeat { rw complexity }, repeat { linarith },
have h1 : 4 > 0, linarith,
exact (mul_pos (add_lt_add h1 φ_ih_φ) φ_ih_ψ)
end
lemma comp_ge_zero {φ : formPA agents} : complexity φ ≥ 0 := le_of_lt comp_gt_zero
lemma comp_ge_one {φ : formPA agents} : complexity φ ≥ 1 :=
begin
have h1 : complexity φ > 0, from comp_gt_zero,
linarith
end
lemma maxhelper {n1 n2 n3 n4 : nat} : n1 + n2 < n4 → n1 + n3 < n4 → n1 + max n2 n3 < n4 :=
begin
intros h1 h2,
exact nat.add_lt_of_lt_sub_left (max_lt (nat.lt_sub_left_of_add_lt h1) (nat.lt_sub_left_of_add_lt h2))
end
lemma le_maxhelper {n1 n2 n3 n4 : nat} : n1 + n2 ≤ n4 → n1 + n3 ≤ n4 → n1 + max n2 n3 ≤ n4 :=
begin
intros h1 h2,
have h3 := max_le (nat.le_sub_left_of_add_le h1) (nat.le_sub_left_of_add_le h2),
rw nat.le_sub_right_iff_add_le at h3,
have h4 : max n2 n3 + n1 = n1 + max n2 n3, linarith,
exact eq.subst h4 h3, linarith
end
-------------- Helper lemmas for translation function --------------
lemma tr1 : ∀ φ ψ : formPA agents, complexity φ < 1 + max (complexity φ) (complexity ψ) :=
begin
intros φ ψ,
have h1 := lt_add_one _,
have h2 := add_comm _ _,
exact (eq.subst h2 (lt_of_le_of_lt (le_max_left (complexity φ) (complexity ψ)) h1))
end
lemma tr2 : ∀ φ ψ : formPA agents, complexity ψ < 1 + max (complexity φ) (complexity ψ) :=
begin
intros φ ψ,
have h1 := lt_add_one _,
have h2 := add_comm _ _,
exact (eq.subst h2 (lt_of_le_of_lt (le_max_right (complexity φ) (complexity ψ)) h1))
end
lemma tr3 : ∀ φ : formPA agents, 1 + max (complexity φ) 1 < 4 + complexity φ :=
begin
intro φ,
have h1 : complexity φ < complexity φ + 1, from lt_add_one _,
have h2 := zero_lt_iff_ne_zero,
have h3 : 3 + (complexity φ + 1) = complexity φ + 4, from add_comm _ _,
have h4 := add_comm (complexity φ) 4,
have h5 := (rfl.congr h4).mp h3,
exact (eq.subst h5 (add_lt_add (lt_add_of_pos_right 1 (h2.mpr (nat.add_one_ne_zero 1)))
(max_lt h1 (lt_add_of_pos_left 1 comp_gt_zero))))
end
lemma tr4helper {φ ψ : formPA agents} : (4 + complexity φ) * (1 + max (complexity ψ) 1) > 9 :=
begin
have h1 : complexity φ ≥ 1, from comp_ge_one,
have h2 : (4 + complexity φ) ≥ 5, linarith,
have h3 : complexity ψ ≥ 1, from comp_ge_one,
have h4 := max_eq_left h3,
have h5 : 1 + max (complexity ψ) 1 ≥ 1 + 1, linarith,
have h6 : 0 ≤ 2, linarith,
have h7 : 0 ≤ 4 + complexity φ, linarith,
exact (mul_le_mul h2 h5 h6 h7),
end
lemma tr4helper1 {n m : nat} : n ≥ 4 → m ≥ 1 → 2 + n * m < n * (1 + m) :=
begin
intros h1 h2,
have h3 := mul_add n 1 m,
linarith,
end
lemma tr4helper2 {φ ψ : formPA agents} : 1 + 1 + (4 + complexity φ) * complexity ψ <
(4 + complexity φ) * (1 + complexity ψ) :=
begin
have h1 : 4 + complexity φ ≥ 4, linarith,
exact (tr4helper1 h1 (comp_ge_one))
end
lemma tr4helper3 {φ ψ : formPA agents} : 1 + 1 + ((4 + complexity φ) * complexity ψ) <
(4 + complexity φ) * (1 + max (complexity ψ) 1) :=
begin
have h1 : complexity ψ ≥ 1, from comp_ge_one,
have h2 := max_eq_left h1,
have h3 := congr_arg (has_mul.mul (4 + complexity φ)) (congr_arg (has_add.add 1)
(eq.symm (max_eq_left comp_ge_one))),
exact (eq.subst h3 tr4helper2)
end
lemma tr4 : ∀ φ ψ : formPA agents, 1 + max (complexity φ) (1 + max ((4 + complexity φ)
* complexity ψ) 1) < (4 + complexity φ) * (1 + max (complexity ψ) 1) :=
begin
intros φ ψ,
have h1 : 1 ≤ 1 + max (complexity ψ) 1, linarith,
have h2 : 1 + complexity φ < (4 + complexity φ), linarith,
have h3 : 1 > 0, linarith,
have h4 : 4 ≥ 0, linarith,
have h5 : complexity ψ ≥ 1, from comp_ge_one,
have h6 : 1 ≥ 1, linarith,
have h7 := le_max_right (complexity ψ) 1,
have h8 : 3 < 9, linarith,
have h9 : 1 + 1 + max ((4 + complexity φ) * complexity ψ) 1 =
1 + (1 + max ((4 + complexity φ) * complexity ψ) 1), linarith,
exact (maxhelper (eq.subst (mul_one (1 + complexity φ))
(mul_lt_mul h2 h1 h3 (add_nonneg h4 comp_ge_zero)))
(eq.subst h9 (maxhelper tr4helper3 (lt_trans h8 tr4helper))))
end
lemma tr5helper {φ ψ : formPA agents} : 1 + ((4 + complexity φ) * complexity ψ) <
(4 + complexity φ) * (1 + (complexity ψ)) :=
begin
have h1 : 1 + 1 + (4 + complexity φ) * complexity ψ < (4 + complexity φ) * (1 + complexity ψ), from tr4helper2,
linarith,
end
lemma tr5helper1 {ψ χ : formPA agents} : complexity χ ≤ max (complexity χ) (complexity ψ) :=
begin
cases max_choice (complexity ψ) (complexity χ),
repeat {have h : complexity χ ≤ complexity χ, linarith, exact (le_max_left_of_le h)}
end
lemma tr5helper2 {ψ χ : formPA agents} : max (complexity ψ) (complexity χ) = complexity ψ →
complexity χ ≤ complexity ψ :=
begin
intro h1,
have h2 : max (complexity χ) (complexity ψ) = complexity ψ,
from eq.substr (max_comm (complexity χ) (complexity ψ)) h1,
exact (eq.subst h2 tr5helper1)
end
lemma tr5helper3 {φ ψ χ : formPA agents} : complexity χ ≤ complexity ψ →
1 + ((4 + complexity φ) * complexity χ) < (4 + complexity φ) * (1 + (complexity ψ)) :=
begin
intro h1,
have h2 : 4 + complexity φ > 0, linarith,
have h3 : 4 + complexity φ ≤ 4 + complexity φ, linarith,
have h4 := nat.mul_le_mul_left (4 + complexity φ) h1,
have h5 : 1 + (4 + complexity φ) * complexity χ ≤ 1 + (4 + complexity φ) * complexity ψ, linarith,
exact (lt_of_le_of_lt h5 tr5helper)
end
lemma tr5 : ∀ φ ψ χ : formPA agents, 1 + max ((4 + complexity φ) * complexity ψ)
((4 + complexity φ) * complexity χ) < (4 + complexity φ) * (1 + max (complexity ψ) (complexity χ)) :=
begin
intros φ ψ χ,
cases max_choice (complexity ψ) (complexity χ),
cases max_choice ((4 + complexity φ) * complexity ψ) ((4 + complexity φ) * complexity χ),
exact (eq.substr h_1 (eq.substr h (tr5helper))),
exact (eq.substr h_1 (eq.substr h (tr5helper3 (tr5helper2 h)))),
cases max_choice ((4 + complexity φ) * complexity ψ) ((4 + complexity φ) * complexity χ),
exact (eq.substr h_1 (eq.substr h (tr5helper3 (tr5helper2 (eq.subst (max_comm (complexity ψ) (complexity χ)) h))))),
exact (eq.substr h_1 (eq.substr h tr5helper)),
end
lemma tr6 : ∀ φ ψ : formPA agents, 1 + max (complexity φ) (1 + (4 + complexity φ)
* complexity ψ) < (4 + complexity φ) * (1 + complexity ψ) :=
begin
intros φ ψ,
have h1 : 1 + complexity φ < 4 + complexity φ, linarith,
have h2 : complexity ψ + 1 ≥ 1, linarith,
have h3 : 0 < 1, linarith,
have h4 : 0 ≤ 4 + complexity φ, linarith,
have h5 := mul_lt_mul h1 h2 h3 h4,
have h6 : 1 + complexity φ < (4 + complexity φ) * (1 + complexity ψ), linarith,
have h7 : 1 + 1 + (4 + complexity φ) * complexity ψ = 1 + (1 + (4 + complexity φ) * complexity ψ), linarith,
have h8 : 1 + complexity ψ = complexity ψ + 1, linarith,
exact (maxhelper h6 (eq.subst h8 (eq.subst h7 tr4helper2)))
end
lemma tr7helper1 {φ ψ : formPA agents} : (4 + complexity φ ) * (4 + complexity ψ)
= (complexity φ) * (complexity ψ) + 4 * (complexity ψ) + 4 * (complexity φ) + 4 * 4 :=
begin
have h1 := mul_add (complexity φ + 4) (complexity ψ) 4,
have h2 := add_mul (complexity φ) 4 (complexity ψ),
have h3 := add_mul (complexity φ) 4 4,
linarith
end
lemma tr7helper2 {φ ψ : formPA agents} : 5 + (complexity φ) < (complexity φ) * (complexity ψ) + 4 * (complexity ψ) + 4 * (complexity φ) + 4 * 4 :=
begin
have h1 : complexity φ ≥ 1, from comp_ge_one,
have h2 : complexity ψ ≥ 1, from comp_ge_one,
have : complexity φ * complexity ψ ≥ 0, apply nat.zero_le,
linarith
end
lemma tr7helper3 {φ ψ : formPA agents} : 5 + (complexity φ) < (4 + complexity φ) * (4 + complexity ψ) :=
begin
have h1 := tr7helper1,
exact (eq.substr h1 tr7helper2)
end
lemma tr7helper4 {φ ψ : formPA agents} : 5 + (complexity φ) * (complexity ψ) + 4 * (complexity ψ) <
(complexity φ) * (complexity ψ) + 4 * (complexity ψ) + 4 * (complexity φ) + 4 * 4 :=
begin
have h1 : 5 + (complexity φ) * (complexity ψ) + 4 * (complexity ψ) =
(complexity φ) * (complexity ψ) + (5 + 4 * (complexity ψ)), linarith,
have h2 : 5 + 4 * (complexity ψ) < 4 * (complexity ψ) + 4 * (complexity φ) + 4 * 4, linarith,
have h3 := add_lt_add_left h2 ((complexity φ) * (complexity ψ)),
have h4 : 5 + (complexity φ) * (complexity ψ) + 4 * (complexity ψ) <
(complexity φ) * (complexity ψ) + (4 * (complexity ψ) + 4 * (complexity φ) + 4 * 4), from eq.substr h1 h3,
have h5 : 5 + (complexity φ) * (complexity ψ) + 4 * (complexity ψ) <
(complexity φ) * (complexity ψ) + 4 * (complexity ψ) + 4 * (complexity φ) + 4 * 4, linarith,
exact h5
end
lemma tr7helper5 {φ ψ : formPA agents} : 5 + (complexity φ) * (complexity ψ) + 4 * (complexity ψ) =
(5 + ((4 + complexity φ) * complexity ψ)) :=
begin
have h1 := add_mul (complexity φ) 4 (complexity ψ),
have h2 : 5 + (complexity φ) * (complexity ψ) + 4 * (complexity ψ)
= 5 + ((4 + complexity φ) * complexity ψ), linarith,
exact h2
end
lemma tr7helper6 {φ ψ : formPA agents} : (5 + ((4 + complexity φ) * complexity ψ))
< (4 + complexity φ) * (4 + complexity ψ) :=
begin
have h1 := tr7helper5,
have h2 := tr7helper1,
exact (eq.substr h2 (eq.subst h1 tr7helper4))
end
lemma tr7 : ∀ φ ψ χ : formPA agents, (4 + (1 + max (complexity φ) ((4 + complexity φ)
* complexity ψ))) * complexity χ < (4 + complexity φ) * ((4 + complexity ψ) * complexity χ) :=
begin
intros φ ψ χ,
have h1 := mul_assoc (4 + complexity φ) (4 + complexity ψ) (complexity χ),
have h2 : (5 + max (complexity φ) ((4 + complexity φ) * complexity ψ))
= (4 + 1 + max (complexity φ) ((4 + complexity φ) * complexity ψ)), linarith,
have h3 : (4 + 1 + max (complexity φ) ((4 + complexity φ) * complexity ψ))
= (4 + (1 + max (complexity φ) ((4 + complexity φ) * complexity ψ))), linarith,
exact eq.subst h1 (mul_lt_mul_of_pos_right (eq.subst h3 (maxhelper tr7helper3 tr7helper6)) comp_gt_zero),
end
-------------- Helper lemmas for equiv_translation_aux --------------
lemma compand1 {φ ψ : formPA agents} {n : nat} : complexity (φ & ψ) ≤ n + 1 → complexity φ ≤ n :=
begin
intro h,
rw complexity at *,
have h1 := le_max_left (complexity φ) (complexity ψ),
linarith
end
lemma compand2 {φ ψ : formPA agents} {n : nat} : complexity (φ & ψ) ≤ n + 1 → complexity ψ ≤ n :=
begin
intro h,
rw complexity at *,
have h1 := le_max_right (complexity φ) (complexity ψ),
linarith
end
lemma compimp1 {φ ψ : formPA agents} {n : nat} : complexity (φ ⊃ ψ) ≤ n + 1 → complexity φ ≤ n :=
begin
intro h,
rw complexity at *,
have h1 := le_max_left (complexity φ) (complexity ψ),
linarith
end
lemma compimp2 {φ ψ : formPA agents} {n : nat} : complexity (φ ⊃ ψ) ≤ n + 1 → complexity ψ ≤ n :=
begin
intro h,
rw complexity at *,
have h1 := le_max_right (complexity φ) (complexity ψ),
linarith
end
lemma updatecomphelper {φ ψ : formPA agents} :
complexity (U φ ψ) = (complexity φ + 4) * complexity ψ :=
begin
have h1 := add_comm 4 (complexity φ),
have h2 : (4 + complexity φ) * complexity ψ = (4 + complexity φ) * complexity ψ, refl,
exact (eq.subst h1 h2),
end
lemma updatecompand1 {φ ψ χ : formPA agents} {n : nat} :
complexity (U φ (ψ & χ)) ≤ n + 1 → complexity (U φ ψ) ≤ n :=
begin
intro h1,
simp only [complexity] at *,
have h2 := add_le_add_left (le_max_left (complexity ψ) (complexity χ)) 1,
have h3 := comp_ge_one,
have h4 : complexity φ + 4 ≥ 1, from le_add_right h3,
have h5 := le_trans (nat.mul_le_mul_left ((4 + complexity φ)) h2) h1,
have h6 := mul_add (4 + complexity φ) 1 (complexity ψ),
have h7 : (4 + complexity φ) * (complexity ψ) <
(4 + complexity φ) + (4 + complexity φ) * (complexity ψ), linarith,
linarith
end
lemma updatecompand2 {φ ψ χ : formPA agents} {n : nat} :
complexity (U φ (ψ & χ)) ≤ n + 1 → complexity (U φ χ) ≤ n :=
begin
intro h1,
simp only [complexity] at *,
have h2 := add_le_add_left (le_max_right (complexity ψ) (complexity χ)) 1,
have h3 := comp_ge_one,
have h4 : complexity φ + 4 ≥ 1, from le_add_right h3,
have h5 := le_trans (nat.mul_le_mul_left ((4 + complexity φ)) h2) h1,
have h6 := mul_add (4 + complexity φ) 1 (complexity χ),
have h7 : (4 + complexity φ) * (complexity ψ) <
(4 + complexity φ) + (4 + complexity φ) * (complexity ψ), linarith,
linarith
end
lemma updatecompimp1 {φ ψ χ : formPA agents} {n : nat} :
complexity (U φ (ψ ⊃ χ)) ≤ n + 1 → complexity (U φ ψ) ≤ n :=
begin
intro h1,
rw complexity at *,
have h2 := add_le_add_left (le_max_left (complexity ψ) (complexity χ)) 1,
have h3 := comp_ge_one,
have h4 : complexity φ + 4 ≥ 1, from le_add_right h3,
have h5 := le_trans (nat.mul_le_mul_left ((4 + complexity φ)) h2) h1,
have h6 := mul_add (4 + complexity φ) 1 (complexity ψ),
have h7 : (4 + complexity φ) * (complexity ψ) <
(4 + complexity φ) + (4 + complexity φ) * (complexity ψ), linarith,
linarith
end
lemma updatecompimp2 {φ ψ χ : formPA agents} {n : nat} :
complexity (U φ (ψ ⊃ χ)) ≤ n + 1 → complexity (U φ χ) ≤ n :=
begin
intro h1,
simp only [complexity] at *,
have h2 := add_le_add_left (le_max_right (complexity ψ) (complexity χ)) 1,
have h3 := comp_ge_one,
have h4 : complexity φ + 4 ≥ 1, from le_add_right h3,
have h5 := le_trans (nat.mul_le_mul_left ((4 + complexity φ)) h2) h1,
have h6 := mul_add (4 + complexity φ) 1 (complexity χ),
have h7 : (4 + complexity φ) * (complexity ψ) <
(4 + complexity φ) + (4 + complexity φ) * (complexity ψ), linarith,
linarith
end
lemma updatecompknow1' (φ ψ : formPA agents) {n : nat} {a : agents} : complexity (U φ (K a ψ)) ≤ n + 1
→ 1 + complexity φ ≤ n :=
begin
intro h1,
simp only [complexity] at h1,
have h2 : complexity φ > 0, from comp_gt_zero,
have h3 : complexity ψ ≥ 1, from comp_ge_one,
have h4 := add_mul 4 (complexity φ) (1 + complexity ψ),
have h5 := mul_add (complexity φ) 1 (complexity ψ),
have h6 := mul_add 4 1 (complexity ψ),
have h7 : 4 * (1 + complexity ψ) + complexity φ * (1 + complexity ψ) =
4 * (1 + complexity ψ) + complexity φ * 1 + complexity φ * complexity ψ, linarith,
have h8 : (4 + complexity φ) * (1 + complexity ψ) =
4 * 1 + 4 * complexity ψ + complexity φ * 1 + complexity φ * complexity ψ, linarith,
have h9 : complexity φ * complexity ψ + complexity φ * 1 + 4 * complexity ψ + 4 * 1 ≤ n + 1, linarith,
have h10 : complexity φ * complexity ψ + complexity φ * 1 + 4 * complexity ψ ≤ n, linarith,
have : complexity φ * complexity ψ ≥ 0, apply nat.zero_le,
have h11 : 1 + complexity φ ≤
complexity φ * complexity ψ + complexity φ * 1 + 4 * complexity ψ, linarith,
exact le_trans h11 h10
end
lemma updatecompknow2' {φ ψ : formPA agents} {n : nat} : (4 + complexity φ) * (1 + complexity ψ) ≤ n + 1
→ 1 + (1 + (4 + complexity φ) * complexity ψ) ≤ n :=
begin
intro h1,
have h2 : complexity φ ≥ 1, from comp_ge_one,
have h3 : complexity ψ ≥ 1, from comp_ge_one,
have h4 := add_mul 4 (complexity φ) (1 + complexity ψ),
have h5 := mul_add (complexity φ) 1 (complexity ψ),
have h6 := mul_add 4 (complexity ψ) 1,
have h7 : (4 + complexity φ) * (1 + complexity ψ) =
4 * (1 + complexity ψ) + complexity φ * 1 + complexity φ * complexity ψ, linarith,
have h8 : (4 + complexity φ) * (1 + complexity ψ) =
complexity φ * complexity ψ + complexity φ * 1 + 4 * complexity ψ + 4 * 1, linarith,
have h9 : complexity φ * complexity ψ + complexity φ * 1 + 4 * complexity ψ + 4 ≤ n + 1,
from eq.subst h8 h1,
have h10 : complexity φ * complexity ψ + complexity φ * 1 + 4 * complexity ψ + 3 ≤ n, linarith,
have h11 : (4 + complexity φ) * complexity ψ = 4 * complexity ψ + complexity φ * complexity ψ,
from add_mul 4 (complexity φ) (complexity ψ),
have h12 : 1 + (4 + complexity φ) * complexity ψ =
1 + (4 * complexity ψ + complexity φ * complexity ψ), linarith,
have h13 : 1 + (1 + (4 + complexity φ) * complexity ψ) =
1 + (1 + (4 * complexity ψ + complexity φ * complexity ψ)), linarith,
have h14 : 1 + (1 + (4 * complexity ψ + complexity φ * complexity ψ)) =
2 + 4 * complexity ψ + complexity φ * complexity ψ, linarith,
have h15 : 2 + 4 * complexity ψ + complexity φ * complexity ψ ≤
complexity φ * complexity ψ + complexity φ * 1 + 4 * complexity ψ + 3, linarith,
have h16 : 1 + (1 + (4 * complexity ψ + complexity φ * complexity ψ)) ≤
complexity φ * complexity ψ + complexity φ * 1 + 4 * complexity ψ + 3, from eq.substr h14 h15,
have h17 : 1 + (1 + (4 + complexity φ) * complexity ψ) ≤
complexity φ * complexity ψ + complexity φ * 1 + 4 * complexity ψ + 3, from eq.substr h12 h16,
exact le_trans h17 h10
end
lemma updatecompknow1 {a : agents} {φ ψ : formPA agents} {n : nat} :
complexity (U φ (K a ψ)) ≤ n + 1 → complexity φ ≤ n :=
begin
intro h1,
have h2 := updatecompknow1' φ ψ h1,
linarith
end
lemma updatecompknow2 {a : agents} {φ ψ : formPA agents} {n : nat} :
complexity (U φ (K a ψ)) ≤ n + 1 → complexity (φ ⊃ K a (U φ ψ)) ≤ n :=
begin
intro h1,
exact le_maxhelper (updatecompknow1' φ ψ h1) (updatecompknow2' h1)
end
lemma updatecompupdate {φ ψ χ : formPA agents} {n : nat} :
complexity (U φ (U ψ χ)) ≤ n + 1 → complexity (U (φ & U φ ψ) χ) ≤ n :=
begin
intro h1, repeat {rw complexity at *},
have h2 := add_left_comm 1 4 (max (complexity φ) ((complexity φ + 4) * complexity ψ)),
have h3 : (1 + (4 + max (complexity φ) ((4 + complexity φ) * complexity ψ))) =
(4 + (1 + max (complexity φ) ((4 + complexity φ) * complexity ψ))),
from eq.subst (add_comm (complexity φ) 4) h2,
have h4 : (1 + (4 + max (complexity φ) ((4 + complexity φ) * complexity ψ))) * complexity χ =
(1 + (4 + max (complexity φ) ((4 + complexity φ) * complexity ψ))) * complexity χ, refl,
have h5 := lt_of_lt_of_le (tr7 φ ψ χ) h1,
linarith
end
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* (C) Copyright 2013 Andrey Semashev
* (C) Copyright 2013 Tim Blechmann
*/
/*!
 * \file   sync/mutexes/shared_spin_mutex.hpp
*
* \brief This header defines a shared spin mutex primitive.
*/
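/*
 * A minimal usage sketch (an editorial addition, not part of the original
 * header): writers take the exclusive lock, readers take the shared lock.
 *
 *   boost::sync::shared_spin_mutex mtx;
 *   int shared_value = 0;
 *
 *   void writer() { mtx.lock(); ++shared_value; mtx.unlock(); }
 *   int  reader()
 *   {
 *       mtx.lock_shared();
 *       int v = shared_value;  // safe concurrent read
 *       mtx.unlock_shared();
 *       return v;
 *   }
 */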
#ifndef BOOST_SYNC_MUTEXES_SHARED_SPIN_MUTEX_HPP_INCLUDED_
#define BOOST_SYNC_MUTEXES_SHARED_SPIN_MUTEX_HPP_INCLUDED_
#if defined(BOOST_SYNC_DETAIL_DOXYGEN)
namespace boost {
namespace sync {
class shared_spin_mutex
{
public:
/*!
* \brief Default constructor
*
* Creates a mutual exclusion primitive in the unlocked state.
*/
shared_spin_mutex() noexcept;
/*!
* \brief Destructor
*
* Destroys the mutual exclusion primitive.
*
* \pre The primitive is in the unlocked state.
*/
~shared_spin_mutex() noexcept;
    shared_spin_mutex(shared_spin_mutex const&) = delete;
    shared_spin_mutex& operator= (shared_spin_mutex const&) = delete;
/*!
* \brief Exclusively locks the mutex
*
* If the mutex is not locked, the method acquires an exclusive lock and returns. Otherwise the method blocks until the mutex is unlocked.
*/
void lock() noexcept;
/*!
* \brief Attempts to exclusively lock the mutex
*
* If the mutex is not locked, the method acquires an exclusive lock and returns \c true. Otherwise the method returns \c false.
*/
bool try_lock() noexcept;
/*!
* \brief Unlocks the mutex
*
* Releases the mutex that has been exclusively locked by the current thread.
*
* \pre The mutex is exclusively locked by the current thread.
*/
void unlock() noexcept;
/*!
* \brief Shared locks the mutex
*
     * If the mutex is not exclusively locked, the method acquires a shared lock and returns. Otherwise the method blocks until the exclusive lock is released.
*/
void lock_shared() noexcept;
/*!
     * \brief Attempts to shared lock the mutex
     *
     * If the mutex is not exclusively locked, the method acquires a shared lock and returns \c true. Otherwise the method returns \c false.
     */
    bool try_lock_shared() noexcept;
/*!
* \brief Unlocks the mutex
*
* Releases the mutex that has been shared locked by the current thread.
*
* \pre The mutex is shared locked by the current thread.
*/
void unlock_shared() noexcept;
};
} // namespace sync
} // namespace boost
#else // defined(BOOST_SYNC_DETAIL_DOXYGEN)
#include <boost/sync/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#include <boost/sync/detail/mutexes/shared_spin_mutex.hpp>
#endif // defined(BOOST_SYNC_DETAIL_DOXYGEN)
#endif // BOOST_SYNC_MUTEXES_SHARED_SPIN_MUTEX_HPP_INCLUDED_
!++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
! Main prog
!++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
!
PROGRAM sico2elmer
IMPLICIT NONE
INTEGER :: &
noption=999, i, j, k, flag = 1, ndata, istat, imax,jmax,kcmax,ktmax,krmax, gotit
REAL ::&
deform, Dx=0.0e0, time_gr, H_R_gr, YEAR_SEC = 31556926.0
CHARACTER :: &
runname * 5, ergnum * 5, rname * 5, enum * 5, grid
LOGICAL :: &
dataread=.FALSE.
INTEGER, ALLOCATABLE ::&
maske_gr(:,:), n_cts_gr(:,:)
REAL, ALLOCATABLE :: &
xi_gr(:), eta_gr(:),&
zs_gr(:,:), zm_gr(:,:),zb_gr(:,:), zb0_gr(:,:),&
z_c_gr(:,:,:), z_t_gr(:,:,:), z_r_gr(:,:,:),&
H_c_gr(:,:), H_t_gr(:,:),&
vx_c_gr(:,:,:), vy_c_gr(:,:,:), vz_c_gr(:,:,:),&
vx_t_gr(:,:,:), vy_t_gr(:,:,:), vz_t_gr(:,:,:),&
temp_c_gr(:,:,:), temp_r_gr(:,:,:), temp_t_gr(:,:,:),&
temph_c_gr(:,:,:), temph_t_gr(:,:,:),&
age_c_gr(:,:,:), age_t_gr(:,:,:), omega_t_gr(:,:,:),am_perp_gr(:,:),&
qx_gr(:,:), qy_gr(:,:), Q_bm_gr(:,:), Q_tld_gr(:,:)
SAVE dataread, runname
! inquire run identifier and allocate fields according to information in log-file
!--------------------------------------------------------------------------------
WRITE( *, '(A)', ADVANCE = 'YES')' '
WRITE( *, '(A)', ADVANCE = 'YES')'This is sico2elmer'
WRITE( *, '(A)', ADVANCE = 'YES')'******************'
WRITE( *, '(A)', ADVANCE = 'YES')' '
WRITE( *, '(A)', ADVANCE = 'NO') 'Input of run identifier (5 characters): '
READ (*,'(A5)',ADVANCE = 'YES') runname
WRITE( *, '(A,A)', ADVANCE = 'YES') 'chosen run identifier: ', runname
CALL readlog_c(runname, imax,jmax,kcmax,ktmax,krmax,deform,Dx,gotit)
IF (gotit .EQ. 0) THEN
WRITE(6,'(A)', ADVANCE = 'YES') 'Error occurred while opening log-file!'
STOP
ELSE
WRITE( *, '(A)', ADVANCE = 'YES') 'Log read successful'
! WRITE( *, '(I4,I4,I4,I4,F8.4,F8.4)', ADVANCE = 'YES') imax,jmax,kcmax,ktmax,deform,dx
END IF
ALLOCATE( &
maske_gr(0:imax,0:jmax), n_cts_gr(0:imax,0:jmax),&
xi_gr(0:imax), eta_gr(0:jmax),&
z_c_gr(0:imax,0:jmax,0:kcmax),&
z_t_gr(0:imax,0:jmax,0:ktmax),&
z_r_gr(0:imax,0:jmax,0:krmax),&
zs_gr(0:imax,0:jmax), zm_gr(0:imax,0:jmax),&
zb_gr(0:imax,0:jmax), zb0_gr(0:imax,0:jmax),&
H_c_gr(0:imax,0:jmax),H_t_gr(0:imax,0:jmax),&
vx_c_gr(0:imax,0:jmax,0:kcmax),&
vy_c_gr(0:imax,0:jmax,0:kcmax),&
vz_c_gr(0:imax,0:jmax,0:kcmax),&
vx_t_gr(0:imax,0:jmax,0:ktmax),&
vy_t_gr(0:imax,0:jmax,0:ktmax),&
vz_t_gr(0:imax,0:jmax,0:ktmax),&
temp_c_gr(0:imax,0:jmax,0:kcmax),&
temph_c_gr(0:imax,0:jmax,0:kcmax),&
age_c_gr(0:imax,0:jmax,0:kcmax),&
omega_t_gr(0:imax,0:jmax,0:ktmax),&
temp_t_gr(0:imax,0:jmax,0:ktmax),&
temph_t_gr(0:imax,0:jmax,0:ktmax),&
age_t_gr(0:imax,0:jmax,0:ktmax),&
temp_r_gr(0:imax,0:jmax,0:krmax),&
qx_gr(0:imax,0:jmax), qy_gr(0:imax,0:jmax),&
Q_bm_gr(0:imax,0:jmax), Q_tld_gr(0:imax,0:jmax),&
am_perp_gr(0:imax,0:jmax),&
STAT=istat )
IF ( istat /= 0 ) THEN
WRITE( *, '(A)', ADVANCE = 'YES') 'Error in allocation of memory!'
STOP
ELSE
WRITE( *, '(A)', ADVANCE = 'YES') 'Allocation of arrays done'
END IF
! Main Menu
!--------------------------------------------------------------------------------
DO
WRITE( *, '(A)', ADVANCE = 'YES') ' '
WRITE( *, '(A)', ADVANCE = 'YES') ' '
WRITE( *, '(A)', ADVANCE = 'YES') 'Options:'
WRITE( *, '(A)', ADVANCE = 'YES') ' '
WRITE( *, '(A)', ADVANCE = 'YES') ' (1) Read SICOPOLIS timeslice-file'
WRITE( *, '(A)', ADVANCE = 'YES') ' (2) Output of timestep-grid for ELMERPOST'
WRITE( *, '(A)', ADVANCE = 'YES') ' (3) Output of timestep-data for ELMERPOST'
WRITE( *, '(A)', ADVANCE = 'YES') ' (4) Output of timestep for input to ELMER SOLVER'
WRITE( *, '(A)', ADVANCE = 'YES') ' (5) Output of timestep-data in ASCII-format'
WRITE( *, '(A)', ADVANCE = 'YES') ' '
WRITE( *, '(A)', ADVANCE = 'YES') ' (0) Quit'
WRITE( *, '(A)', ADVANCE = 'YES') ' '
WRITE( *, '(A)', ADVANCE = 'NO') 'Your choice: '
READ ( *, '(I1)', ADVANCE = 'YES') noption
WRITE( *, '(A)', ADVANCE = 'YES') ' '
WRITE( *, '(A)', ADVANCE = 'YES') ' '
IF ((noption == 1).and.(.NOT.dataread)) THEN
WRITE( *, '(A,A)', ADVANCE = 'YES') 'Reading timeslice for run: ', runname
CALL ReadData(runname, ergnum, &
imax,jmax,kcmax,ktmax,krmax,deform,&
maske_gr, n_cts_gr,&
time_gr, xi_gr, eta_gr, zs_gr, zm_gr,zb_gr, zb0_gr,&
z_c_gr, z_t_gr, z_r_gr,&
H_c_gr, H_t_gr, H_R_gr, vx_c_gr, vy_c_gr, vz_c_gr,&
vx_t_gr, vy_t_gr, vz_t_gr, temp_c_gr, temp_r_gr, temp_t_gr,&
temph_c_gr, temph_t_gr, am_perp_gr, age_c_gr, age_t_gr, omega_t_gr,&
qx_gr, qy_gr, Q_bm_gr, Q_tld_gr)
dataread = .TRUE.
ELSE IF ((noption == 2).and.(dataread)) THEN
WRITE( *, '(A,A)', ADVANCE = 'YES') 'Writing timestep-grid (ElmerPost) for run: ', runname
CALL postgrid(xi_gr, eta_gr, z_c_gr, z_t_gr, Dx, imax, jmax, kcmax, ktmax,&
runname, ergnum, maske_gr, 1)
ELSE IF ((noption == 3).and.(dataread)) THEN
WRITE( *, '(A,A)', ADVANCE = 'YES') 'Writing timestep-data (ElmerPost) for run: ', runname
CALL elmerdata(imax, jmax, kcmax, ktmax, z_c_gr, z_t_gr,&
vx_c_gr, vy_c_gr, vz_c_gr, age_c_gr, temp_c_gr, vx_t_gr, vy_t_gr,&
vz_t_gr, temp_t_gr, age_t_gr, omega_t_gr, Q_bm_gr, Q_tld_gr,&
am_perp_gr, qx_gr, qy_gr, n_cts_gr, maske_gr, runname, ergnum, flag)
ELSE IF ((noption == 4).and.(dataread)) THEN
WRITE( *, '(A,A)', ADVANCE = 'YES') 'Writing timestep-grid (ElmerSolver) for run: ', runname
call pregrid(xi_gr, eta_gr, z_c_gr, z_t_gr, imax, jmax, kcmax, ktmax, runname, ergnum,&
maske_gr, DX, flag)
ELSE IF ((noption == 5).and.(dataread)) THEN
WRITE( *, '(A,A)', ADVANCE = 'YES') 'Output of timestep-data in ASCII-format for run: ',runname
CALL asciidata(xi_gr,eta_gr,IMAX,JMAX,KCMAX,KTMAX,&
z_c_gr, z_t_gr, vx_c_gr,vy_c_gr,vz_c_gr,age_c_gr,temph_c_gr,&
vx_t_gr,vy_t_gr,vz_t_gr,temph_t_gr,age_t_gr, omega_t_gr,&
Q_bm_gr, Q_tld_gr,am_perp_gr,qx_gr,qy_gr,n_cts_gr,&
maske_gr,runname,ergnum,1)
ELSE IF (noption == 0) THEN
DO i=1,5
WRITE( *, '(A)', ADVANCE = 'YES') ' '
END DO
WRITE( *, '(A)', ADVANCE = 'YES') 'Thank you for using sico2elmer!'
WRITE( *, '(A)', ADVANCE = 'YES') 'Bye'
STOP
ELSE
WRITE( *, '(A)', ADVANCE = 'NO') 'Could not perform command due to '
IF (noption > 5) THEN
WRITE( *, '(A)', ADVANCE = 'YES') 'invalid selection!'
ELSE
WRITE( *, '(A)', ADVANCE = 'YES') 'missing timeslice data! Select option (1) first'
END IF
END IF
! clear screen
! PAUSE
DO i=1,5
WRITE( *, '(A)', ADVANCE = 'YES') ' '
END DO
END DO
CONTAINS
!==============================================================================
SUBROUTINE ReadData(runname, ergnum, &
imax,jmax,kcmax,ktmax,krmax,deform,&
maske_gr, n_cts_gr,&
time_gr, xi_gr, eta_gr, zs_gr, zm_gr,zb_gr, zb0_gr,&
z_c_gr, z_t_gr, z_r_gr,&
H_c_gr, H_t_gr, H_R_gr, vx_c_gr, vy_c_gr, vz_c_gr,&
vx_t_gr, vy_t_gr, vz_t_gr, temp_c_gr, temp_r_gr, temp_t_gr,&
temph_c_gr, temph_t_gr, am_perp_gr, age_c_gr, age_t_gr, omega_t_gr,&
qx_gr, qy_gr, Q_bm_gr, Q_tld_gr)
IMPLICIT NONE
! external variables:
! ------------------------------------------------------------------------
CHARACTER :: &
runname * 5, ergnum * 2
INTEGER ::&
imax,jmax,kcmax,ktmax,krmax
REAL :: &
time_gr, deform, H_R_gr
INTEGER ::&
maske_gr(0:imax,0:jmax), n_cts_gr(0:imax,0:jmax)
REAL ::&
xi_gr(0:imax), eta_gr(0:jmax),&
zs_gr(0:imax,0:jmax), zm_gr(0:imax,0:jmax),zb_gr(0:imax,0:jmax), zb0_gr(0:imax,0:jmax),&
z_c_gr(0:imax,0:jmax,0:kcmax), z_t_gr(0:imax,0:jmax,0:ktmax), z_r_gr(0:imax,0:jmax,0:krmax),&
H_c_gr(0:imax,0:jmax), H_t_gr(0:imax,0:jmax),&
vx_c_gr(0:imax,0:jmax,0:kcmax), vy_c_gr(0:imax,0:jmax,0:kcmax), vz_c_gr(0:imax,0:jmax,0:kcmax),&
vx_t_gr(0:imax,0:jmax,0:ktmax), vy_t_gr(0:imax,0:jmax,0:ktmax), vz_t_gr(0:imax,0:jmax,0:ktmax),&
temp_c_gr(0:imax,0:jmax,0:kcmax), temp_r_gr(0:imax,0:jmax,0:krmax), temp_t_gr(0:imax,0:jmax,0:ktmax),&
temph_c_gr(0:imax,0:jmax,0:kcmax), temph_t_gr(0:imax,0:jmax,0:ktmax),&
age_c_gr(0:imax,0:jmax,0:kcmax), age_t_gr(0:imax,0:jmax,0:ktmax), omega_t_gr(0:imax,0:jmax,0:ktmax),&
am_perp_gr(0:imax,0:jmax),&
qx_gr(0:imax,0:jmax), qy_gr(0:imax,0:jmax), Q_bm_gr(0:imax,0:jmax), Q_tld_gr(0:imax,0:jmax)
! internal variables:
! ------------------------------------------------------------------------
CHARACTER :: &
ergfile * 11
INTEGER :: &
ios, kmax=0, i, j, k
REAL :: &
ScalarDummy
REAL, PARAMETER :: &
YEAR_SEC = 3.1556926e07, BETA = 8.70e-04
INTEGER, ALLOCATABLE :: &
TwoDimDummyI(:,:)
REAL, ALLOCATABLE :: &
OneDimDummyX(:), OneDimDummyY(:), TwoDimDummy(:,:),&
ThreeDimDummyR(:,:,:), ThreeDimDummyT(:,:,:), ThreeDimDummyC(:,:,:)
LOGICAL :: &
FirstTime = .TRUE.
SAVE ScalarDummy, TwoDimDummy, TwoDimDummyI, ThreeDimDummyR, ThreeDimDummyT,&
ThreeDimDummyC, OneDimDummyX, OneDimDummyY, FirstTime
! allocate some stuff (first time only)
!---------------------------------------------------------------
IF (FirstTime) THEN
ALLOCATE(&
OneDimDummyX(0:imax),&
OneDimDummyY(0:jmax),&
TwoDimDummy(0:jmax,0:imax),&
TwoDimDummyI(0:jmax,0:imax),&
ThreeDimDummyR(0:krmax,0:jmax,0:imax),&
ThreeDimDummyT(0:ktmax,0:jmax,0:imax),&
ThreeDimDummyC(0:kcmax,0:jmax,0:imax),&
STAT=istat )
IF ( istat /= 0 ) THEN
WRITE( *, '(A)', ADVANCE = 'YES') 'Error in allocation of memory!'
STOP
ELSE
WRITE( *, '(A)', ADVANCE = 'YES') 'Allocation of dummy arrays done'
END IF
FirstTime = .FALSE.
END IF
! inquire timeslice and open file
!--------------------------------
WRITE( *, '(A)', ADVANCE = 'NO') 'Enter number of timeslice (may start with 0 if < 10): '
READ (*,'(A)', ADVANCE = 'YES') ergnum
ergfile = runname//ergnum//'.erg'
WRITE( *, '(A,A)', ADVANCE = 'YES') 'Attempting to open timeslice-file ', ergfile
OPEN(UNIT=10, IOSTAT=ios, FILE=ergfile, STATUS='old', FORM='unformatted')
IF (ios /= 0) THEN
WRITE(6,'(A,A,A)') 'Error occurred while opening timeslice-file ', ergfile,'!'
STOP
ELSE
WRITE( *, '(A,A)', ADVANCE = 'YES') 'Reading from ', ergfile
END IF
! read in stuff
!-------------
!time
READ(10) ScalarDummy
time_gr = ScalarDummy/YEAR_SEC ! sec -> a
!x-coords
READ(10) OneDimDummyX
xi_gr(0:imax) = 1.0e-03 * OneDimDummyX(0:imax) ! m -> km
!y-coords
READ(10) OneDimDummyY
eta_gr(0:jmax) = 1.0e-03 * OneDimDummyY(0:jmax) ! m -> km
! glaciation mask
read(10) TwoDimDummyI
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertFieldI2(imax, jmax, TwoDimDummyI, maske_gr) ! 1 -> 1
! poly-thermal condition mask
read(10) TwoDimDummyI
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertFieldI2(imax, jmax, TwoDimDummyI, n_cts_gr) ! 1 -> 1
! z-coords
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, zs_gr, 1.0e-03) ! m -> km
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, zm_gr, 1.0e-03) ! m -> km
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, zb_gr, 1.0e-03) ! m -> km
! depths
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, H_c_gr, 1.0e-03) ! m -> km
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, H_t_gr, 1.0e-03) ! m -> km
read(10) ScalarDummy
H_R_gr = 1.0e-03 * ScalarDummy! m --> km
! convert heights
! CALL ElevationColdD(imax, jmax, kcmax, deform, xi_gr, eta_gr, z_c_gr)
CALL ElevationCold(imax, jmax, kcmax, deform, zm_gr, H_c_gr, z_c_gr)
CALL ElevationTemp(imax, jmax, ktmax, zb_gr, H_t_gr, z_t_gr)
WRITE( *, '(A)', ADVANCE = 'NO') '.'
! velocity components cold region
read(10) ThreeDimDummyC
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, kcmax, ThreeDimDummyC, vx_c_gr, YEAR_SEC) ! (m/s) --> (m/yr)
read(10) ThreeDimDummyC
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, kcmax, ThreeDimDummyC, vy_c_gr, YEAR_SEC) ! (m/s) --> (m/yr)
read(10) ThreeDimDummyC
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, kcmax, ThreeDimDummyC, vz_c_gr, YEAR_SEC) ! (m/s) --> (m/yr)
! velocity components temperate region
read(10) ThreeDimDummyT
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, ktmax, ThreeDimDummyT, vx_t_gr, YEAR_SEC) ! (m/s) --> (m/yr)
read(10) ThreeDimDummyT
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, ktmax, ThreeDimDummyT, vy_t_gr, YEAR_SEC) ! (m/s) --> (m/yr)
read(10) ThreeDimDummyT
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, ktmax, ThreeDimDummyT, vz_t_gr, YEAR_SEC) ! (m/s) --> (m/yr)
! temperature field cold region (Celsius and homologe temperature)
read(10) ThreeDimDummyC
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, kcmax, ThreeDimDummyC, temp_c_gr, 1.0e0) ! in C
CALL HomologousTempC(imax, jmax, kcmax, temp_c_gr, H_c_gr, deform, BETA, temph_c_gr) ! with respect to pressure melting point
! water content in temperate layer
read(10) ThreeDimDummyT
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, ktmax, ThreeDimDummyT, omega_t_gr, 1.0e0) ! dimensionless
! temperature field bedrock
read(10) ThreeDimDummyR(0:krmax,0:jmax,0:imax)
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, krmax, ThreeDimDummyR, temp_r_gr, 1.0e0) ! in C
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, Q_bm_gr, YEAR_SEC) ! m3/(m2*s) --> m3/(m2*yr)
!!$ DO i = 0, imax
!!$ WRITE( *,'(I):', ADVANCE = 'NO') i
!!$ DO j = 0, jmax
!!$ WRITE( *,'(F8.4) ', ADVANCE = 'NO') Q_bm_gr(j,i)
!!$ END DO
!!$ WRITE( *,'(A)', ADVANCE = 'YES') 'END'
!!$ END DO
! water drainage rate from the temperate region
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, Q_tld_gr, YEAR_SEC) ! m3/(m2*s) --> m3/(m2*yr)
! ice volume flux through CTS
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, am_perp_gr, YEAR_SEC) ! (m/s) --> (m/yr)
! volume-flux components
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, qx_gr, 1.0e-03) ! m2/s --> 1000 m2/yr
read(10) TwoDimDummy
WRITE( *, '(A)', ADVANCE = 'NO') '.'
CALL ConvertField2(imax, jmax, TwoDimDummy, qy_gr, 1.0e-03) ! m2/s --> 1000 m2/yr
read(10) ThreeDimDummyC
WRITE( *, '(A)', ADVANCE = 'NO') '....'
CALL ConvertField3(imax, jmax, kcmax, ThreeDimDummyC, age_c_gr, 1.0e0/(1.0e03*YEAR_SEC)) ! s --> kyr
read(10) ThreeDimDummyT
WRITE( *, '(A)', ADVANCE = 'YES') '....'
CALL ConvertField3(imax, jmax, ktmax, ThreeDimDummyT, age_t_gr, 1.0e0/(1.0e03*YEAR_SEC)) ! s --> kyr
WRITE( *, '(A)', ADVANCE = 'YES') 'Read in completed'
! close file
!-----------
CLOSE(10, STATUS='keep')
!==============================================================================
END SUBROUTINE ReadData
!--------------------------------------------------------------------------
! Writes 2d arrays read in from file into graphical output arrays
!--------------------------------------------------------------------------
SUBROUTINE ConvertField2(imax, jmax, in, out, factor)
IMPLICIT NONE
! external variables
!-------------------
INTEGER ::&
imax, jmax
REAL ::&
in(0:jmax,0:imax), out(0:imax,0:jmax), factor !in(0:(imax+1)*(jmax+1)-1)
! internal variables
!-------------------
INTEGER ::&
i, j
DO i = 0, imax
DO j = 0, jmax
! out(i,j) = factor * in(i*(jmax+1) + j)
out(i,j) = factor * in(j,i)
END DO
END DO
END SUBROUTINE ConvertField2
!==============================================================================
!--------------------------------------------------------------------------
! Writes 2d integer arrays read in from file into graphical output arrays
!--------------------------------------------------------------------------
SUBROUTINE ConvertFieldI2(imax, jmax, in, out)
IMPLICIT NONE
! external variables
!-------------------
INTEGER ::&
imax, jmax
INTEGER ::&
in(0:jmax,0:imax), out(0:imax,0:jmax)
! internal variables
!-------------------
INTEGER ::&
i, j
DO i = 0, imax
DO j = 0, jmax
out(i,j) = in(j,i)
END DO
END DO
END SUBROUTINE ConvertFieldI2
!==============================================================================
!--------------------------------------------------------------------------
! Writes 3d arrays read in from file into graphical output arrays
!--------------------------------------------------------------------------
SUBROUTINE ConvertField3(imax, jmax, kmax, in, out, factor)
IMPLICIT NONE
! external variables
!-------------------
INTEGER ::&
imax, jmax, kmax
REAL ::&
in(0:kmax,0:jmax,0:imax), out(0:imax,0:jmax,0:kmax), factor
! internal variables
!-------------------
INTEGER ::&
i, j, k
DO i = 0, imax
DO j = 0, jmax
DO k=0, kmax
out(i,j,k) = factor * in(k,j,i)
END DO
END DO
END DO
END SUBROUTINE ConvertField3
!==============================================================================
!--------------------------------------------------------------------------
! Transforms Celsius temperature into homologous temperature (relative to the pressure melting point)
!--------------------------------------------------------------------------
SUBROUTINE HomologousTempC(imax, jmax, kcmax, in, depth, deform, BETA, out)
IMPLICIT NONE
! external variables
!-------------------
INTEGER ::&
imax, jmax, kcmax
REAL ::&
in(0:imax,0:jmax,0:kcmax), out(0:imax,0:jmax,0:kcmax),&
depth(0:imax,0:jmax)
REAL :: &
BETA, deform
! internal variables
!-------------------
INTEGER ::&
i, j, k
REAL :: &
ea, eaz_c_quotient, zeta_c, eaz_c
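! The correction term is the pressure-melting-point offset: proportional
! (factor BETA) to the depth below the ice surface, with (1 - eaz_c_quotient)
! mapping the exponentially stretched sigma coordinate zeta_c to a normalized
! depth below the surface (zero at the surface k = kcmax, one at the base k = 0)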
ea = exp(deform)
DO k=0, kcmax
zeta_c = real(k)/real(kcmax)
eaz_c = exp(DEFORM*zeta_c)
eaz_c_quotient =(eaz_c-1.0)/(ea-1.0)
DO i = 0, imax
DO j = 0, jmax
out(i,j,k) = in(i,j,k)&
- ( -1000.0*BETA*depth(i,j)*(1.0-eaz_c_quotient) )
END DO
END DO
END DO
END SUBROUTINE HomologousTempC
!==============================================================================
!--------------------------------------------------------------------------
! Transforms Celsius temperature into homologous temperature (relative to the pressure melting point)
!--------------------------------------------------------------------------
SUBROUTINE HomologousTempT(imax, jmax, ktmax, in, depth1, depth2, BETA, out)
IMPLICIT NONE
! external variables
!-------------------
INTEGER ::&
imax, jmax, ktmax
REAL ::&
in(0:imax,0:jmax,0:ktmax), out(0:imax,0:jmax,0:ktmax),&
depth1(0:imax,0:jmax), depth2(0:imax,0:jmax)
REAL :: &
BETA
! internal variables
!-------------------
INTEGER ::&
i, j, k
REAL :: &
zeta_t
DO k=0, ktmax
zeta_t = real(k)/real(ktmax)
DO i = 0, imax
DO j = 0, jmax
out(i,j,k) = in(i,j,k)&
- ( -1000.0*BETA*(depth1(i,j)+depth2(i,j))*(1.0-zeta_t) )
END DO
END DO
END DO
END SUBROUTINE HomologousTempT
!==============================================================================
!--------------------------------------------------------------------------
! Transforms normalized into real elevations in temperate layer
!--------------------------------------------------------------------------
SUBROUTINE ElevationTemp(imax, jmax, ktmax, zb_gr, H_t_gr, z_t_gr)
IMPLICIT NONE
! external variables
!-------------------
INTEGER ::&
imax, jmax, ktmax
REAL ::&
zb_gr(0:imax,0:jmax),&
H_t_gr(0:imax,0:jmax), z_t_gr(0:imax,0:jmax,0:ktmax)
! internal variables
!-------------------
INTEGER ::&
i, j, k
REAL :: &
zeta_t
DO k=0, ktmax
zeta_t = real(k)/real(ktmax)
DO i = 0, imax
DO j = 0, jmax
z_t_gr(i,j,k) = zb_gr(i,j) +H_t_gr(i,j)*zeta_t
END DO
END DO
END DO
END SUBROUTINE ElevationTemp
!==============================================================================
!--------------------------------------------------------------------------
! Transforms normalized into real elevations in cold layer (dummy version)
!--------------------------------------------------------------------------
SUBROUTINE ElevationColdD(imax, jmax, kcmax, deform, xi_gr, eta_gr, z_c_gr)
IMPLICIT NONE
! external variables
!-------------------
INTEGER ::&
imax, jmax, kcmax
REAL ::&
xi_gr(0:imax), eta_gr(0:jmax), z_c_gr(0:imax,0:jmax,0:kcmax)
REAL :: &
deform
! internal variables
!-------------------
INTEGER ::&
i, j, k
DO i = 0, imax
DO j = 0, jmax
DO k=0, kcmax
! z_c_gr(i,j,k) = xi_gr(i) * eta_gr(j)
z_c_gr(i,j,k) = 0.0e0
END DO
END DO
END DO
END SUBROUTINE ElevationColdD
SUBROUTINE ElevationCold(imax, jmax, kcmax, deform, zm_gr, H_c_gr, z_c_gr)
IMPLICIT NONE
! external variables
!-------------------
INTEGER ::&
imax, jmax, kcmax
REAL ::&
zm_gr(0:imax,0:jmax), H_c_gr(0:imax,0:jmax), z_c_gr(0:imax,0:jmax,0:kcmax)
REAL :: &
deform
! internal variables
!-------------------
INTEGER ::&
i, j, k
REAL :: &
ea, eaz_c_quotient, zeta_c, eaz_c
ea = exp(deform)
DO k=0, kcmax
zeta_c = real(k)/real(kcmax)
eaz_c = exp(DEFORM*zeta_c)
eaz_c_quotient =(eaz_c-1.0e0)/(ea-1.0e0)
DO i = 0, imax
DO j = 0, jmax
z_c_gr(i,j,k) = zm_gr(i,j) + H_c_gr(i,j)*eaz_c_quotient
END DO
END DO
END DO
END SUBROUTINE ElevationCold
!==============================================================================
SUBROUTINE ReadLog(runname, imax,jmax,kcmax,ktmax,krmax,deform,Dx)
IMPLICIT NONE
! external variables:
! ------------------------------------------------------------------------
INTEGER ::&
imax,jmax,kcmax,ktmax,krmax
CHARACTER :: &
runname * 5
REAL ::&
deform, Dx, rDummy
! internal variables:
! ------------------------------------------------------------------------
CHARACTER :: &
logdat * 9, chtrash
INTEGER :: &
ios
logdat = runname//'.log'
WRITE( *, '(A,A)', ADVANCE = 'YES') 'Attempting to open log-file ', logdat
OPEN(UNIT=10, iostat=ios, file=logdat, status='old')
IF (ios.ne.0) THEN
WRITE(6,'(A)') 'Error occurred while opening log-file!'
STOP
ELSE
WRITE( *, '(A,A)', ADVANCE = 'YES') 'Reading from ', logdat
END IF
! READ(10,'(a7,i4)') chtrash
! READ(10,'(a7,i4)') chtrash
! READ(10,'(a7,i4)') chtrash
READ(10,'(a7,i4)') chtrash, imax
READ(10,'(a7,i4)') chtrash, jmax
READ(10,'(a7,i4)') chtrash, kcmax
READ(10,'(a7,i4)') chtrash, ktmax
READ(10,'(a7,i4)') chtrash, krmax
READ(10,'(a)') chtrash
READ(10,'(a3,e9.2)') chtrash, deform
READ(10,'(a,e9.2)') chtrash
READ(10,'(a12,e9.2)') chtrash, rDummy
READ(10,'(a12,e9.2)') chtrash, rDummy
READ(10,'(a)') chtrash
READ(10,'(a9,e9.2)') chtrash, Dx
CLOSE(10)
WRITE( *, '(A)', ADVANCE = 'YES')
WRITE( *, '(A)', ADVANCE = 'YES') 'The following parameters have been read in:'
WRITE( *, '(A,i4)', ADVANCE = 'YES') ' imax=', imax
WRITE( *, '(A,i4)', ADVANCE = 'YES') ' jmax=', jmax
WRITE( *, '(A,i4)', ADVANCE = 'YES') ' kcmax=', kcmax
WRITE( *, '(A,i4)', ADVANCE = 'YES') ' ktmax=', ktmax
WRITE( *, '(A,i4)', ADVANCE = 'YES') ' krmax=', krmax
WRITE( *, '(A,f9.2)', ADVANCE = 'YES') ' deform=', deform
WRITE( *, '(A,f9.2)', ADVANCE = 'YES') ' Dx=', Dx
WRITE( *, '(A)', ADVANCE = 'YES') ' '
WRITE( *, '(A)', ADVANCE = 'YES')
END SUBROUTINE ReadLog
!==============================================================================
END PROGRAM sico2elmer
!==============================================================================
!==============================================================================
|
rebol [
title: "Scheduler"
]
scheduler: make object! [
scheduled: make block! 20
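; scheduled is a flat series of pairs: a date! followed by a block [id action],
; kept ordered by date via sort/skip scheduled 2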
process: func[][
while [not tail? scheduled][
either scheduled/1 <= now [
dprint ["SCHEDULED-ACTION: " mold scheduled/2/2]
attempt scheduled/2/2
remove/part back scheduled 2
][ break ]
]
]
add-action: func[id when [date!] action [block!]][
insert/only scheduled reduce [id action]
insert scheduled when
sort/skip scheduled 2
dprint [mold scheduled]
]
remove-action: func[id][
while [not tail? scheduled][
either scheduled/2/1 = id [
remove/part scheduled 2
][ scheduled: skip scheduled 2 ]
]
scheduled: head scheduled ; reset the series position after skipping past entries
dprint [mold scheduled]
]
] |
Require Import
HoTT.Classes.interfaces.abstract_algebra.
(** If [B] is a (bounded) lattice, then so is [A -> B], pointwise.
This relies on functional extensionality. *)
Section contents.
Context `{Funext}.
Context {A B : Type}.
Context `{BJoin : Join B}.
Context `{BMeet : Meet B}.
Context `{BBottom : Bottom B}.
Context `{BTop : Top B}.
Global Instance bot_fun : Bottom (A -> B)
:= fun _ => ⊥.
Global Instance top_fun : Top (A -> B)
:= fun _ => ⊤.
Global Instance join_fun : Join (A -> B) :=
fun (f g : A -> B) (a : A) => (f a) ⊔ (g a).
Global Instance meet_fun : Meet (A -> B) :=
fun (f g : A -> B) (a : A) => (f a) ⊓ (g a).
(** Try to solve some of the lattice obligations automatically *)
Create HintDb lattice_hints.
Hint Resolve
associativity
absorption
commutativity | 1 : lattice_hints.
Local Ltac reduce_fun := compute; intros; apply path_forall; intro.
Local Ltac try_solve_fun :=
reduce_fun;
eauto 10 with lattice_hints typeclass_instances.
Global Instance lattice_fun `{!IsLattice B} : IsLattice (A -> B).
Proof.
repeat split; try apply _; try_solve_fun.
* apply binary_idempotent.
* apply binary_idempotent.
Defined.
Instance boundedjoinsemilattice_fun
`{!IsBoundedJoinSemiLattice B} :
IsBoundedJoinSemiLattice (A -> B).
Proof.
repeat split; try apply _; try_solve_fun.
* apply left_identity.
* apply right_identity.
* apply commutativity.
* apply binary_idempotent.
Defined.
Instance boundedmeetsemilattice_fun
`{!IsBoundedMeetSemiLattice B} :
IsBoundedMeetSemiLattice (A -> B).
Proof.
repeat split; try apply _; reduce_fun.
* apply associativity.
* apply left_identity.
* apply right_identity.
* apply commutativity.
* apply binary_idempotent.
Defined.
Global Instance boundedlattice_fun
`{!IsBoundedLattice B} : IsBoundedLattice (A -> B).
Proof.
repeat split; try apply _; reduce_fun; apply absorption.
Defined.
End contents.
|
# Episodic Lunar Lander with function approximation and control
This notebook is intended to solve the Episodic Lunar Lander problem using Semi-gradient Expected Sarsa with neural networks for function approximation.
The description of the problem is given below:
"Landing pad is always at coordinates (0,0). Coordinates are the first two numbers in state vector. Reward for moving from the top of the screen to landing pad and zero speed is about 100..140 points. If lander moves away from landing pad it loses reward back. Episode finishes if the lander crashes or comes to rest, receiving additional -100 or +100 points. Each leg ground contact is +10. Firing main engine is -0.3 points each frame. Solved is 200 points. Landing outside landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land on its first attempt. Four discrete actions available: do nothing, fire left orientation engine, fire main engine, fire right orientation engine."
Image and text taken from [Official documentation: Lunar Lander](https://gym.openai.com/envs/LunarLander-v2/).
```python
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from copy import deepcopy
import gym
from gym.wrappers import Monitor
from utils import *
import torch
from torch import nn
import torch.nn.functional as F
from torch import optim
%matplotlib inline
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
```
## Markov Decision Process
As a quick recap, the diagram below explains the workflow of a Markov Decision Process (MDP)
Image taken from [Section 3.1 of Reinforcement Learning: An Introduction](http://www.incompleteideas.net/book/RLbook2018.pdf#page=70)
## Environment and Agent specifications
The states, actions, reward and termination are given as follows for the lunar lander problem.
**Observation**:
Type: Box(8)
Num Observation Min Max
0 X position -inf inf
1 Y position -inf inf
2 X velocity -inf inf
3 Y velocity -inf inf
4 Theta w.r.t ground -inf inf
5 Theta rate -inf inf
6 1 if first leg has contact, else 0 -inf inf
7 1 if second leg has contact, else 0 -inf inf
**Actions**:
Type: Discrete(4)
Num Action
0 Do nothing
1 Fire left engine
2 Fire main engine
3 Fire right engine
**Reward**:
Reward for moving from the top of the screen to the landing pad with zero speed is about 100..140 points
Reward is lost if the lander moves away from the landing pad
Reward of -100 for flying off the screen
Reward of +100 for successful landing
Reward of -0.3 for firing main thrusters
Reward of -0.03 for firing side thrusters
Reward of +10 for each leg touching the ground
**Starting State**:
The starting position is above the landing target
**Episode Termination**:
The lander crashes
The lander comes to rest
Episode length exceeds gym's TimeLimit default (1000 steps for LunarLander-v2)
For further information see [Github source code](https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py).
The next cell shows how to interact with the agent's action and observation spaces and how to extract relevant information from them
```python
env = gym.make("LunarLander-v2")
observation = env.reset()
# Object's type in the action Space
print("The Action Space is an object of type: {0}\n".format(env.action_space))
# Shape of the action Space
print("The shape of the action space is: {0}\n".format(env.action_space.n))
# Object's type in the Observation Space
print("The Environment Space is an object of type: {0}\n".format(env.observation_space))
# Shape of the observation space
print("The Shape of the dimension Space are: {0}\n".format(env.observation_space.shape))
# The high and low values in the observation space
print("The High values in the observation space are {0}, the low values are {1}\n".format(
env.observation_space.high, env.observation_space.low))
# Example of observation
print("The Observations at a given timestep are {0}\n".format(env.observation_space.sample()))
```
The Action Space is an object of type: Discrete(4)
The shape of the action space is: 4
The Environment Space is an object of type: Box(8,)
The Shape of the dimension Space are: (8,)
The High values in the observation space are [inf inf inf inf inf inf inf inf], the low values are [-inf -inf -inf -inf -inf -inf -inf -inf]
The Observations at a given timestep are [-1.3609413 -0.81943125 0.00691082 0.965331 -1.3784901 1.0290705
-1.7937465 0.85192055]
## Computing action-values with neural networks
To compute action-values, a feed-forward neural network is used. This approach lets us approximate action-values using the weights of the neural network.
$$ q_\pi(s, a) \approx \hat{q}(s, a, w) = NN(s, a, w) $$
Neural networks are used here to solve the control problem in RL; in particular, this network is used with an Episodic Semi-gradient Expected Sarsa agent. The inputs of the network are the state variables, of which there are eight in this problem, while the number of hidden layers and hidden units can vary. The number of output nodes equals the number of actions, so the final layer has four output nodes, each corresponding to the action-value of a particular action.
For further information about neural networks for function approximation see [Section 9.7 of Reinforcement Learning: An Introduction](http://www.incompleteideas.net/book/RLbook2018.pdf#page=246)
Image taken from [Reinforcement learning specialization, C4L5S1](https://www.coursera.org/learn/complete-reinforcement-learning-system/lecture/CVH40/meeting-with-adam-getting-the-agent-details-right)
```python
# Neural Netork to compute action values
class ActionValueNetwork(nn.Module):
def __init__(self, network_config):
super().__init__()
# Number of states
self.state_dim = network_config.get("state_dim")
# Hidden units
self.num_hidden_units = network_config.get("num_hidden_units")
# Actions or output units
self.num_actions = network_config.get("num_actions")
# Initialize hidden layer
self.hidden = nn.Linear(self.state_dim, self.num_hidden_units)
# Initialize output layer
self.output = nn.Linear(self.num_hidden_units, self.num_actions)
def forward(self, s):
"""
This is a feed-forward pass in the network
Args:
s (Numpy array): The state, a 2D array of shape (batch_size, state_dim)
Returns:
The action-values (Torch array) calculated using the network's weights.
A 2D array of shape (batch_size, num_actions)
"""
# Transform observations into a pytorch tensor
s = torch.Tensor(s)
q_vals = F.relu(self.hidden(s))
q_vals = self.output(q_vals)
return q_vals
```
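As a minimal sanity check (illustrative only, not part of the original experiment; the configuration values simply mirror the `agent_info` used later), the network maps a batch of states to one action-value per action:
```python
# Hypothetical configuration: 8 state variables, 4 actions, as in Lunar Lander
sanity_config = {'state_dim': 8, 'num_hidden_units': 256, 'num_actions': 4}
sanity_net = ActionValueNetwork(sanity_config)
# A batch of two random states; forward() converts the array to a tensor itself
dummy_states = np.random.randn(2, 8).astype(np.float32)
print(sanity_net(dummy_states).shape)  # expected: torch.Size([2, 4])
```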
## Replay Buffer
Experience replay is a technique very similar to planning in RL. Overall, it updates the agent's action-values with a set of "experience" collected in a model, allowing the agent to learn from simulated experience without further interaction with the environment.
Experience replay is a simple method that can obtain some of the advantages of planning by saving a buffer of experience and using the data stored in the buffer as a model. This view of prior data as a model works because the data represent actual transitions from the underlying MDP. Each entry stored in the model consists of the state, action, reward and next state.
The model is filled until a queue size is reached; only then does the model drop its oldest observation to make room for a new one. With this buffer of information, it is possible to sample "batches" and update the agent's action-values.
As a quick recap, the next image shows the pseudocode for the Dyna-Q algorithm, where the agent performs planning steps, improving its learning with simulated experience.
The planning process is given in the next image.
For further information about planning see [Section 8.2 of Reinforcement Learning: An Introduction](http://www.incompleteideas.net/book/RLbook2018.pdf#page=185). **Note**: Images taken from the last reference.
```python
class ReplayBuffer:
def __init__(self, size, minibatch_size):
"""
Args:
size (integer): The size of the replay buffer.
minibatch_size (integer): The sample size.
"""
# Create the buffer
self.buffer = []
self.minibatch_size = minibatch_size
self.max_size = size
def append(self, state, action, reward, terminal, next_state):
"""
Args:
state (Numpy array): The state of size (state_dim)
action (integer): The action.
reward (float): The reward.
terminal (integer): 1 if the next state is a terminal state and 0 otherwise.
next_state (Numpy array): The next state of size (state_dim) .
"""
if len(self.buffer) == self.max_size:
# Delete first position of the buffer if the Queue size is equals to max size
del self.buffer[0]
# Append new step
self.buffer.append([state, action, reward, terminal, next_state])
def sample(self):
"""
Returns:
A list of transition tuples including state, action, reward, terminal, and next_state
The return of this function is of size (minibatch_size)
"""
idxs = np.random.choice(np.arange(len(self.buffer)), size=self.minibatch_size)
return [self.buffer[idx] for idx in idxs]
def size(self):
return len(self.buffer)
```
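As a usage sketch (made-up transitions, added here for illustration), one transition is appended per environment step and minibatches are sampled once the buffer holds enough experience:
```python
# Illustrative buffer holding up to 100 transitions, sampling minibatches of 4
demo_buffer = ReplayBuffer(size=100, minibatch_size=4)
for _ in range(10):
    s = np.random.randn(1, 8)       # states carry a batch dimension, as the agent stores them
    s_next = np.random.randn(1, 8)
    demo_buffer.append(s, np.random.randint(4), 0.0, 0, s_next)
print(demo_buffer.size())           # 10
minibatch = demo_buffer.sample()    # 4 transitions of [state, action, reward, terminal, next_state]
```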
## Softmax Policy
To compute the actions, a softmax policy is used. One advantage of a softmax policy is that it explores according to the action-values, meaning that an action with a moderate value has a higher chance of being selected than an action with a lower value. Such policies provide a feasible alternative for exploration.
The probability of selecting each action according to the softmax policy is shown below:
$$Pr{(A_t=a | S_t=s)} \hspace{0.1cm} \dot{=} \hspace{0.1cm} \frac{e^{Q(s, a)/\tau}}{\sum_{b \in A}e^{Q(s, b)/\tau}}$$
Here, $\tau$ is the temperature parameter which controls how much the agent focuses on the highest valued actions. The smaller the temperature, the more the agent selects the greedy action. Conversely, when the temperature is high, the agent selects among actions more uniformly random.
Given that a softmax policy exponentiates action-values, the exponentials can overflow when those values are large. To implement the softmax policy in a numerically stable way, the maximum action-value is subtracted from the action-values. Doing so, the probability of selecting each action looks as follows:
$$Pr{(A_t=a | S_t=s)} \hspace{0.1cm} \dot{=} \hspace{0.1cm} \frac{e^{Q(s, a)/\tau - max_{c}Q(s, c)/\tau}}{\sum_{b \in A}e^{Q(s, b)/\tau - max_{c}Q(s, c)/\tau}}$$
Recall that shifting the action preferences (action-values in this case) by a constant does not change the final value of the softmax probability. This softmax implementation is different from the one provided by PyTorch.
For further information about softmax policies and action preferences see [Section 13.1 of Reinforcement Learning: An Introduction](http://www.incompleteideas.net/book/RLbook2018.pdf#page=344)
```python
def softmax(action_values, tau=1.0):
"""
Args:
action_values (Tensor array): A 2D array of shape (batch_size, num_actions).
The action-values computed by an action-value network.
tau (float): The temperature parameter scalar.
Returns:
A 2D Tensor array of shape (batch_size, num_actions). Where each column is a probability distribution
over the actions representing the policy.
"""
# Compute the preferences
preferences = action_values / tau
# Compute the maximum preference across the actions (Max action per row or batch)
max_preference = torch.max(preferences, dim = 1)[0]
# Reshape max_preference to [Batch, 1]
reshaped_max_preference = max_preference.view((-1, 1))
# Computing numerator
exp_preferences = torch.exp(preferences - reshaped_max_preference)
# Computing the denominator, summing over rows (batches).
sum_of_exp_preferences = torch.sum(exp_preferences, dim = 1)
# Reshape sum_of_exp_preferences array to [Batch, 1]
reshaped_sum_of_exp_preferences = sum_of_exp_preferences.view((-1, 1))
# Computing action probabilities
action_probs = exp_preferences / reshaped_sum_of_exp_preferences
action_probs = action_probs.squeeze()
return action_probs
```
```python
# Testing the Softmax implementation
rand_generator = np.random.RandomState(0)
action_values = torch.tensor(rand_generator.normal(0, 1, (2, 4)))
tau = 0.5
action_probs = softmax(action_values, tau)
print("action_probs", action_probs)
assert(np.allclose(action_probs, np.array([
[0.25849645, 0.01689625, 0.05374514, 0.67086216],
[0.84699852, 0.00286345, 0.13520063, 0.01493741]
])))
```
action_probs tensor([[0.2585, 0.0169, 0.0537, 0.6709],
[0.8470, 0.0029, 0.1352, 0.0149]], dtype=torch.float64)
## Computing TD target and TD estimate
The TD target and TD estimate's computation will be done in the next lines. The main idea here is to obtain the action-value network updates with experience sampled from the experience replay buffer.
At time $t$, there is an action-value function represented as a neural network, say $Q_t$. The idea is to update the action-value function and get a new one we can use at the next timestep. We will get this $Q_{t+1}$ using multiple replay steps that each result in an intermediate action-value function $Q_{t+1}^{i}$ where $i$ indexes which replay step we are at.
In each replay step, we sample a batch of experiences from the replay buffer and compute a minibatch Expected-SARSA update. Across these N replay steps, we will use the current "un-updated" action-value network at time $t$, $Q_t$, for computing the action-values of the next-states. This contrasts with using the most recent action-values from the last replay step $Q_{t+1}^{i}$. We make this choice to have targets that are stable across replay steps. Here is the pseudocode for performing the updates:
$$
\begin{align}
& Q_t \leftarrow \text{action-value network at timestep t (current action-value network)}\\
& \text{Initialize } Q_{t+1}^1 \leftarrow Q_t\\
& \text{For } i \text{ in } [1, ..., N] \text{ (i.e. N} \text{ replay steps)}:\\
& \hspace{1cm} s, a, r, t, s'
\leftarrow \text{Sample batch of experiences from experience replay buffer} \\
& \hspace{1cm} \text{Do Expected Sarsa update with } Q_t: Q_{t+1}^{i+1}(s, a) \leftarrow Q_{t+1}^{i}(s, a) + \alpha \cdot \left[r + \gamma \left(\sum_{b} \pi(b | s') Q_t(s', b)\right) - Q_{t+1}^{i}(s, a)\right]\\
& \hspace{1.5cm} \text{ making sure to add the } \gamma \left(\sum_{b} \pi(b | s') Q_t(s', b)\right) \text{ for non-terminal transitions only.} \\
& \text{After N replay steps, we set } Q_{t+1}^{N} \text{ as } Q_{t+1} \text{ and have a new } Q_{t+1} \text{for time step } t + 1 \text{ that we will fix in the next set of updates. }
\end{align}
$$
As you can see in the pseudocode, several computations follow once a batch of experiences has been sampled. The basic idea, however, is that we are computing a form of TD error:
$$ R_{t+1} + \gamma \hat{q}(S_{t+1}, A_{t+1}, w)- \hat{q}(S_t, A_t, w) $$
Recall that for this problem, the TD target is given by:
$$ r + \gamma \left(\sum_{b} \pi(b | s') Q_t(s', b)\right) $$
Similarly, the TD estimate is:
$$ Q_{t+1}^{i}(s, a) $$
The Semi-gradient Expected Sarsa update is given below.
$$w \leftarrow w + \alpha[R_{t+1} + \gamma \sum_{a'}\pi(a' | S_{t+1}) \hat{q}(S_{t+1}, a', w) - \hat{q}(S_t, A_t, w)]\nabla \hat{q}(S_t, A_t, w)$$
For further explanation about episodic semi-gradient control see [Section 10.1 of Reinforcement Learning: An Introduction](http://www.incompleteideas.net/book/RLbook2018.pdf#page=265)
```python
# Method to compute the TD Target and TD estimate
def get_td(states, next_states, actions, rewards, discount, terminals, network, current_q, tau):
"""
Args:
states (Numpy array): The batch of states with the shape (batch_size, state_dim).
next_states (Numpy array): The batch of next states with the shape (batch_size, state_dim).
actions (Numpy array): The batch of actions with the shape (batch_size,).
rewards (Numpy array): The batch of rewards with the shape (batch_size,).
discount (float): The discount factor (gamma).
terminals (Numpy array): The batch of terminals with the shape (batch_size,).
network (ActionValueNetwork): The latest state of the network that is getting replay updates.
current_q (ActionValueNetwork): The fixed network used for computing the targets,
and particularly, the action-values at the next-states.
tau (float): The temperature parameter of the softmax policy.
Returns:
target_vec (Tensor array): The TD Target for actions taken, of shape (batch_size,)
estimate_vec (Tensor array): The TD estimate for actions taken, of shape (batch_size,)
"""
# network is the latest state of the network that is getting replay updates. In other words,
# network represents Q_{t+1}^{i} whereas current_q represents Q_t, the fixed network used
# for computing the targets, and particularly, the action-values at the next-states.
# q_next_mat is a 2D Tensor of shape (batch_size, num_actions)
# used to compute the action-values of the next states
# Detach is used to remove this graph from the main graph
q_next_mat = current_q.forward(next_states).detach()
# Compute policy at next state.
# probs_mat is a 2D Tensor of shape (batch_size, num_actions)
probs_mat = softmax(q_next_mat, tau)
# Sum of the action-values for the next_states weighted by the policy, probs_mat.
# (1 - terminals) to make sure v_next_vec is zero for terminal next states.
# v_next_vec is a 1D Tensor of shape (batch_size,)
v_next_vec = torch.zeros((q_next_mat.shape[0]), dtype=torch.float64).detach()
# Sum over rows axis (batches)
v_next_vec = torch.sum(probs_mat * q_next_mat, dim = 1) * (1 - torch.tensor(terminals))
# Compute Expected Sarsa target
# target_vec is a 1D Tensor of shape (batch_size,)
target_vec = torch.tensor(rewards) + (discount * v_next_vec)
# Computing action values at the current states for all actions using network
# q_mat is a 2D Tensor of shape (batch_size, num_actions)
q_mat = network.forward(states)
# Batch Indices is an array from 0 to the batch size - 1.
batch_indices = torch.arange(q_mat.shape[0])
# Compute q_vec by selecting q(s, a) from q_mat for taken actions
# q_vec are the estimates
# q_vec is a 1D Tensor of shape (batch_size)
estimate_vec = q_mat[batch_indices, actions]
return target_vec, estimate_vec
```
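The following is a hedged usage sketch with dummy data (not from the original run): a copy of the network plays the role of the fixed target $Q_t$, and both returned vectors contain one entry per batch element.
```python
# Dummy batch of 4 transitions to exercise get_td (shapes mirror the docstring)
demo_net = ActionValueNetwork({'state_dim': 8, 'num_hidden_units': 256, 'num_actions': 4})
demo_fixed_q = deepcopy(demo_net)   # plays the role of Q_t, kept fixed across replay steps
demo_states = np.random.randn(4, 8)
demo_next_states = np.random.randn(4, 8)
demo_actions = np.random.randint(4, size=4)
demo_rewards = np.random.randn(4)
demo_terminals = np.zeros(4)
target, estimate = get_td(demo_states, demo_next_states, demo_actions, demo_rewards,
                          0.99, demo_terminals, demo_net, demo_fixed_q, tau=0.001)
print(target.shape, estimate.shape)  # torch.Size([4]) torch.Size([4])
```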
## Computing the network's optimizer
One important step is to optimize the network using the TD estimate and the TD target computed previously. As a quick recap, the mean squared value error is given below.
$$\overline{VE} = \sum_s\mu(s)[v_\pi(s) - \hat{v}(s,w)]^2$$
The idea is to use $\overline{VE}$ as the loss function to optimize the action-value network. For this particular problem, the MSE implementation provided by PyTorch is used. Additionally, the Adam optimizer is used to optimize the weights of the neural network.
See [Section 9.2 of Reinforcement Learning: An Introduction](http://www.incompleteideas.net/book/RLbook2018.pdf#page=221)
```python
def optimize_network(experiences, discount, optimizer, network, current_q, tau, criterion):
"""
Args:
experiences (Numpy array): The batch of experiences including the states, actions,
rewards, terminals, and next_states.
discount (float): The discount factor.
network (ActionValueNetwork): The latest state of the network that is getting replay updates.
current_q (ActionValueNetwork): The fixed network used for computing the targets,
and particularly, the action-values at the next-states.
Return:
Loss (float): The loss value for the current batch.
"""
# Get states, action, rewards, terminals, and next_states from experiences
states, actions, rewards, terminals, next_states = map(list, zip(*experiences))
states = np.concatenate(states) # Batch per states
next_states = np.concatenate(next_states) # Batch per states
rewards = np.array(rewards) # Batch size
terminals = np.array(terminals) # Batch size
batch_size = states.shape[0] # Batch size
# Computing TD target and estimate using get_td function
td_target, td_estimate = get_td(states, next_states, actions, rewards, discount, terminals, \
network, current_q, tau)
# zero the gradients buffer
optimizer.zero_grad()
# Compute the Mean squared value error loss
loss = criterion(td_estimate.double().to(device), td_target.to(device))
# Backprop the error
loss.backward()
# Optimize the network
optimizer.step()
return (loss / batch_size).detach().cpu().numpy()
```
## Implementing Expected-Sarsa Agent
The final step is to use all the methods implemented above in the Expected-Sarsa Agent.
```python
### Expected-Sarsa Agent
class ExpectedSarsaAgent():
def __init__(self):
self.name = "expected_sarsa_agent"
def agent_init(self, agent_config):
"""Setup for the agent called when the experiment first starts.
Set parameters needed to setup the agent.
Assume agent_config dict contains:
{
network_config: dictionary,
optimizer_config: dictionary,
replay_buffer_size: integer,
minibatch_sz: integer,
num_replay_updates_per_step: integer,
gamma: float,
tau: float
}
"""
self.replay_buffer = ReplayBuffer(agent_config['replay_buffer_size'],
agent_config['minibatch_sz'])
# Add model to CPU or GPU respectively
self.network = ActionValueNetwork(agent_config['network_config']).to(device)
self.optimizer = optim.Adam(self.network.parameters(), lr = agent_config['optimizer_config']['step_size'],
betas=(agent_config['optimizer_config']['beta_m'], agent_config['optimizer_config']['beta_v']),
eps=agent_config['optimizer_config']['epsilon'])
self.criterion = nn.MSELoss()
self.num_actions = agent_config['network_config']['num_actions']
self.num_replay = agent_config['num_replay_updates_per_step']
self.discount = agent_config['gamma']
self.tau = agent_config['tau']
self.last_state = None
self.last_action = None
self.sum_rewards = 0
self.episode_steps = 0
self.loss = 0
def policy(self, state):
"""
Args:
state (Numpy array): the state.
Returns:
the action.
"""
action_values = self.network.forward(state)
probs_batch = softmax(action_values, self.tau).detach().numpy()
action = np.random.choice(self.num_actions, p=probs_batch.squeeze())
return action
def agent_start(self, state):
"""The first method called when the experiment starts, called after
the environment starts.
Args:
state (Numpy array): the state from the
environment's env_start function.
Returns:
The first action the agent takes.
"""
self.sum_rewards = 0
self.episode_steps = 0
self.last_state = np.array([state])
self.last_action = self.policy(self.last_state)
return self.last_action
def agent_step(self, reward, state):
"""A step taken by the agent.
Args:
reward (float): the reward received for taking the last action taken
state (Numpy array): the state from the
environment's step function, where the agent ended up after the
last step
Returns:
The action the agent is taking.
"""
# Add current reward to the sum of rewards
self.sum_rewards += reward
self.episode_steps += 1
# Make state an array of shape (1, state_dim) to add a batch dimension and
# to later match the forward() and get_td() functions
state = np.array([state])
# Select action
action = self.policy(state)  # select the next action from the current state
# Append new experience to replay buffer
self.replay_buffer.append(self.last_state, self.last_action, reward, 0, state)
# Perform replay steps:
if self.replay_buffer.size() > self.replay_buffer.minibatch_size:
# Make a copy of the current network to obtain stable targets
current_q = deepcopy(self.network)
for _ in range(self.num_replay):
# Get sample experiences from the replay buffer
experiences = self.replay_buffer.sample()
# Call optimize_network to update the weights of the network
self.loss +=optimize_network(experiences, self.discount, self.optimizer, self.network, current_q, self.tau,
self.criterion)
# Update the last state and last action.
self.last_state = state
self.last_action = action
return self.last_action
# update of the weights using optimize_network
def agent_end(self, reward):
"""Run when the agent terminates.
Args:
reward (float): the reward the agent received for entering the
terminal state.
"""
self.sum_rewards += reward
self.episode_steps += 1
# Set terminal state to an array of zeros
state = np.zeros_like(self.last_state)
self.replay_buffer.append(self.last_state, self.last_action, reward, 1, state)
# Perform replay steps:
if self.replay_buffer.size() > self.replay_buffer.minibatch_size:
current_q = deepcopy(self.network)
for _ in range(self.num_replay):
# Get sample experiences from the replay buffer
experiences = self.replay_buffer.sample()
# Call optimize_network to update the weights of the network
self.loss += optimize_network(experiences, self.discount, self.optimizer, self.network, current_q, self.tau,
self.criterion)
def agent_message(self, message):
if message == "get_sum_reward":
return self.sum_rewards, self.episode_steps
else:
raise Exception("Unrecognized Message!")
```
## Running the experiment
The following lines solves the Lunar Lander problem and plot the average reward obtained over episodes, steps taken to solve the challenge at a specific episode and average loss over episodes.
```python
# Test the expected Sarsa Agent
num_runs = 1
num_episodes = 1000
# Experiment parameters
agent_info = {
'network_config': {
'state_dim': env.observation_space.shape[0],
'num_hidden_units': 256,
'num_actions': env.action_space.n
},
'optimizer_config': {
'step_size': 1e-3,
'beta_m': 0.9,
'beta_v': 0.999,
'epsilon': 1e-8
},
'replay_buffer_size': 50000,
'minibatch_sz': 8,
'num_replay_updates_per_step': 4,
'gamma': 0.99,
'tau': 0.001}
# Variable to store the number of steps taken to solve the challenge
all_steps = []
# Variable to save the rewards in an episode
all_rewards = []
all_loss = []
# Agent
agent = ExpectedSarsaAgent()
# Environment
env = gym.make('LunarLander-v2')
env.reset()
# Maximum number of possible iterations (gym's TimeLimit default is 1000)
env._max_episode_steps = 10000
# Number of runs: how many times the whole experiment is restarted from scratch
for n_runs in range(num_runs):
# Resets environment
observation = env.reset()
# Reset agent
agent.agent_init(agent_info)
# Generate last state and action in the agent
last_action = agent.agent_start(observation)
# Steps, rewards and loss at each episode to solve the challenge
steps_per_episode = []
rewards_per_episode = []
loss_per_episode = []
# Times the environment will start again without resetting the agent
for t in tqdm(range(num_episodes)):
# Reset done flag
done = False
# Set rewards, steps and loss to zero
rewards = 0
n_steps = 0
agent.loss = 0
# Reset environment
observation = env.reset()
# Run until the experiment is over
while not done:
# Render the environment only after episode 300
if t > 300:
env.render()
# Take a step with the environment
observation, reward, done, info = env.step(last_action)
rewards += reward
n_steps += 1
# If the goal has been reached stop
if done:
# Last step with the agent
agent.agent_end(reward)
else:
# Take a step with the agent
last_action = agent.agent_step(reward, observation)
# Append steps taken to solve the episode
steps_per_episode.append(n_steps)
# Reward obtained during the episode
rewards_per_episode.append(rewards)
# Loss obtained solving the experiment
loss_per_episode.append(agent.loss)
# Steps taken to solve the experiment across all runs
all_steps.append(np.array(steps_per_episode))
# Rewards obtained during all episodes
all_rewards.append(np.array(rewards_per_episode))
# Loss obtained during all episodes
all_loss.append(loss_per_episode)
env.close()
```
100%|██████████| 1000/1000 [51:03<00:00, 3.06s/it]
```python
steps_average = np.mean(np.array(all_steps), axis=0)
plt.plot(steps_average, label = 'Steps')
plt.xlabel("Episodes")
plt.ylabel("Iterations",rotation=0, labelpad=40)
plt.xlim(-0.2, num_episodes)
plt.ylim(steps_average.min(), steps_average.max())
plt.title("Average iterations to solve the experiment over runs")
plt.legend()
plt.show()
print("The Minimum number of iterations used to solve the experiment were: {0}\n".format(np.array(all_steps).min()))
print("The Maximum number of iterations used to solve the experiment were: {0}\n".format(np.array(all_steps).max()))
```
```python
rewards_average = np.mean(all_rewards, axis=0)
plt.plot(rewards_average, label = 'Average Reward')
plt.xlabel("Episodes")
plt.ylabel("Sum of\n rewards\n during\n episode" ,rotation=0, labelpad=40)
plt.xlim(-0.2, num_episodes)
plt.ylim(rewards_average.min(), rewards_average.max())
plt.title("Average reward to solve the experiment over runs")
plt.legend()
plt.show()
print("The best reward obtained solving the experiment was: {0}\n".format(np.array(all_rewards).max()))
print("The Worst reward obtained solving the experiment was: {0}\n".format(np.array(all_rewards).min()))
```
```python
loss_average = np.mean(np.array(all_loss), axis=0)
plt.plot(loss_average, label = 'Steps')
plt.xlabel("Episodes")
plt.ylabel("Average loss",rotation=0, labelpad=40)
plt.xlim(-0.2, num_episodes)
plt.ylim(loss_average.min(), loss_average.max())
plt.title("Average loss over iterations")
plt.legend()
plt.show()
print("The best loss obtained solving the experiment was: {0}\n".format(np.array(loss_average).min()))
print("The Worst loss obtained solving the experiment was: {0}\n".format(np.array(loss_average).max()))
```
## Using the last trained Agent
These lines show the performance of the last trained agent in a video and save the video with the results.
```python
# Test Sarsa Agent
num_runs = 1
num_episodes = 1000
# Environment
env_to_wrap = gym.make('LunarLander-v2')
# Maximum number of possible iterations (gym's TimeLimit default is 1000)
env_to_wrap._max_episode_steps = 1500
env = Monitor(env_to_wrap, "./videos/lunarLander", video_callable=lambda episode_id: True, force=True)
# Number of runs: how many times the whole experiment is restarted from scratch
for n_runs in tqdm(range(num_runs)):
# Resets environment
observation = env.reset()
# Generate last state and action in the agent
last_action = agent.agent_start(observation)
# Times the environment will start again without resetting the agent
for t in tqdm(range(num_episodes)):
# View environment
env.render()
# Take a step with the environment
observation, reward, done, info = env.step(last_action)
# If the goal has been reached stop
if done:
# Last step with the agent
agent.agent_end(reward)
break
else:
# Take a step with the agent
last_action = agent.agent_step(reward, observation)
env.close()
env_to_wrap.close()
print("Episode finished after {} timesteps".format(t+1))
```
0%|          | 0/1 [00:00<?, ?it/s]
100%|██████████| 1/1 [00:03<00:00,  3.90s/it]
Episode finished after 261 timesteps
|
(* Title: HOL/SMT.thy
Author: Sascha Boehme, TU Muenchen
Author: Jasmin Blanchette, VU Amsterdam
*)
section \<open>Bindings to Satisfiability Modulo Theories (SMT) solvers based on SMT-LIB 2\<close>
theory SMT
imports Divides
keywords "smt_status" :: diag
begin
subsection \<open>A skolemization tactic and proof method\<close>
lemma choices:
"\<And>Q. \<forall>x. \<exists>y ya. Q x y ya \<Longrightarrow> \<exists>f fa. \<forall>x. Q x (f x) (fa x)"
"\<And>Q. \<forall>x. \<exists>y ya yb. Q x y ya yb \<Longrightarrow> \<exists>f fa fb. \<forall>x. Q x (f x) (fa x) (fb x)"
"\<And>Q. \<forall>x. \<exists>y ya yb yc. Q x y ya yb yc \<Longrightarrow> \<exists>f fa fb fc. \<forall>x. Q x (f x) (fa x) (fb x) (fc x)"
"\<And>Q. \<forall>x. \<exists>y ya yb yc yd. Q x y ya yb yc yd \<Longrightarrow>
\<exists>f fa fb fc fd. \<forall>x. Q x (f x) (fa x) (fb x) (fc x) (fd x)"
"\<And>Q. \<forall>x. \<exists>y ya yb yc yd ye. Q x y ya yb yc yd ye \<Longrightarrow>
\<exists>f fa fb fc fd fe. \<forall>x. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x)"
"\<And>Q. \<forall>x. \<exists>y ya yb yc yd ye yf. Q x y ya yb yc yd ye yf \<Longrightarrow>
\<exists>f fa fb fc fd fe ff. \<forall>x. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x)"
"\<And>Q. \<forall>x. \<exists>y ya yb yc yd ye yf yg. Q x y ya yb yc yd ye yf yg \<Longrightarrow>
\<exists>f fa fb fc fd fe ff fg. \<forall>x. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x) (fg x)"
by metis+
lemma bchoices:
"\<And>Q. \<forall>x \<in> S. \<exists>y ya. Q x y ya \<Longrightarrow> \<exists>f fa. \<forall>x \<in> S. Q x (f x) (fa x)"
"\<And>Q. \<forall>x \<in> S. \<exists>y ya yb. Q x y ya yb \<Longrightarrow> \<exists>f fa fb. \<forall>x \<in> S. Q x (f x) (fa x) (fb x)"
"\<And>Q. \<forall>x \<in> S. \<exists>y ya yb yc. Q x y ya yb yc \<Longrightarrow> \<exists>f fa fb fc. \<forall>x \<in> S. Q x (f x) (fa x) (fb x) (fc x)"
"\<And>Q. \<forall>x \<in> S. \<exists>y ya yb yc yd. Q x y ya yb yc yd \<Longrightarrow>
\<exists>f fa fb fc fd. \<forall>x \<in> S. Q x (f x) (fa x) (fb x) (fc x) (fd x)"
"\<And>Q. \<forall>x \<in> S. \<exists>y ya yb yc yd ye. Q x y ya yb yc yd ye \<Longrightarrow>
\<exists>f fa fb fc fd fe. \<forall>x \<in> S. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x)"
"\<And>Q. \<forall>x \<in> S. \<exists>y ya yb yc yd ye yf. Q x y ya yb yc yd ye yf \<Longrightarrow>
\<exists>f fa fb fc fd fe ff. \<forall>x \<in> S. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x)"
"\<And>Q. \<forall>x \<in> S. \<exists>y ya yb yc yd ye yf yg. Q x y ya yb yc yd ye yf yg \<Longrightarrow>
\<exists>f fa fb fc fd fe ff fg. \<forall>x \<in> S. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x) (fg x)"
by metis+
ML \<open>
fun moura_tac ctxt =
Atomize_Elim.atomize_elim_tac ctxt THEN'
SELECT_GOAL (Clasimp.auto_tac (ctxt addSIs @{thms choice choices bchoice bchoices}) THEN
ALLGOALS (Metis_Tactic.metis_tac (take 1 ATP_Proof_Reconstruct.partial_type_encs)
ATP_Proof_Reconstruct.default_metis_lam_trans ctxt [] ORELSE'
blast_tac ctxt))
\<close>
method_setup moura = \<open>
Scan.succeed (SIMPLE_METHOD' o moura_tac)
\<close> "solve skolemization goals, especially those arising from Z3 proofs"
hide_fact (open) choices bchoices
subsection \<open>Triggers for quantifier instantiation\<close>
text \<open>
Some SMT solvers support patterns as a quantifier instantiation
heuristics. Patterns may either be positive terms (tagged by "pat")
triggering quantifier instantiations -- when the solver finds a
term matching a positive pattern, it instantiates the corresponding
quantifier accordingly -- or negative terms (tagged by "nopat")
inhibiting quantifier instantiations. A list of patterns
of the same kind is called a multipattern, and all patterns in a
multipattern are considered conjunctively for quantifier instantiation.
A list of multipatterns is called a trigger, and their multipatterns
act disjunctively during quantifier instantiation. Each multipattern
should mention at least all quantified variables of the preceding
quantifier block.
\<close>
typedecl 'a symb_list
consts
Symb_Nil :: "'a symb_list"
Symb_Cons :: "'a \<Rightarrow> 'a symb_list \<Rightarrow> 'a symb_list"
typedecl pattern
consts
pat :: "'a \<Rightarrow> pattern"
nopat :: "'a \<Rightarrow> pattern"
definition trigger :: "pattern symb_list symb_list \<Rightarrow> bool \<Rightarrow> bool" where
"trigger _ P = P"
subsection \<open>Higher-order encoding\<close>
text \<open>
Application is made explicit for constants occurring with varying
numbers of arguments. This is achieved by the introduction of the
following constant.
\<close>
definition fun_app :: "'a \<Rightarrow> 'a" where "fun_app f = f"
text \<open>
Some solvers support a theory of arrays which can be used to encode
higher-order functions. The following set of lemmas specifies the
properties of such (extensional) arrays.
\<close>
lemmas array_rules = ext fun_upd_apply fun_upd_same fun_upd_other fun_upd_upd fun_app_def
subsection \<open>Normalization\<close>
lemma case_bool_if[abs_def]: "case_bool x y P = (if P then x else y)"
by simp
lemmas Ex1_def_raw = Ex1_def[abs_def]
lemmas Ball_def_raw = Ball_def[abs_def]
lemmas Bex_def_raw = Bex_def[abs_def]
lemmas abs_if_raw = abs_if[abs_def]
lemmas min_def_raw = min_def[abs_def]
lemmas max_def_raw = max_def[abs_def]
lemma nat_zero_as_int:
"0 = nat 0"
by simp
lemma nat_one_as_int:
"1 = nat 1"
by simp
lemma nat_numeral_as_int: "numeral = (\<lambda>i. nat (numeral i))" by simp
lemma nat_less_as_int: "(<) = (\<lambda>a b. int a < int b)" by simp
lemma nat_leq_as_int: "(\<le>) = (\<lambda>a b. int a \<le> int b)" by simp
lemma Suc_as_int: "Suc = (\<lambda>a. nat (int a + 1))" by (rule ext) simp
lemma nat_plus_as_int: "(+) = (\<lambda>a b. nat (int a + int b))" by (rule ext)+ simp
lemma nat_minus_as_int: "(-) = (\<lambda>a b. nat (int a - int b))" by (rule ext)+ simp
lemma nat_times_as_int: "(*) = (\<lambda>a b. nat (int a * int b))" by (simp add: nat_mult_distrib)
lemma nat_div_as_int: "(div) = (\<lambda>a b. nat (int a div int b))" by (simp add: nat_div_distrib)
lemma nat_mod_as_int: "(mod) = (\<lambda>a b. nat (int a mod int b))" by (simp add: nat_mod_distrib)
lemma int_Suc: "int (Suc n) = int n + 1" by simp
lemma int_plus: "int (n + m) = int n + int m" by (rule of_nat_add)
lemma int_minus: "int (n - m) = int (nat (int n - int m))" by auto
lemma nat_int_comparison:
fixes a b :: nat
shows "(a = b) = (int a = int b)"
and "(a < b) = (int a < int b)"
and "(a \<le> b) = (int a \<le> int b)"
by simp_all
lemma int_ops:
fixes a b :: nat
shows "int 0 = 0"
and "int 1 = 1"
and "int (numeral n) = numeral n"
and "int (Suc a) = int a + 1"
and "int (a + b) = int a + int b"
and "int (a - b) = (if int a < int b then 0 else int a - int b)"
and "int (a * b) = int a * int b"
and "int (a div b) = int a div int b"
and "int (a mod b) = int a mod int b"
by (auto intro: zdiv_int zmod_int)
lemma int_if:
fixes a b :: nat
shows "int (if P then a else b) = (if P then int a else int b)"
by simp
subsection \<open>Integer division and modulo for Z3\<close>
text \<open>
The following Z3-inspired definitions are overspecified for the case where \<open>l = 0\<close>. This
cosmetic flaw is corrected in the \<open>div_as_z3div\<close> and \<open>mod_as_z3mod\<close> theorems.
\<close>
definition z3div :: "int \<Rightarrow> int \<Rightarrow> int" where
"z3div k l = (if l \<ge> 0 then k div l else - (k div - l))"
definition z3mod :: "int \<Rightarrow> int \<Rightarrow> int" where
"z3mod k l = k mod (if l \<ge> 0 then l else - l)"
lemma div_as_z3div:
"\<forall>k l. k div l = (if l = 0 then 0 else if l > 0 then z3div k l else z3div (- k) (- l))"
by (simp add: z3div_def)
lemma mod_as_z3mod:
"\<forall>k l. k mod l = (if l = 0 then k else if l > 0 then z3mod k l else - z3mod (- k) (- l))"
by (simp add: z3mod_def)
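text \<open>
A worked instance: for \<open>k = 7\<close> and \<open>l = - 2\<close>, HOL's flooring operations give
\<open>7 div - 2 = - 4\<close> and \<open>7 mod - 2 = - 1\<close>, whereas \<open>z3div 7 (- 2) = - 3\<close> and
\<open>z3mod 7 (- 2) = 1\<close>, reflecting SMT-LIB's convention of a non-negative remainder.
\<close>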
subsection \<open>Extra theorems for veriT reconstruction\<close>
lemma verit_sko_forall: \<open>(\<forall>x. P x) \<longleftrightarrow> P (SOME x. \<not>P x)\<close>
using someI[of \<open>\<lambda>x. \<not>P x\<close>]
by auto
lemma verit_sko_forall': \<open>P (SOME x. \<not>P x) = A \<Longrightarrow> (\<forall>x. P x) = A\<close>
by (subst verit_sko_forall)
lemma verit_sko_forall_indirect: \<open>x = (SOME x. \<not>P x) \<Longrightarrow> (\<forall>x. P x) \<longleftrightarrow> P x\<close>
using someI[of \<open>\<lambda>x. \<not>P x\<close>]
by auto
lemma verit_sko_ex: \<open>(\<exists>x. P x) \<longleftrightarrow> P (SOME x. P x)\<close>
using someI[of \<open>\<lambda>x. P x\<close>]
by auto
lemma verit_sko_ex': \<open>P (SOME x. P x) = A \<Longrightarrow> (\<exists>x. P x) = A\<close>
by (subst verit_sko_ex)
lemma verit_sko_ex_indirect: \<open>x = (SOME x. P x) \<Longrightarrow> (\<exists>x. P x) \<longleftrightarrow> P x\<close>
using someI[of \<open>\<lambda>x. P x\<close>]
by auto
lemma verit_Pure_trans:
\<open>P \<equiv> Q \<Longrightarrow> Q \<Longrightarrow> P\<close>
by auto
lemma verit_if_cong:
assumes \<open>b \<equiv> c\<close>
and \<open>c \<Longrightarrow> x \<equiv> u\<close>
and \<open>\<not> c \<Longrightarrow> y \<equiv> v\<close>
shows \<open>(if b then x else y) \<equiv> (if c then u else v)\<close>
using assms if_cong[of b c x u] by auto
lemma verit_if_weak_cong':
\<open>b \<equiv> c \<Longrightarrow> (if b then x else y) \<equiv> (if c then x else y)\<close>
by auto
lemma verit_ite_intro_simp:
\<open>(if c then (a :: 'a) = (if c then P else Q') else Q) = (if c then a = P else Q)\<close>
\<open>(if c then R else b = (if c then R' else Q')) =
(if c then R else b = Q')\<close>
\<open>(if c then a' = a' else b' = b')\<close>
by (auto split: if_splits)
lemma verit_or_neg:
\<open>(A \<Longrightarrow> B) \<Longrightarrow> B \<or> \<not>A\<close>
\<open>(\<not>A \<Longrightarrow> B) \<Longrightarrow> B \<or> A\<close>
by auto
lemma verit_subst_bool: \<open>P \<Longrightarrow> f True \<Longrightarrow> f P\<close>
by auto
lemma verit_and_pos:
\<open>(a \<Longrightarrow> \<not>b \<or> A) \<Longrightarrow> \<not>(a \<and> b) \<or> A\<close>
\<open>(a \<Longrightarrow> A) \<Longrightarrow> \<not>a \<or> A\<close>
\<open>(\<not>a \<Longrightarrow> A) \<Longrightarrow> a \<or> A\<close>
by blast+
lemma verit_la_generic:
\<open>(a::int) \<le> x \<or> a = x \<or> a \<ge> x\<close>
by linarith
lemma verit_tmp_bfun_elim:
\<open>(if b then P True else P False) = P b\<close>
by (cases b) auto
lemma verit_eq_true_simplify:
\<open>(P = True) \<equiv> P\<close>
by auto
lemma verit_and_neg:
\<open>B \<or> B' \<Longrightarrow> (A \<and> B) \<or> \<not>A \<or> B'\<close>
\<open>B \<or> B' \<Longrightarrow> (\<not>A \<and> B) \<or> A \<or> B'\<close>
by auto
lemma verit_forall_inst:
\<open>A \<longleftrightarrow> B \<Longrightarrow> \<not>A \<or> B\<close>
\<open>\<not>A \<longleftrightarrow> B \<Longrightarrow> A \<or> B\<close>
\<open>A \<longleftrightarrow> B \<Longrightarrow> \<not>B \<or> A\<close>
\<open>A \<longleftrightarrow> \<not>B \<Longrightarrow> B \<or> A\<close>
\<open>A \<longrightarrow> B \<Longrightarrow> \<not>A \<or> B\<close>
\<open>\<not>A \<longrightarrow> B \<Longrightarrow> A \<or> B\<close>
by blast+
lemma verit_eq_transitive:
\<open>A = B \<Longrightarrow> B = C \<Longrightarrow> A = C\<close>
\<open>A = B \<Longrightarrow> C = B \<Longrightarrow> A = C\<close>
\<open>B = A \<Longrightarrow> B = C \<Longrightarrow> A = C\<close>
\<open>B = A \<Longrightarrow> C = B \<Longrightarrow> A = C\<close>
by auto
subsection \<open>Setup\<close>
ML_file \<open>Tools/SMT/smt_util.ML\<close>
ML_file \<open>Tools/SMT/smt_failure.ML\<close>
ML_file \<open>Tools/SMT/smt_config.ML\<close>
ML_file \<open>Tools/SMT/smt_builtin.ML\<close>
ML_file \<open>Tools/SMT/smt_datatypes.ML\<close>
ML_file \<open>Tools/SMT/smt_normalize.ML\<close>
ML_file \<open>Tools/SMT/smt_translate.ML\<close>
ML_file \<open>Tools/SMT/smtlib.ML\<close>
ML_file \<open>Tools/SMT/smtlib_interface.ML\<close>
ML_file \<open>Tools/SMT/smtlib_proof.ML\<close>
ML_file \<open>Tools/SMT/smtlib_isar.ML\<close>
ML_file \<open>Tools/SMT/z3_proof.ML\<close>
ML_file \<open>Tools/SMT/z3_isar.ML\<close>
ML_file \<open>Tools/SMT/smt_solver.ML\<close>
ML_file \<open>Tools/SMT/cvc4_interface.ML\<close>
ML_file \<open>Tools/SMT/cvc4_proof_parse.ML\<close>
ML_file \<open>Tools/SMT/verit_proof.ML\<close>
ML_file \<open>Tools/SMT/verit_isar.ML\<close>
ML_file \<open>Tools/SMT/verit_proof_parse.ML\<close>
ML_file \<open>Tools/SMT/conj_disj_perm.ML\<close>
ML_file \<open>Tools/SMT/smt_replay_methods.ML\<close>
ML_file \<open>Tools/SMT/smt_replay.ML\<close>
ML_file \<open>Tools/SMT/z3_interface.ML\<close>
ML_file \<open>Tools/SMT/z3_replay_rules.ML\<close>
ML_file \<open>Tools/SMT/z3_replay_methods.ML\<close>
ML_file \<open>Tools/SMT/z3_replay.ML\<close>
ML_file \<open>Tools/SMT/verit_replay_methods.ML\<close>
ML_file \<open>Tools/SMT/verit_replay.ML\<close>
ML_file \<open>Tools/SMT/smt_systems.ML\<close>
method_setup smt = \<open>
Scan.optional Attrib.thms [] >>
(fn thms => fn ctxt =>
METHOD (fn facts => HEADGOAL (SMT_Solver.smt_tac ctxt (thms @ facts))))
\<close> "apply an SMT solver to the current goal"
subsection \<open>Configuration\<close>
text \<open>
The current configuration can be printed by the command
\<open>smt_status\<close>, which shows the values of most options.
\<close>
subsection \<open>General configuration options\<close>
text \<open>
The option \<open>smt_solver\<close> can be used to change the target SMT
solver. The possible values can be obtained from the \<open>smt_status\<close>
command.
\<close>
declare [[smt_solver = z3]]
text \<open>
Since SMT solvers are potentially nonterminating, there is a timeout
(given in seconds) to restrict their runtime.
\<close>
declare [[smt_timeout = 20]]
text \<open>
SMT solvers apply randomized heuristics. In case a problem is not
solvable by an SMT solver, changing the following option might help.
\<close>
declare [[smt_random_seed = 1]]
text \<open>
In general, the binding to SMT solvers runs as an oracle, i.e., the SMT
solvers are fully trusted without additional checks. The following
option can cause the SMT solver to run in proof-producing mode, giving
a checkable certificate. This is currently only implemented for Z3.
\<close>
declare [[smt_oracle = false]]
text \<open>
Each SMT solver provides several commandline options to tweak its
behaviour. They can be passed to the solver by setting the following
options.
\<close>
declare [[cvc3_options = ""]]
declare [[cvc4_options = "--full-saturate-quant --inst-when=full-last-call --inst-no-entail --term-db-mode=relevant --multi-trigger-linear"]]
declare [[verit_options = "--index-fresh-sorts"]]
declare [[z3_options = ""]]
text \<open>
The SMT method provides an inference mechanism to detect simple triggers
in quantified formulas, which might increase the number of problems
solvable by SMT solvers (note: triggers guide quantifier instantiations
in the SMT solver). To turn it on, set the following option.
\<close>
declare [[smt_infer_triggers = false]]
text \<open>
Enable the following option to use built-in support for datatypes,
codatatypes, and records in CVC4. Currently, this is implemented only
in oracle mode.
\<close>
declare [[cvc4_extensions = false]]
text \<open>
Enable the following option to use built-in support for div/mod, datatypes,
and records in Z3. Currently, this is implemented only in oracle mode.
\<close>
declare [[z3_extensions = false]]
subsection \<open>Certificates\<close>
text \<open>
By setting the option \<open>smt_certificates\<close> to the name of a file,
all following applications of an SMT solver are cached in that file.
Any further application of the same SMT solver (using the very same
configuration) re-uses the cached certificate instead of invoking the
solver. An empty string disables caching certificates.
The filename should be given as an explicit path. It is good
practice to use the name of the current theory (with ending
\<open>.certs\<close> instead of \<open>.thy\<close>) as the certificates file.
Certificate files should be used at most once in a certain theory context,
to avoid race conditions with other concurrent accesses.
\<close>
declare [[smt_certificates = ""]]
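(*
  Hypothetical example: for a theory file named Example.thy, the
  recommended setting would be
    declare [[smt_certificates = "Example.certs"]]
*)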
text \<open>
The option \<open>smt_read_only_certificates\<close> controls whether only
stored certificates should be used or whether invocation of an SMT solver
is allowed. When set to \<open>true\<close>, no SMT solver will ever be
invoked and only the existing certificates found in the configured
cache are used; when set to \<open>false\<close> and there is no cached
certificate for some proposition, then the configured SMT solver is
invoked.
\<close>
declare [[smt_read_only_certificates = false]]
subsection \<open>Tracing\<close>
text \<open>
The SMT method, when applied, traces important information. To
make it entirely silent, set the following option to \<open>false\<close>.
\<close>
declare [[smt_verbose = true]]
text \<open>
For tracing the generated problem file given to the SMT solver as
well as the returned result of the solver, the option
\<open>smt_trace\<close> should be set to \<open>true\<close>.
\<close>
declare [[smt_trace = false]]
subsection \<open>Schematic rules for Z3 proof reconstruction\<close>
text \<open>
Several proof rules of Z3 are not very well documented. There are two
lemma groups which can turn failing Z3 proof reconstruction attempts
into succeeding ones: the facts in \<open>z3_rule\<close> are tried prior to
any implemented reconstruction procedure for all uncertain Z3 proof
rules; the facts in \<open>z3_simp\<close> are only fed to invocations of
the simplifier when reconstructing theory-specific proof steps.
\<close>
lemmas [z3_rule] =
refl eq_commute conj_commute disj_commute simp_thms nnf_simps
ring_distribs field_simps times_divide_eq_right times_divide_eq_left
if_True if_False not_not
NO_MATCH_def
lemma [z3_rule]:
"(P \<and> Q) = (\<not> (\<not> P \<or> \<not> Q))"
"(P \<and> Q) = (\<not> (\<not> Q \<or> \<not> P))"
"(\<not> P \<and> Q) = (\<not> (P \<or> \<not> Q))"
"(\<not> P \<and> Q) = (\<not> (\<not> Q \<or> P))"
"(P \<and> \<not> Q) = (\<not> (\<not> P \<or> Q))"
"(P \<and> \<not> Q) = (\<not> (Q \<or> \<not> P))"
"(\<not> P \<and> \<not> Q) = (\<not> (P \<or> Q))"
"(\<not> P \<and> \<not> Q) = (\<not> (Q \<or> P))"
by auto
lemma [z3_rule]:
"(P \<longrightarrow> Q) = (Q \<or> \<not> P)"
"(\<not> P \<longrightarrow> Q) = (P \<or> Q)"
"(\<not> P \<longrightarrow> Q) = (Q \<or> P)"
"(True \<longrightarrow> P) = P"
"(P \<longrightarrow> True) = True"
"(False \<longrightarrow> P) = True"
"(P \<longrightarrow> P) = True"
"(\<not> (A \<longleftrightarrow> \<not> B)) \<longleftrightarrow> (A \<longleftrightarrow> B)"
by auto
lemma [z3_rule]:
"((P = Q) \<longrightarrow> R) = (R \<or> (Q = (\<not> P)))"
by auto
lemma [z3_rule]:
"(\<not> True) = False"
"(\<not> False) = True"
"(x = x) = True"
"(P = True) = P"
"(True = P) = P"
"(P = False) = (\<not> P)"
"(False = P) = (\<not> P)"
"((\<not> P) = P) = False"
"(P = (\<not> P)) = False"
"((\<not> P) = (\<not> Q)) = (P = Q)"
"\<not> (P = (\<not> Q)) = (P = Q)"
"\<not> ((\<not> P) = Q) = (P = Q)"
"(P \<noteq> Q) = (Q = (\<not> P))"
"(P = Q) = ((\<not> P \<or> Q) \<and> (P \<or> \<not> Q))"
"(P \<noteq> Q) = ((\<not> P \<or> \<not> Q) \<and> (P \<or> Q))"
by auto
lemma [z3_rule]:
"(if P then P else \<not> P) = True"
"(if \<not> P then \<not> P else P) = True"
"(if P then True else False) = P"
"(if P then False else True) = (\<not> P)"
"(if P then Q else True) = ((\<not> P) \<or> Q)"
"(if P then Q else True) = (Q \<or> (\<not> P))"
"(if P then Q else \<not> Q) = (P = Q)"
"(if P then Q else \<not> Q) = (Q = P)"
"(if P then \<not> Q else Q) = (P = (\<not> Q))"
"(if P then \<not> Q else Q) = ((\<not> Q) = P)"
"(if \<not> P then x else y) = (if P then y else x)"
"(if P then (if Q then x else y) else x) = (if P \<and> (\<not> Q) then y else x)"
"(if P then (if Q then x else y) else x) = (if (\<not> Q) \<and> P then y else x)"
"(if P then (if Q then x else y) else y) = (if P \<and> Q then x else y)"
"(if P then (if Q then x else y) else y) = (if Q \<and> P then x else y)"
"(if P then x else if P then y else z) = (if P then x else z)"
"(if P then x else if Q then x else y) = (if P \<or> Q then x else y)"
"(if P then x else if Q then x else y) = (if Q \<or> P then x else y)"
"(if P then x = y else x = z) = (x = (if P then y else z))"
"(if P then x = y else y = z) = (y = (if P then x else z))"
"(if P then x = y else z = y) = (y = (if P then x else z))"
by auto
lemma [z3_rule]:
"0 + (x::int) = x"
"x + 0 = x"
"x + x = 2 * x"
"0 * x = 0"
"1 * x = x"
"x + y = y + x"
by (auto simp add: mult_2)
lemma [z3_rule]: (* for def-axiom *)
"P = Q \<or> P \<or> Q"
"P = Q \<or> \<not> P \<or> \<not> Q"
"(\<not> P) = Q \<or> \<not> P \<or> Q"
"(\<not> P) = Q \<or> P \<or> \<not> Q"
"P = (\<not> Q) \<or> \<not> P \<or> Q"
"P = (\<not> Q) \<or> P \<or> \<not> Q"
"P \<noteq> Q \<or> P \<or> \<not> Q"
"P \<noteq> Q \<or> \<not> P \<or> Q"
"P \<noteq> (\<not> Q) \<or> P \<or> Q"
"(\<not> P) \<noteq> Q \<or> P \<or> Q"
"P \<or> Q \<or> P \<noteq> (\<not> Q)"
"P \<or> Q \<or> (\<not> P) \<noteq> Q"
"P \<or> \<not> Q \<or> P \<noteq> Q"
"\<not> P \<or> Q \<or> P \<noteq> Q"
"P \<or> y = (if P then x else y)"
"P \<or> (if P then x else y) = y"
"\<not> P \<or> x = (if P then x else y)"
"\<not> P \<or> (if P then x else y) = x"
"P \<or> R \<or> \<not> (if P then Q else R)"
"\<not> P \<or> Q \<or> \<not> (if P then Q else R)"
"\<not> (if P then Q else R) \<or> \<not> P \<or> Q"
"\<not> (if P then Q else R) \<or> P \<or> R"
"(if P then Q else R) \<or> \<not> P \<or> \<not> Q"
"(if P then Q else R) \<or> P \<or> \<not> R"
"(if P then \<not> Q else R) \<or> \<not> P \<or> Q"
"(if P then Q else \<not> R) \<or> P \<or> R"
by auto
hide_type (open) symb_list pattern
hide_const (open) Symb_Nil Symb_Cons trigger pat nopat fun_app z3div z3mod
end
\chapter{Glossary (optional)}
(**************************************************************************)
(* Copyright 2010 2011, Thomas Braibant *)
(* *)
(**************************************************************************)
Require Import Common Axioms Base Finite.
Section Lifting.
Context {tech : Techno}.
Context {Data : Type}.
Context {tech_spec : Technology_spec tech Data}.
Context {tech_spec' : Technology_spec tech (stream Data)}.
(** A wf_atom is one that can be lifted (does not depend on time) *)
Definition wf_atom n m (c : techno n m) :=
forall ins outs (H : spec c ins outs) (t : nat),
spec c (time ins t) (time outs t).
(** A wf circuit can be lifted to streams *)
Inductive wf : forall n m (c : circuit n m), Prop :=
| wf_Atom : forall n m t {Hn : Fin n} {Hm : Fin m},
wf_atom n m t -> wf n m (Atom t)
| wf_Ser : forall n m p x1 x2, wf n m x1 -> wf m p x2 -> wf n p (x1 |> x2)
| wf_Par : forall n m p q x1 x2, wf n p x1 -> wf m q x2 -> wf (n + m) (p + q)
(x1 & x2)
| wf_Plug : forall n m f {Hn : Fin n} {Hm : Fin m}, wf n m (Plug f).
Lemma lifting n m (x : circuit n m) (Hwf : wf n m x): forall ins outs,
Semantics x ins outs -> forall t, Semantics x (time ins t) (time outs t).
Proof.
induction x; intros; rinvert; inversionK Hwf.
apply inversion_Atom in H; constructor; auto.
apply IHx1 with( t := t) in Hk; clear IHx1; auto.
apply IHx2 with (t := t) in Hk0; clear IHx2;auto.
econstructor; eauto.
apply IHx1 with( t := t) in Hk; clear IHx1;auto.
apply IHx2 with (t := t) in Hk0; clear IHx2;auto.
rewrite Data.time_left in *.
rewrite Data.time_right in *.
constructor; auto.
apply inversion_Plug in H. rewrite H.
rewrite Data.time_lift.
simpl. constructor.
Qed.
End Lifting.
subroutine table(flag,length,colmn1,colmn2,given,found)
c
c + + + PURPOSE + + +
c
c SR TABLE inserts an additional value into the first
c column of an ordered, paired data set and uses linear
c interpolation to compute the corresponding value in the
c other column
c
c Called from: SRS CHNCON, HYDCHN, DCAP, AND OTHERS
c Author(s): CREAMS file, C. Baffaut
c Reference in User Guide:
c
c Version:
c Date recoded:
c Recoded by: Jim Ascough II
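c
c + + + EXAMPLE + + +
c
c hypothetical usage sketch (not from the original source): for a
c decreasing first column, interpolate the corresponding second
c column value at xval (flag = 1)
c
c call table(1,npts,xcol,ycol,xval,yval)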
c
c + + + KEYWORDS + + +
c
c + + + PARAMETERS + + +
c
c + + + ARGUMENT DECLARATIONS + + +
c
integer length, flag
real colmn1(length), colmn2(length), given, found, mxfound
c
c + + + ARGUMENT DEFINITIONS + + +
c
c colmn1(length) - first column of the paired data table
c colmn2(length) - second column of the paired data table
c given          - known value to be located in the table
c found          - interpolated value returned to the caller
c length         - number of entries in each column
c flag           - search mode (see format 1000): 1/2 - given a
c                  column 1 value, find the column 2 value (column 1
c                  decreasing/increasing); 3/4 - given a column 2
c                  value, find the column 1 value (column 2
c                  decreasing/increasing)
c
c + + + COMMON BLOCKS + + +
c
c + + + LOCAL VARIABLES + + +
c
real intrpl, tval
integer i, npos, j, k
c
c + + + LOCAL DEFINITIONS + + +
c
c intrpl - linear interpolation function
c tval   - temporary column 1 value used in the flag 2 search
c i      - table search index
c npos   - position of the bracketing table entry
c j      - secondary search index (flag 2)
c k      - secondary search index (flag 2)
c
c + + + SAVES + + +
c
c + + + SUBROUTINES CALLED + + +
c
c intrpl
c
c + + + DATA INITIALIZATIONS + + +
c
c + + + END SPECIFICATIONS + + +
c
c
if (flag.ne.2) then
if (flag.eq.3) go to 60
if (flag.eq.4) go to 80
c
do 10 i = 1, length
npos = i
if (colmn1(i).lt.given) go to 100
10 continue
c
go to 120
end if
c
do 20 i = 1, length
npos = i
c
if (colmn1(i).gt.given) then
if (npos.eq.1) go to 120
mxfound = intrpl(colmn1(npos-1),colmn2(npos-1),colmn1(npos),
1 colmn2(npos),given)
c exit
go to 30
end if
c
20 continue
c
30 do 50 k = 2, length
tval = colmn1(k)
c
do 40 j = npos, length
c
if (colmn1(j)-tval.gt.given) then
found = intrpl(colmn1(j-1),colmn2(j-1),colmn1(j),colmn2(j),
1 given+tval) - colmn2(k)
if (found.gt.mxfound) mxfound = found
npos = j
c exit
go to 50
end if
c
40 continue
c
50 continue
c
found = mxfound
return
c
60 continue
c
do 70 i = 1, length
npos = i
if (colmn2(i).lt.given) go to 110
70 continue
c
go to 120
c
80 continue
c
do 90 i = 1, length
npos = i
if (colmn2(i).gt.given) go to 110
90 continue
c
go to 120
c
100 continue
c
if (npos.eq.1) go to 120
found = intrpl(colmn1(npos-1),colmn2(npos-1),colmn1(npos),
1 colmn2(npos),given)
return
c
110 continue
c
if (npos.ne.1) then
found = intrpl(colmn2(npos-1),colmn1(npos-1),colmn2(npos),
1 colmn1(npos),given)
return
end if
c
120 continue
c
write (38,1000) flag, given, colmn1(3), colmn2(3)
write (6,1000) flag, given, colmn1(3), colmn2(3)
c
stop
1000 format (' ',/,12x,'**********ERROR**********',/,12x,
1 'GIVEN is outside the range of the table',//,1x,
1 'use this info to identify the function where',
1 ' the problem occurred',//,4x,
1 'flag: 1 - given column 1 find column 2 ',
1 '(column 1 decreases)',/,4x,
1 ' 2 - given column 1 find column 2 ',
1 '(column 1 increases)',//,4x,
1 ' 3 - given column 2 find column 1 ',
1 '(column 2 decreases)',/,4x,
1 ' 4 - given column 2 find column 1 ',
1 '(column 2 increases)',//,12x,'the flag = ',
1 i2,/,12x,'given value = ',e10.3,/,12x,
1 'third value from column 1 = ',e10.3,/,12x,
1 'third value from column 2 = ',e10.3)
end
\chapter{Tool demonstration for lifetime-long depletion: Transatomic Power MSR}
This chapter presents a validation demonstration applying SaltProc v1.0 to
the \gls{TAP} \gls{MSR}. The \gls{TAP} concept was selected because it is well
analyzed in the literature \cite{betzler_two-dimensional_2017,
betzler_assessment_2017-1} making code-to-code verification with
ChemTriton/SCALE possible \cite{betzler_assessment_2017-1}. This chapter
presents the \gls{TAP} \gls{MSR} core lifetime-long (25 years) depletion
simulation with moderate time resolution (3-day depletion step) and a
constant, 100\% power level. The results obtained with SaltProc v1.0 are
compared with full-core \gls{TAP} depletion analysis by Betzler \emph{et al.}
\cite{betzler_assessment_2017-1} with assumed ideal removal efficiency (100\%
of the target isotope is removed). This validation effort showed that the
SaltProc v1.0 solution matches the case with \emph{ideal extraction
efficiency}.
Finally, this chapter presents a lifetime-long fuel salt depletion simulation
for the case with \emph{a realistic, physics-based} mathematical model for
noble gas removal efficiency, which provides fuel isotopic composition
evolution during 25 years of the \gls{TAP} \gls{MSR} operation. Additionally,
this chapter presents the evolution of safety and operational parameters
during operation. Detailed
insights about fuel salt composition and neutron spectrum dynamics obtained
herein will be used in the following chapters to investigate \gls{TAP}
reactor poisoning during load-following.
\section{Transatomic Power MSR design description}\label{sec:tap_design_sum}
The \gls{TAP} concept is a 1250 MW$_{th}$ \gls{MSR} with a LiF-based uranium
fuel salt \cite{transatomic_power_corporation_technical_2016}. This concept
uses configurable zirconium hydride rods as the moderator, while most
\gls{MSR} designs typically propose high-density reactor graphite. Zirconium
hydride offers a much higher neutron moderating density than graphite, so a
much smaller volume of zirconium hydride is needed to achieve a thermal energy
spectrum similar to one obtained with a graphite moderator. Moreover,
zirconium hydride clad in a corrosion-resistant material has a much longer
lifespan in extreme operational conditions (e.g., high temperature, large
neutron flux, chemically aggressive salt) than reactor graphite
\cite{transatomic_power_corporation_lost_2018}. Finally, zirconium hydride is
a nonporous material and holds up fewer neutron poisons (e.g., xenon, krypton)
than does high-density reactor graphite.
In this section, the design characteristics and reprocessing plant design are
based on information presented in the TAP white papers
\cite{transatomic_power_corporation_technical_2016,
transatomic_power_corporation_neutronics_2016} and \gls{ORNL} technical
reports \cite{betzler_two-dimensional_2017, betzler_assessment_2017-1}.
\subsection{General design description}
Figure~\ref{fig:tap-rendering} shows a rendering of the primary and secondary
loops of the \gls{TAP} \gls{MSR} seated inside a concrete nuclear island.
Figure~\ref{fig:tap-primary-scheme} shows the schematic design of a 520
MW$_{e}$, 2-loop nuclear reactor system with an intermediate salt loop.
\begin{figure}[h] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/tap_render.jpg}
\caption{Rendering of the \gls{TAP} \gls{MSR}. The fission happens in the
fuel salt inside the reactor vessel (1). The heat generated by a
self-sustaining nuclear fission reaction would be transferred to the
secondary salt by heat exchangers (2), which would boil water in the
steam generator (3). Valves made of salt with a higher melting point
(4) would melt in case of emergency, allowing the salt to drain into a
drain tank (5), which can passively dissipate decay heat
(reproduced from
\cite{strickland_transatomic_2014}, illustration by Emily Cooper).}
\label{fig:tap-rendering}
\end{figure}
\begin{figure}[h] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/tap_simplified_scheme.png}
\caption{Simplified schematic of the \gls{TAP} \gls{MSR} primary and
secondary loops (reproduced from the Transatomic Power Technical White
Paper \cite{transatomic_power_corporation_technical_2016}). Figure
legend:
A) reactor vessel, B) fuel salt pumps, C) primary heat exchangers, D)
freeze plug, E) primary loop drain tank, F) secondary loop salt pump,
G)
steam generator, H) secondary loop drain tank, I) fuel catch basin.}
\label{fig:tap-primary-scheme}
\end{figure}
The \gls{TAP} core design (Figure~\ref{fig:tap-side-view}) is very similar to
the
original \gls{MSRE} design developed by \gls{ORNL}
\cite{haubenreich_experience_1970} but has two significant innovations:
the fuel salt composition and the moderator. The \gls{MSRE}'s
LiF-BeF$_2$-ZrF$_4$-UF$_4$ salt has been substituted with LiF-UF$_4$ salt,
which allows for an increase in the uranium concentration within the fuel salt
from 0.9 to 27.5\% while maintaining a relatively low melting point
(490$^{\circ}$C compared with 434$^{\circ}$C for the original \gls{MSRE}'s
salt) \cite{betzler_two-dimensional_2017}. Graphite has a large
thermal scattering cross section, which makes it an excellent moderator, but
it has a few major drawbacks:
\begin{enumerate}[label=(\alph*), noitemsep, topsep=0pt]
\item low lethargy gain per collision requires a large volume of a
moderator to be present to reach criticality, which leads to a larger core
and obstructs the core power density;
\item even special reactor-grade graphite has relatively high porosity;
thus, it holds gaseous \glspl{FP} (e.g., tritium, xenon) in pores;
\item reactor graphite lifespan in a commercial reactor is
approximately ten years \cite{robertson_conceptual_1971}.
\end{enumerate}
As previously mentioned, to resolve these issues, the \gls{TAP} concept uses
zirconium hydride instead of graphite, allowing for a more compact core and a
significant increase in power density. These two innovative design choices,
together with a configurable moderator (the moderator-to-fuel ratio can be
changed during operation), facilitate the use of a commercially available
5\%-enriched \gls{LEU} fuel cycle.
\begin{figure}[h] % replace 't' with 'b' to
\hspace{+2.2in}
\includegraphics[width=0.65\textwidth]{ch4/tap_front_view.png}
\caption{The \gls{TAP} \gls{MSR} schematic view showing moveable moderator
rod bundles and shutdown rod (reproduced from Transatomic Power
White Paper \cite{transatomic_power_corporation_technical_2016}).}
\label{fig:tap-side-view}
\end{figure}
The \gls{TAP} \gls{MSR} primary loop contains the reactor core volume
(including the zirconium hydride moderator rods with silicon carbide
cladding), pumps, pipes, and primary heat exchangers. Pumps circulate the
LiF-(Act)F$_4$ fuel salt through the primary loop. The pumps, vessels, tanks,
and piping are made of a nickel-based alloy (similar to Hastelloy-N\footnote{
Hastelloy-N is very common in \gls{MSR} designs now, but was developed at
\gls{ORNL} in the \gls{MSRE} program that started in the 1950s.}), which
is highly resistant to corrosion in various molten salt environments. Inside
the reactor vessel, near the zirconium hydride moderator rods, the fuel salt
is in a critical configuration and generates heat. Table~\ref{tab:tap_tab}
contains details of the \gls{TAP} system design, which are taken from a
technical white paper \cite{transatomic_power_corporation_technical_2016} and
a neutronics overview \cite{transatomic_power_corporation_neutronics_2016} as
well as an \gls{ORNL} analysis of the \gls{TAP} design
\cite{betzler_two-dimensional_2017, betzler_assessment_2017-1}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[htp!]
\caption{Summary of principal data for the \gls{TAP} \gls{MSR}
(reproduced from \cite{betzler_assessment_2017-1,
transatomic_power_corporation_technical_2016}). }
\centering
\begin{tabularx}{0.8\textwidth}{L R}
\hline
Thermal power & 1250 MW$_{th} $ \\
Electric power & 520 MW$_e $ \\
Gross thermal efficiency& 44\% \\
Outlet temperature & 620$^{\circ}$C \\
Fuel salt components & LiF-UF$_4$ \\
Fuel salt composition & 72.5-27.5 mole\% \\
Uranium enrichment & 5\% $^{235}$U \\
Moderator & Zirconium hydride (ZrH$_{1.66}$) rods \\
& (with silicon carbide cladding) \\
Neutron spectrum & epithermal \\
\hline
\end{tabularx}
\label{tab:tap_tab}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Reactor core design}
In the \gls{TAP} core (Figure~\ref{fig:tap-core-ben}), fuel salt flows around
moderator assemblies consisting of lattices of zirconium hydride rods clad in
a corrosion-resistant silicon carbide. The \gls{TAP} reactor pressure vessel
is a cylinder made of a nickel-based alloy with an inner radius of 150 cm, a
height of 350 cm, and a wall thickness of 5 cm.
\begin{figure}[t] % replace 't' with 'b' to
\includegraphics[width=\textwidth]{ch4/tap_core_ornl.png}
\vspace{-0.1in}
\caption{The \gls{TAP} \gls{MSR} schematic core view showing moderator
rods (reproduced from ORNL/TM-2017/475
\cite{betzler_assessment_2017-1}).}
\label{fig:tap-core-ben}
\end{figure}
The \gls{SVF} in the core is a parameter similar to the widely-used
moderator-to-fuel ratio and can be defined as:
\begin{align}
SVF &= \frac{V_F}{V_F+V_M} = \frac{1}{1+V_M/V_F}
\intertext{where}
V_F &= \mbox{fuel volume $[m^3]$} \nonumber \\
V_M &= \mbox{moderator volume $[m^3]$} \nonumber \\
V_M/V_F &= \mbox{moderator-to-fuel salt ratio $[-]$.} \nonumber
\end{align}
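For illustration (a direct application of the definition above, using the
startup value quoted in Section~\ref{sec:tap_model}), $SVF \approx 0.9172$
corresponds to a moderator-to-fuel ratio of
\begin{align}
\frac{V_M}{V_F} &= \frac{1}{SVF} - 1 = \frac{1}{0.9172} - 1 \approx 0.090.
\nonumber
\end{align}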
Figure~\ref{fig:svf-predetermined} shows the \gls{SVF} variation during
operation that shifts the reactor neutron energy spectrum from intermediate to
thermal to maximize fuel burnup. At the \gls{BOL}, a high \gls{SVF} results in
a relatively hard spectrum and enhances fertile material ($^{238}$U)
conversion into the fissile material ($^{239}$Pu) when the startup fissile
material ($^{235}$U) inventory is still large. As fissile concentration in the
fuel salt declines, additional moderator rods are introduced to maintain
criticality, leading to a decrease in the salt volume fraction (see
Figure~\ref{fig:svf-predetermined}).
The initial \gls{TAP} concept suggested varying the \gls{SVF} by inserting
fixed-sized moderator rods via the bottom of the reactor vessel (for safety
considerations), similar to moving the control rods in a \gls{BWR}, as shown
in Figure~\ref{fig:tap-side-view}
\cite{transatomic_power_corporation_neutronics_2016}. The later \gls{TAP}
concept proposes reducing the \gls{SVF} by reconfiguring the moderator rods
during the regular shutdown for reactor maintenance
\cite{betzler_assessment_2017-1}. For the \gls{TAP} reactor, \gls{EOL} occurs
when the maximum number of moderator rods is inserted into the core and a
further injection of fresh fuel salt does not alter criticality. Unmoderated
salt flows in the annulus between the core and the vessel wall to reduce fast
neutron fluence at the vessel structural material.
\begin{figure}[t] % replace 't' with 'b' to
\includegraphics[width=\textwidth]{ch4/svf_predetermined.png}
\caption{The change in SVF as a function of burnup in the \gls{TAP}
reactor (reproduced from Transatomic Power Neutronics Overview
\cite{transatomic_power_corporation_neutronics_2016}).}
\label{fig:svf-predetermined}
\end{figure}
\subsection{Fuel salt reprocessing system}
The \gls{TAP} nuclear system contains a fission product removal system.
Gaseous \glspl{FP} are continuously removed using an off-gas system, while
liquid and solid \glspl{FP} are extracted via a chemical processing system. As
these byproducts are gradually removed, a small quantity of fresh fuel salt is
regularly added to the primary loop. This process conserves a constant fuel
salt mass and keeps the reactor critical. In contrast with the \gls{MSBR}
reprocessing system, the \gls{TAP} design does not need a protactinium
separation and isolation system because it operates in a uranium-based
single-stage fuel cycle. The authors of the \gls{TAP} concept suggested three
distinct fission product removal methods
\cite{transatomic_power_corporation_neutronics_2016}:
\paragraph*{Off-Gas System:} The off-gas system removes gaseous fission
products such as krypton and xenon, which are then compressed and temporarily
stored until they have decayed to the background radiation level. Trace
amounts of tritium are also removed and bottled in a liquid form via the same
process. Also, the off-gas system directly removes a small fraction of the
noble metals.
\paragraph*{Metal Plate-Out/Filtration:} A nickel mesh filter removes noble
and semi-noble metal solid fission products as they plate out onto the
internal surface of the filter.
\paragraph*{Liquid Metal Extraction:} Lanthanides and other non-noble metals
stay dissolved in the fuel salt. They generally have a lower capture cross
section and thus absorb fewer neutrons than $^{135}$Xe, but their extraction
is essential to ensure normal operation. In the \gls{TAP} reactor, lanthanide
removal is accomplished via a liquid-metal/molten salt extraction process
similar to that developed for the \gls{MSBR} by \gls{ORNL}
\cite{robertson_conceptual_1971}. This process converts the dissolved
lanthanides into a well-understood oxide waste form, similar to that of
\gls{LWR} \gls{SNF}. This oxide waste exits the \gls{TAP} reprocessing
plant in ceramic granules which can be sintered into another convenient form
for storage \cite{transatomic_power_corporation_technical_2016}.\\
Figure~\ref{fig:tap-reproc} shows the principal design of the \gls{TAP}
primary loop, including an off-gas system, nickel mesh filter, and lanthanide
chemical extraction facility. As in the \gls{MSBR}, the \gls{TAP} off-gas
system is based on helium sparging through the fuel salt with consequent gas
bubbles removed before returning the fuel salt to the core (see
Section~\ref{sec:gas-separ}).
Nevertheless, one crucial difference must be noted: the \gls{MSBR} gas
separation system suggested helium injection and subsequent transport of the
voids throughout the primary loop, including the core, for at least ten full
loops \cite{robertson_conceptual_1971}.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/tap_primary_loop.png}
\caption{Simplified \gls{TAP} primary loop design including off-gas system
(blue), nickel filter (orange), and liquid metal extraction system
(green) (reproduced from \cite{transatomic_power_transatomic_2019}).}
\label{fig:tap-reproc}
\end{figure}
In the \gls{TAP} design, the introduction of void (helium bubbles) during
operation is a significant concern for safe, stable operation because an
increased void fraction in the fuel salt re-entering the core
could cause unpredictable reactivity changes. Kedl stated without explanation,
``Average loop void fractions as high as 1\% are undesirable... it is
desirable to keep the average loop void fraction well below
1\%.''\cite{robertson_conceptual_1971} The \gls{MSBR} design targeted a 0.2\%
average void in the fuel salt \cite{robertson_conceptual_1971} and the
\gls{MSRE} successfully operated with an average void fraction of about 0.7\%
\cite{compere_fission_1975}. The \gls{TAP} design reduces void fraction in the
fuel salt to negligible levels by using an effective gas separator for
stripping helium/xenon bubbles before returning the salt to a primary loop
(Figure~\ref{fig:tap-reproc}, blue block).
Noble and semi-noble metal solid fission products tend to plate out onto metal
surfaces, including piping, heat exchanger tubes, reactor vessel inner
surface, etc. Previous research by \gls{ORNL} \cite{robertson_conceptual_1971}
reported that about 50\% of noble and semi-noble metals would plate out inside
\gls{MSBR} systems (including the off-gas system) without any special
treatment. To improve the extraction efficiency of these fission products, the
\gls{TAP} concept suggested employing a nickel mesh filter located in a bypass
stream in the primary loop (Figure~\ref{fig:tap-reproc}, orange block). The
main idea of this filter is to create a large nickel surface area using porous
metal (e.g., Inconel fibers). The fuel salt flows through the filter,
and noble metals plate out on the filtering material.
This Liquid Metal Extraction process for the \gls{TAP} concept has been
adopted from the \gls{MSRE}. The \gls{MSRE} demonstrated a liquid-liquid
extraction process for removing rare earths and lanthanides from fuel salt and
estimated the efficiency of this process. In fact, due to similarities in
reprocessing schemes, the \gls{TAP} project reported almost the same set of
elements for removal and similar effective cycle times\footnote{The \gls{MSBR}
program defined ``cycle time" as the time required to remove 100\% of atoms of
a target nuclide from a
fuel salt \cite{robertson_conceptual_1971}.
} as
suggested for the \gls{MSBR} (Table~\ref{tab:reprocessing_list}). The
\gls{TAP} neutronics white paper specifies additional low-probability fission
products and gases that should be removed during operation
\cite{transatomic_power_corporation_neutronics_2016}. These elements are
categorized into the previously defined processing groups, but the removal
rates of most of these elements (except hydrogen) are meager.
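The removal rates in Table~\ref{tab:reprocessing_list} are simply the
reciprocals of the corresponding cycle times; for example, for the 200-day
semi-noble metal group,
\begin{align}
\lambda &= \frac{1}{T_{cycle}} = \frac{1}{200 \times 86400\ s} \approx
5.79\times10^{-8}\ s^{-1}. \nonumber
\end{align}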
Details of gas removal and fuel reprocessing systems have historically
been conceptual. Accordingly, liquid-fueled system designs, including the
\gls{TAP} concept, usually assume ideal (rather than realistically
constrained) removal efficiencies for reactor performance simulations. In this
thesis, I developed a realistic online reprocessing system and reactor model
to capture the dynamics of fuel composition evolution during reactor
operation. Gas removal efficiency is variable in that model, described using
mathematical correlations from Chapter 2 (see Equation~\ref{eq:gas_eff}). For
the other \glspl{FP}, a fixed\footnote{Published information about dynamics of
extraction efficiency during reactor operation for noble-, semi-noble metals,
and rare earths is insufficient to inform a variable removal efficiency.},
non-ideal extraction efficiency based on cycle time from
Table~\ref{tab:reprocessing_list} was used to inform the fuel reprocessing
model.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[htp!]
\centering
\caption{The effective cycle times for fission product removal from the
\gls{TAP} reactor (reproduced from \cite{betzler_implementation_2017}
and
\cite{transatomic_power_corporation_neutronics_2016}).}
\begin{tabular}{p{0.2\textwidth} p{0.42\textwidth} p{0.12\textwidth}
p{0.14\textwidth}}
\hline
%\begin{tabularx}{\linewidth}{l X} \toprule
\textbf{Processing group} & \qquad\qquad\qquad \textbf{Nuclides} &
\textbf{Removal rate (s$^{-1}$)} & \textbf{Cycle time (at full power)}
\\ [5pt] \hline
\multicolumn{3}{c}{\textit{Elements removed in the \gls{MSBR} concept
and adopted for the \gls{TAP}} \cite{robertson_conceptual_1971}} \\
Volatile gases & Xe, Kr & 5.00E-2 & 20
sec \\ [5pt]
Noble metals & Se, Nb, Mo, Tc, Ru, Rh, Pd, Ag, Sb, Te & 5.00E-2 & 20
sec \\ [5pt]
Semi-noble metals & Zr, Cd, In, Sn & 5.79E-8 & 200
days \\ [5pt]
Volatile fluorides & Br, I & 1.93E-7 & 60
days \\ [5pt]
Rare earths & Y, La, Ce, Pr, Nd, Pm, Sm, Gd & 2.31E-7 & 50
days \\ [5pt]
\qquad & Eu & 2.32E-8 & 500 days \\ [5pt]
Discard & Rb, Sr, Cs, Ba & 3.37E-9 & 3435 days \\ [5pt]
\hline
\multicolumn{3}{c}{\textit{Additional elements removed}
\cite{transatomic_power_corporation_neutronics_2016,
betzler_implementation_2017} } \\
Volatile gases & H & 5.00E-2 & 20
sec \\ [5pt]
Noble metals & Ti, V, Cr, Cu & 3.37E-9 & 3435
days \\ [5pt]
Semi-noble metals & Mn, Fe, Co, Ni, Zn, Ga, Ge, As & 3.37E-9 & 3435
days \\ [5pt]
Rare earths & Sc & 3.37E-9 & 3435
days \\ [5pt]
Discard & Ca & 3.37E-9 & 3435
days \\ [5pt]
\hline
\end{tabular}
\label{tab:reprocessing_list}
\vspace{-0.9em}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{TAP system model} \label{sec:tap_model}
In this section, the \gls{TAP} core and fuel salt reprocessing system models
for demonstrating SaltProc v1.0 are described in detail. I used these models
for SaltProc demonstration and validation in the current and following
chapters.
\subsection{Serpent 2 full-core model}
Nested and lattice geometry types, as well as transformation capabilities of
Serpent \cite{leppanen_serpent_2014}, are employed to represent the \gls{TAP}
core. Figure~\ref{fig:tap-serpent-plan} shows the $XY$ section of the
whole-core model at the expected reactor operational level when all control
rods are fully withdrawn. Figures~\ref{fig:tap-serpent-elev} and
\ref{fig:tap-serpent-elev-zoom} show a longitudinal section of the
reactor. This model contains the moderator rods with their silicon carbide
cladding, the pressure vessel, and the inlet and outlet plena
(Table~\ref{tab:tap_model_param}).
Fuel salt flows around square moderator assemblies consisting of lattices
of small-diameter zirconium hydride rods in a corrosion-resistant material.
The salt volume fraction for Figure~\ref{fig:tap-serpent-plan} is 0.917204,
which means the modeled core is under-moderated and has an intermediate
neutron spectrum. Quarter-core configurations of the \gls{TAP} core with
various salt volume fractions, used in the current work to maintain
criticality for a reasonable operational period ($>20$ years), are listed in
Table~\ref{tab:tap_adjustable_core} and shown in
Figures~\ref{fig:tap-406-681} and \ref{fig:tap-840-1668} in Appendix~A.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.75\textwidth]{ch4/tap_plan_view_serpent_347.png}
\caption{An $XY$ section of the \gls{TAP} model at horizontal midplane
with fully withdrawn control rods at \gls{BOL} (347 moderator rods,
salt volume fraction 0.917204) \cite{chaube_tap_2019,
rykhlevskii_milestone_2019}.}
\label{fig:tap-serpent-plan}
\end{figure}
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.6\textwidth]{ch4/tap_elev_view_serpent_347.png}
\caption{45$^{\circ}$ $XZ$ section of the \gls{TAP} core model
\cite{chaube_tap_2019, rykhlevskii_milestone_2019}.}
\label{fig:tap-serpent-elev}
\end{figure}
To represent the reactivity control system, the model has:
\begin{enumerate}[label=(\alph*), noitemsep, topsep=0pt]
\item control rod guide tubes made of nickel-based alloy;
\item control rods represented as boron carbide (B$_4$C) cylinders
with a thin Hastelloy-N coating;
\item air inside guide tubes and control rods.
\end{enumerate}
The control rods must be able to suppress excess reactivity at the \gls{BOL}
when the core configuration is the most reactive, and the neutron spectrum is
the hardest. The control rod design shown on
Figures~\ref{fig:tap-serpent-plan}, \ref{fig:tap-serpent-elev}, and
\ref{fig:tap-serpent-elev-zoom} comprises a cluster of 25 rods that
provide a total reactivity worth of $3922\pm10$ $pcm$ at the \gls{BOL}.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.55\textwidth]{ch4/tap_elev_view_zoomed_serpent.png}
\caption{Zoomed $XZ$ section of the top of the moderator and control rods
in the \gls{TAP} model.}
\label{fig:tap-serpent-elev-zoom}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[ht!]
\caption{Geometric parameters for the full-core 3D model of the
\gls{TAP} (reproduced from Betzler \emph{et al.}
\cite{betzler_assessment_2017-1}). }
\centering
\begin{tabularx}{0.9\textwidth}{s s x p{0.14\textwidth}}
\hline
\textbf{Component}&\textbf{Parameter}&\textbf{Value}& \textbf{Unit}
\\ \hline
\multirow{4}{*}{\begin{tabular}[c]{@{}l@{}}Moderator\\
rod\end{tabular}}
& Cladding thickness & 0.10 & cm
\\
& Radius & 1.15 & cm
\\
& Length & 3.0 & m
\\
& Pitch & 3.0 & cm \\
\hline
\multirow{2}{*}{\begin{tabular}[c]{@{}l@{}}Moderator\\
assembly\end{tabular}}
& Array & 5 $\times$ 5 &
rods$\times$rods \\
& Pitch & 15.0 & cm
\\ \hline
\multirow{4}{*}{\begin{tabular}[c]{@{}l@{}}Core\end{tabular}}
& Assemblies & 268 & assemblies/core
\\
& Inner radius & 1.5 &
m \\
& Plenum height & 25.0 & cm
\\
& Vessel wall thickness & 5.0 &
cm \\ \hline
\end{tabularx}
\label{tab:tap_model_param}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The control rod cluster is modeled using the \textbf{TRANS} Serpent 2 feature,
which allows the user to change the control rod position during the
simulation easily. The current work assumed that all control rods are fully
withdrawn from the core (Figure~\ref{fig:tap-serpent-elev-zoom}), but the user
can use reactivity control capabilities in SaltProc v1.0 to change control
rod position during operation. In this dissertation, all figures of the core
were generated using the built-in Serpent plotter.
The neutron population per cycle and the number of active/inactive cycles were
chosen to obtain a balance between minimizing uncertainty for a transport
problem (28 pcm for $k_{eff}$) and simultaneously minimizing computational
time.
\subsection{Model of the fuel reprocessing system}\label{sec:tap-online-model}
I thoroughly analyzed the original \gls{TAP} reprocessing system design
(Figure~\ref{fig:tap-reproc}) and neutron poison removal rates
(Table~\ref{tab:reprocessing_list}) to determine a suitable reprocessing
scheme for the SaltProc v1.0 demonstration
(Figure~\ref{fig:demo-repro-scheme}). This chapter presents two demonstration
cases: with ideal (Section~\ref{sec:ben-valid}) and realistic, non-ideal
(Section~\ref{sec:long-term-real}) gas removal efficiency. Realistic noble gas
removal efficiency is based on the physical model for noble gas extraction
efficiency discussed in Section~\ref{sec:gas-separ}.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/tap_saltproc_var_eps.png}
\caption{\gls{TAP} reprocessing scheme flowchart used for the
demonstration of SaltProc v1.0.}
\label{fig:demo-repro-scheme}
\end{figure}
Arrows in Figure~\ref{fig:demo-repro-scheme} represent material flows, and
percentages represent fractions of the total mass flow rate; ellipses
represent fuel
reprocessing system components; boxes represent waste streams; the diamond
shows refuel material flow (UF$_4$, 5 wt\% of $^{235}$U). The efficiency
of gas migration to helium bubbles ($\epsilon_m$) and efficiency of
gas bubbles separation from the salt ($\epsilon_{es}$) are different for
various demonstration cases and discussed in more detail in
Sections~\ref{sec:ben-valid} and \ref{sec:long-term-real}. The efficiencies of
noble metal extraction in the nickel filter
(Figure~\ref{fig:tap-primary-scheme}, orange block) and of semi-noble
metal/rare earth (RE) extraction in the liquid metal extraction system
(Figure~\ref{fig:tap-primary-scheme}, green block) are assumed fixed and equal
to 100\% and 57\%, respectively.
The gas removal components (sparger/contactor and entrainment separator) are
located in-line because the estimated full loop time\footnote{Full loop time
is the time taken by a particle of the coolant to make one full circle in the
primary loop.} for the fuel salt is about 20 seconds and approximately equal
to the cycle time (Table~\ref{tab:reprocessing_list}). To extract volatile
gases every 20 seconds, the gas removal system must operate with 100\% of the
core throughout flow rate (in-line gas removal system). In this chapter, the
efficiency of noble gas migration to helium bubbles and the efficiency of
bubble removal from the salt by the entrainment separator
($\epsilon_m,\epsilon_{es}$ on Figure~\ref{fig:demo-repro-scheme},
respectively) are selected separately for each demonstration case.
The nickel filter in the \gls{TAP} concept is designed to extract
noble/semi-noble metals and volatile fluorides
(Table~\ref{tab:reprocessing_list}). Similar to volatile gases,
noble metals must be removed every 20 seconds and, hence, the filter should
operate at 100\% of the flow rate through the core. The nickel filter removes
a wide range of elements with various effective cycle times
(Table~\ref{tab:reprocessing_list}).
Lanthanides and other non-noble metals have a lower capture cross section than
gases and noble metals. These elements can be removed via a
liquid-metal/molten salt extraction process with relatively low removal rates
(cycle time $>50$ days). This is accomplished by directing a small fraction of
the salt mass flow leaving the nickel mesh filter (10\% of the flow rate
through the core) to the liquid-metal/molten salt component of the
reprocessing system, in which lanthanides are removed with a specific
extraction efficiency to match the required cycle time
(Table~\ref{tab:reprocessing_list}). The remaining 90\% of the salt mass flow
is directed from the nickel filter to the heat exchangers without performing
any fuel salt treatment.
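As a rough illustrative estimate (assuming each salt parcel passes the
extraction branch once per full loop time $t_{loop} \approx 20$ s and that a
fraction $f$ of the flow is treated), the single-pass extraction efficiency
needed to match a target cycle time $T_{cycle}$ is
\begin{align}
\epsilon &\approx \frac{t_{loop}}{f \, T_{cycle}} = \frac{20\ s}{0.1 \times 50
\times 86400\ s} \approx 4.6\times10^{-5} \nonumber
\end{align}
for the 50-day rare earth group with $f = 0.1$; the same relation with $f = 1$
and $T_{cycle} = 20$ s recovers the near-complete single-pass removal required
of the in-line gas system.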
The removal rates vary among nuclides in this reactor concept, which dictates
the necessary resolution of depletion calculations. As a compromise, a 3-day
depletion time step was selected for the long-term demonstration case based on
a time step refinement study by Betzler \emph{et al.}
\cite{betzler_assessment_2017-1}. A complementary time step refinement study is
presented in Section~\ref{sec:time-refinement} to determine the impact of
temporal resolution on the depleted composition calculation.
\section{Long-term depletion demonstration and validation}\label{sec:long-term}
\subsection{Constant, ideal extraction efficiency case}\label{sec:ben-valid}
To validate SaltProc v1.0, I performed a lifetime-long depletion calculation
with ideal extraction efficiency. This case was selected to repeat fuel salt
depletion as closely as possible to the ChemTriton simulation for the full-core
\gls{TAP} reactor by Betzler \emph{et al.} \cite{betzler_assessment_2017-1}.
Betzler \emph{et al.} made the following assumptions and approximations in
their work \cite{betzler_assessment_2017-1}:
\begin{enumerate}[label=(\alph*), noitemsep, topsep=0pt]
\item Effective cycle times as prescribed by the Transatomic Power
Technical White Paper \cite{transatomic_power_corporation_technical_2016}
(Table~\ref{tab:reprocessing_list}) with \textbf{100\% noble
gas removal efficiency}; hence, $\epsilon_{es}$ and $\epsilon_m$ in the
reprocessing model (Figure~\ref{fig:demo-repro-scheme}) are both set to
1.0.
\item 5\% \gls{LEU} feed rate is equal to the rate of fission product
removal.
\item 3-day depletion step.
\item Quarter-core, 3-D model with vacuum boundary conditions.
\item Delayed neutron precursor drift was neglected.
\end{enumerate}
I adopted these assumptions for code-to-code verification of SaltProc v1.0
against ChemTriton. The ENDF/B-VII.1 \cite{chadwick_endf/b-vii.1_2011} nuclear
data library is used for this case to be consistent with Betzler's work.
Unfortunately, some crucial details have not been reported in
\cite{betzler_assessment_2017-1}: (1) exact core geometries for various
moderator rod configurations except startup configuration; (2) the excess
reactivity at startup; (3) the library from which S($\alpha, \beta$) tables
for thermal scattering in zirconium hydride are obtained. This section
presents my best effort to repeat Betzler's simulation using the same input
data to validate SaltProc for the \gls{TAP} concept.
\subsubsection{Effective multiplication factor dynamics}
Figures~\ref{fig:keff-ben-valid} and \ref{fig:keff-ben-valid-zoomed}
demonstrate the effective multiplication factor obtained using SaltProc v1.0
with Serpent. The $k_{eff}$ was obtained after removing fission products and
adding feed material at the end of each depletion step (3 days for this case).
SaltProc v1.0 updated the moderator rod configuration to the next
configuration (e.g., from 1388 rods per core to 1624 rods per core) once the
predicted value of $k_{eff}$ at the end of the next depletion step dropped
below 1.
This algorithm mimics regular maintenance shutdown when the \gls{TAP} core
excess reactivity is exhausted, and moderator rod assemblies should be
reconfigured to operate the next cycle.
An optimal number of moderator configurations (cycles) is found to be 15 (see
Appendix~A). Fewer cycles would improve capacity factor
but would require larger excess reactivity at the \gls{BOC}, which is strictly limited
by reactivity control system worth. More cycles would require more frequent
moderator rod reconfigurations, which worsens the capacity factor. The
interval between the first and second moderator configuration was only 12
months, the shortest interval between moderator configuration updates. For the
operation interval between 2 and 16 years after startup, the intervals between
shutdowns for moderator rod updates were 18-26 months. However, towards the
\gls{EOL}, the intervals between moderator rod reconfigurations dropped to 13
months.
Overall, the average interval between regular shutdowns for the core
reconfiguration was 18 months, which exactly matches the refueling interval
for conventional \glspl{LWR} and is consistent with Betzler \emph{et al.}
($\approx$16 months) \cite{betzler_assessment_2017-1}.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/keff_ben.png}
\vspace{-9mm}
\caption{Effective multiplication factor dynamics during 23.5 years of
operation for the full-core \gls{TAP} core model for the case with an
ideal removal efficiency of fission product. Confidence interval
$\sigma=28$ $pcm$ is shaded.}
\label{fig:keff-ben-valid}
\end{figure}
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.93\textwidth]{ch4/keff_ben_zoomed.png}
\vspace{-4mm}
\caption{Zoomed effective multiplication factor for the interval from 280
to 350 EFPD while transitioning from Cycle \#1 (startup geometry
configuration, 347 moderator rods, \gls{SVF}=0.91720353) to Cycle \#2
(\gls{SVF}=0.88694). Confidence interval $\sigma=28$ $pcm$ is
shaded.}
\label{fig:keff-ben-valid-zoomed}
\end{figure}
The $k_{eff}$ fluctuates significantly as a result of the batch-wise nature of
the online reprocessing approach used. Loading the initial fuel salt
composition with 5\% \gls{LEU} into the \gls{TAP} core leads to a
supercritical configuration with an excess reactivity of about 3200 $pcm$
(Figure~\ref{fig:keff-ben-valid}). Without performing any fuel salt
reprocessing and spectrum shifting, the core became subcritical after 30 days
of operation \cite{rykhlevskii_milestone_2019}. SaltProc calculates an
operational lifetime of 22.5 years, after which the fuel salt reaches a total
burnup of 81.46 MWd/kgU. The end of an operational lifetime is achieved when
the minimum \gls{SVF} is obtained, as restricted by the moderator geometry
parameters (e.g., moderator rod diameter, rod pitch, the internal diameter of
the reactor vessel). Table~\ref{tab:valid_ben_lifetime} compares obtained
results with Betzler \emph{et al.} \cite{betzler_assessment_2017-1}. Overall,
SaltProc-calculated operational lifetime and burnup are lower than the
reference by approximately 22\% and 17\%, respectively. A better match in the
operational lifetime between SaltProc v1.0 and ChemTriton can be obtained if a
detailed moderator configuration description of Betzler's model becomes
available in the future.
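For reference (a standard reactivity conversion, not taken from the source),
the excess reactivity of about 3200 $pcm$ quoted above corresponds to
\begin{align}
k_{eff} &= \frac{1}{1-\rho} = \frac{1}{1-0.032} \approx 1.033. \nonumber
\end{align}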
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[htp!]
\centering
\caption{Comparison of main operational parameters in the \gls{TAP}
reactor between the current work and Betzler \emph{et al.}
\cite{betzler_assessment_2017-1}.}
\begin{tabularx}{\textwidth}{p{0.42\textwidth} R R}
\hline
\textbf{Parameter} & \textbf{Current work} & \textbf{Betzler, 2017}
\cite{betzler_assessment_2017-1}\\ \hline
Operational lifetime [y] & 22.5 & 29.0 \\
Discharge burnup [MWd/kgU] & 76.30& 91.9 \\
Average moderator reconfiguration interval [months] & 18 & 16 \\
\hline
\end{tabularx}
\label{tab:valid_ben_lifetime}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Fuel salt isotopic composition dynamics}
Figure~\ref{fig:u-ben-valid} shows that continuous \gls{LEU} feed into the
\gls{TAP} reactor is not sufficient to maintain the fissile $^{235}$U content
of the core, as the uranium enrichment steadily decreases from 5\% at the
\gls{BOL} to 1\% at the \gls{EOL}. However, during the first 13 years of
operation, the \gls{TAP} \gls{MSR} breeds fissile $^{239}$Pu and $^{241}$Pu,
reaching a peak of total fissile plutonium inventory of 2.15 t
(Figure~\ref{fig:pu-fiss-ben-valid}). Figure~\ref{fig:pu-ben-valid} shows that
a significant amount of non-fissile
plutonium ($^{238}$Pu, $^{240}$Pu, and $^{242}$Pu) and uranium ($^{236}$U)
builds up in the reactor during operation and negatively impacts criticality
of the reactor. $^{239}$Pu and $^{241}$Pu are major contributors to the
fissile material content of the core, keeping it critical during the second
half of the operational lifecycle. The total $^{239}$Pu inventory in the core
rises during the first 11 years of operation due to the harder neutron
spectrum. After 11 years, the softer spectrum breeds less $^{239}$Pu from
$^{238}$U, and more $^{239}$Pu is progressively burned. The obtained results
are in good agreement with those in the ORNL report by Betzler \emph{et al.}
(Table~\ref{tab:valid_ben_isos}) \cite{betzler_assessment_2017-1}.
%$^{235}$U inventory in Betzler \emph{et al.} changed from 6.8t at the
%\gls{BOL} to 1.0t at the \gls{EOL}. $^{239}$Pu was 1.065t at the
%\gls{EOL}. $^{240}$Pu was 995kg at the
%\gls{EOL}. $^{241}$Pu was 465kg at the
%\gls{EOL}.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/u_ben_valid.png}
\caption{SaltProc-calculated uranium isotopic fuel salt content during
22.5 years of operation. Uncertainty of the predicted mass will be
estimated and discussed in Chapter~\ref{ch:uq}.}
\label{fig:u-ben-valid}
\end{figure}
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.83\textwidth]{ch4/pu_ben_valid.png}
\vspace{-4mm}
\caption{SaltProc-calculated plutonium isotopic fuel salt content during
22.5 years of operation.}
\label{fig:pu-ben-valid}
\end{figure}
\begin{figure}[hbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.83\textwidth]{ch4/tot_pu_ben_valid.png}
\vspace{-4mm}
\caption{SaltProc-calculated fissile and non-fissile plutonium fuel salt
content during 22.5 years of operation.}
\label{fig:pu-fiss-ben-valid}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[htp!]
\centering
\caption{Comparison of major heavy isotopes inventories at the \gls{EOL}
in the \gls{TAP} reactor between the current work and Betzler \emph{et al.}
\cite{betzler_assessment_2017-1}.}
\begin{tabularx}{\textwidth}{L p{0.12\textwidth} R R R}
\hline
& \textbf{Isotope} & \textbf{Current work mass [kg]} &
\textbf{Betzler, 2017 mass [kg]} & \textbf{$\Delta m$ [\%]}\\ \hline
\multirow{4}{*}{Fissile}
&$^{235}$U & 1299 & 1160 & $+11$\% \\
&$^{239}$Pu & 942 & 995 & $-5$\% \\
&$^{241}$Pu & 427 & 435 & $-2$\% \\
&Total & 2668 & 2590 & $+3$\% \\ \hline
\multirow{4}{*}{Non-fissile}
&$^{236}$U & 1123 & 1200 & $-6$\% \\
&$^{238}$U & 127,353 & 132,400 & $-4$\% \\
&$^{238}$Pu & 235 & 280 & $-16$\% \\
&$^{240}$Pu & 503 & 1000 & $-50$\% \\
&$^{242}$Pu & 230 & 310 & $-26$\% \\
&Total & 129,444 & 135,190 & $-4$\% \\ \hline
\end{tabularx}
\label{tab:valid_ben_isos}
\vspace{-0.9em}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
A lifetime-long SaltProc depletion calculation requires a 5\% \gls{LEU} feed
rate of 460.8
kg per year to maintain the fuel salt inventory in the primary loop, which is
consistent with the reference. Table~\ref{tab:valid_ben_performance} shows the
main fuel cycle performance parameters calculated using SaltProc and compared
with the reference. Normalized per GW$_{th}$-year, the \gls{TAP} concept
requires about 5.23 t of fuel compared with 4.14 t reported by Betzler
\emph{et al.} SaltProc-calculated waste production normalized per
GW$_{th}$-year is 5\% less than reported by ORNL. Potentially, the \gls{TAP}
can operate with \gls{LWR} \gls{SNF} as the fissile material feed. The heavy
metal component of \gls{LWR} \gls{SNF} has a lower fissile material weight
fraction than 5\% enriched uranium and adds less fertile $^{238}$U to the fuel
salt, potentially reducing the operational lifetime. Nevertheless, in the case
of using waste material (e.g., transuranium elements from \gls{LWR} \gls{SNF})
in this fueling scenario, the \gls{TAP} concept has superior waste reduction
metrics.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[hbp!]
\centering
\caption{Comparison of normalized by GW$_{th}$-year total fuel load and
actinide waste from the TAP reactor obtained in the current work and
Betzler \emph{et al.}
\cite{betzler_assessment_2017-1}.}
\begin{tabularx}{\textwidth}{p{0.42\textwidth} R R}
\hline
\textbf{Parameter} & \textbf{Current work} & \textbf{Betzler, 2017}
\cite{betzler_assessment_2017-1}\\ \hline
5\% \gls{LEU} feed rate [kg/y] & 460.8 & 480.0 \\
Loaded fuel [t per GW$_{th}$-y] & 5.23 & 4.14 \\
Waste [t per GW$_{th}$-y] & 3.57 & 3.74 \\
\hline
\end{tabularx}
\label{tab:valid_ben_performance}
\vspace{-0.9em}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newpage
\subsubsection{Neutron energy spectrum}
Significant thermalization of the neutron spectrum is observed as moderator
rods are added into the core configuration
(Figure~\ref{fig:ben-spectrum-bol}). At startup, the neutron spectra from the
current work and Betzler \emph{et al.} are matched well because the core
geometry, its \gls{SVF}, and initial fuel composition in these two simulations
are similar. The Pearson correlation coefficient\footnote{Pearson correlation
coefficient is calculated by the following formula:
\begin{align}
r &= \frac{\sum_{i=1}^{N}
(\Phi_i^{ref}-\overline{\Phi^{ref}})(\Phi_i-\overline{\Phi})}
{\sqrt{\sum_{i=1}^{N} (\Phi_i^{ref}-\overline{\Phi^{ref}})^2
\sum_{i=1}^{N}
(\Phi_i-\overline{\Phi})^2}}\\
\mbox{where} \nonumber\\
\Phi_i^{ref},\Phi_i &= \mbox{neutron flux for i$^{th}$ energy bin
reported in the reference and the current work $[n/cm^2\cdot s]$}
\nonumber\\
\overline{\Phi^{ref}}, \overline{\Phi} &= \mbox{neutron flux averaged over
N energy bins reported in the reference and current work $[n/cm^2\cdot
s]$}
\nonumber\\
N &= \mbox{number of neutron energy bins [-].}
\nonumber
\end{align}}
$r_{BOL}=0.91115$, which indicates a strong, positive association between the
spectra at the \gls{BOL} (see Figure~\ref{fig:ben-spectrum-bol}, upper plot).
At the \gls{EOL}, the SaltProc/Serpent-calculated spectrum is more thermal than
reported by Betzler \emph{et al.} \cite{betzler_assessment_2017-1}, but the
correlation coefficient $r_{EOL}=0.90987$ shows that the spectra are still
extremely strongly related (see Figure~\ref{fig:ben-spectrum-bol}, lower
plot).
%with smaller amplitude of resonances between 10$^{-5}$ and 10$^{-2}$ MeV
%(resonance capture of neutrons by $^{238}$U).
\begin{figure}[htbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.77\textwidth]{ch4/ben_spec_bol.png}\\
\vspace{-12mm}
\hspace{0.5mm}
\includegraphics[width=0.77\textwidth]{ch4/ben_spec_eol.png}
\vspace{-3mm}
\caption{Neutron flux energy spectrum at the BOL (upper) and the EOL
(lower) obtained using SaltProc/Serpent (orange) compared with
ChemTriton/Shift (blue) \cite{betzler_assessment_2017-1}.}
\label{fig:ben-spectrum-bol}
\end{figure}
The harder spectrum at the \gls{BOL} tends to significantly increase resonance
absorption in $^{238}$U and decrease the absorptions in fissile and
construction materials. Thus, the softer spectrum in the current work compared
with Betzler \emph{et al.} led to fewer resonance captures\footnote{The energy
range for $^{238}$U resonance neutron capture is between 10$^{-5}$ and
10$^{-2}$ MeV.} of neutrons by $^{238}$U, hence, less $^{239}$Pu bred from
$^{238}$U. Therefore, the SaltProc/Serpent calculation in the current work
underpredicts the destruction (i.e., fission and capture) of $^{235}$U and
overpredicts the destruction of $^{238}$U (see
Table~\ref{tab:valid_ben_isos}).
Finally, the softer neutron spectrum leads to more fissions in fissile
plutonium isotopes ($^{239}$Pu and $^{241}$Pu) which also decreases
non-fissile plutonium (Table~\ref{tab:valid_ben_isos}) and total actinide
waste production (Table~\ref{tab:valid_ben_performance}).
\subsubsection{Time step refinement}\label{sec:time-refinement}
The results shown in this chapter are obtained from SaltProc calculations with
a uniform depletion time step of 3 days. The duration of the time step was
chosen after performing a parametric sweep to determine the longest depletion
time step that provides suitable calculation accuracy. A longer time
step potentially reduces the SaltProc calculation costs, providing results
faster for lifetime-long (25-year) simulations.
Figure~\ref{fig:timeref-keff} shows $k_{eff}$ evolution obtained with 3-, 6-,
12-, and 24-day depletion time intervals for a 25-year simulation. The
interval between moderator configuration updates was assumed the same for all
four cases for consistency. The multiplication factor at the \gls{BOC} for
each moderator configuration decreased with increasing time step duration. At
the \gls{EOC} for each geometry, $k_{eff}=1.0$ for a 3-day time step but
drops below 1.0 to 0.9980, 0.9972, and 0.9948 for 6-, 12-, and 24-day step,
respectively.
The decrease occurs because more poisonous \glspl{FP} (e.g., $^{135}$Xe) are
produced in the core during longer depletion intervals. With longer time
steps, a larger concentration of poisons accumulates by the end of each
depletion step, when those poisons are removed, resulting in a substantial
criticality jump.
Figures~\ref{fig:timeref-u} and \ref{fig:timeref-pu239} show that the longer
time steps adequately capture uranium depletion ($<1$\% difference even for a
24-day time step), but the observed difference in fissile $^{239}$Pu mass is
significant when the depletion interval is 6 days or longer ($>0.5$\%
difference for a 6-day step). Using a 6-day depletion interval leads to an
overprediction of $^{239}$Pu production by 5 kg at the \gls{EOL}
(Figure~\ref{fig:timeref-pu239}) and of total plutonium production by 9.6 kg.
Notably, the significant quantity for plutonium adopted by the IAEA is 8 kg
(for plutonium containing $<80$\% $^{238}$Pu) \cite{close_iaea_1995}. Thus, a
depletion interval of 6 days or longer leads to a significant error in the
predicted plutonium inventory at the \gls{EOL} (larger than one significant
quantity).
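As a quick check of this safeguards argument (simple arithmetic with the
numbers quoted above):
\begin{verbatim}
pu_overprediction_kg = 9.6  # total Pu overprediction, 6-day vs. 3-day step
sq_pu_kg = 8.0              # IAEA significant quantity for plutonium
print(pu_overprediction_kg / sq_pu_kg)  # 1.2 significant quantities
\end{verbatim}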
\begin{figure}[hbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/keff_time_refinement.png}
\caption{SaltProc-calculated effective multiplication factor ($k_{eff}$)
during operation for different depletion time step sizes.}
\label{fig:timeref-keff}
\end{figure}
\begin{figure}[hbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.91\textwidth]{ch4/u235_time_refinement.png}\\
\vspace{-11mm}
\hspace{0.5mm}
\includegraphics[width=\textwidth]{ch4/u238_time_refinement.png}
\vspace{-6mm}
\caption{SaltProc-calculated $^{235}$U (upper) and $^{238}$U (lower)
content during operation for different depletion time step sizes.}
\label{fig:timeref-u}
\end{figure}
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/pu239_time_refinement.png}
\caption{SaltProc-calculated $^{239}$Pu content during operation for
different depletion time step sizes.}
\label{fig:timeref-pu239}
\end{figure}
Increasing the depletion time interval significantly reduces the computational
cost but also degrades the accuracy of the depletion calculations (i.e., the
24-day step gave a $\times4$ speedup but caused about 1.5\% error in the
$^{239}$Pu mass prediction). Calculations using a depletion time step of 6
days or more demonstrated a significant difference in calculated $k_{eff}$
(i.e., $\approx300$ $pcm$ for the 6-day step) and depleted mass (e.g.,
$\approx0.34$\% error in the predicted $^{235}$U mass for the 6-day step) from
those using a 3-day depletion step. \emph{In the current work, a 3-day
depletion step was selected to adequately predict the mass of major heavy
isotopes in the fuel salt during 25 years of \gls{TAP} reactor operation.}
\subsection{Realistic extraction efficiency case}\label{sec:long-term-real}
This section demonstrates SaltProc v1.0 for a lifetime-long depletion
simulation similar to Section~\ref{sec:ben-valid}, but with realistic,
physics-based correlations for the noble gas removal efficiency. For the
demonstration case herein, the efficiencies of xenon, krypton, and hydrogen
extraction are determined using the model by Peebles \emph{et al.}
(Equation~\ref{eq:gas_eff}) discussed earlier in Section~\ref{sec:gas-separ}.
The gas-liquid interfacial area per unit volume ($a$) that informs
Equation~\ref{eq:gas_eff} is a function of the salt/gas flow rates and the gas
bubble diameter \cite{sada_gas-liquid_1987}:
\begin{align}\label{eq:interfacial-area}
& a = \frac{6}{d_b}\frac{Q_{He}}{Q_{He} + Q_{salt}}
\intertext{where}
Q_{salt}&= \mbox{volumetric salt flow rate $[m^3/s]$} \nonumber \\
Q_{He}&= \mbox{volumetric helium flow rate $[m^3/s]$} \nonumber \\
d_b &= \mbox{helium bubble diameter $[m]$.} \nonumber
\end{align}
Additionally, the following parameters inform
Equation~\ref{eq:gas_eff} for the prototypic sparger: (1) salt volumetric flow
rate through the sparger, $Q_{salt}=2$ $m^3/s$; (2) sparging gas (helium)
volumetric flow rate, $Q_{He}=0.1$ $m^3/s$; (3) helium bubble diameter,
$d_b=0.508$ $mm$, as advised by ORNL \cite{robertson_conceptual_1971}; (4)
sparger length, $L=11$ $m$; and (5) sparger diameter, $D=0.4$ $m$ (sparger
cross section $A_C=0.126$ $m^2$).
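With these parameters, the interfacial area implied by
Equation~\ref{eq:interfacial-area} can be checked with a few lines of Python
(a sketch; the variable names are illustrative):
\begin{verbatim}
d_b    = 0.508e-3  # helium bubble diameter [m]
q_he   = 0.1       # helium volumetric flow rate [m^3/s]
q_salt = 2.0       # salt volumetric flow rate [m^3/s]

# gas-liquid interfacial area per unit volume [1/m]
a = (6.0 / d_b) * q_he / (q_he + q_salt)
print(a)  # approximately 562 1/m
\end{verbatim}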
The selection of the liquid phase mass transfer coefficient ($K_L$) presents a
challenge since the published information that informs
Equation~\ref{eq:gas_eff} is applicable only to laboratory-scale conditions
\cite{chen_cfd_2019-1}.
Peebles \emph{et al.} stated that Equation~\ref{eq:gas_eff} is valid for $K_L$
in the range from 1 to 100 $ft/hr$ (from 0.0847 to 8.4667 $mm/s$)
\cite{peebles_removal_1968}. For the demonstration case herein, I performed
25-year depletion calculations for $K_L$ of 0.0847, 2.1167, and 8.4667 $mm/s$
to investigate the effect of noble gas removal efficiency on lifetime-long
fuel depletion calculations.
The extraction efficiency is gas specific because the solubility in the salt
(the Henry's law constant) differs among gases.
Table~\ref{tab:gas_removal_efficiency} reports the dimensionless Henry's law
constant and the corresponding calculated efficiency of noble gas (Xe, Kr, H)
migration to the helium bubbles ($\epsilon_m$) in the prototypic sparger for
various mass transfer coefficients. The total separation efficiency
(Table~\ref{tab:gas_removal_efficiency}, last three columns) refers to the
efficiency of extracting the target gaseous elements by helium sparging in the
sparger followed by separation of the noble-gas-rich bubbles from the salt in
the axial-flow centrifugal bubble separator
\cite{gabbard_development_1974}.
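The total efficiency in the last three columns of
Table~\ref{tab:gas_removal_efficiency} is simply the product of the migration
and bubble separation efficiencies; a short Python sketch using the tabulated
$\epsilon_m$ values for $K_L=8.4667$ $mm/s$ reproduces those columns:
\begin{verbatim}
eps_es = 0.95  # bubble separator efficiency [-]
eps_m = {"Xe": 0.9630, "Kr": 0.9595, "H": 0.9066}  # K_L = 8.4667 mm/s

for gas, em in eps_m.items():
    print(gas, em * eps_es)  # 0.91485, 0.911525, 0.86127 -- cf. the table
\end{verbatim}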
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[htp!]
\fontsize{9}{11}\selectfont
\centering
\caption{The noble gas extraction efficiency at the working temperature
    T=627$^{\circ}$C calculated using Equation~\ref{eq:gas_eff}
    \cite{peebles_removal_1968}, assuming a salt volumetric flow
    rate $Q_{salt}=2$ $m^3/s$, helium volumetric flow rate $Q_{He}=0.1$
    $m^3/s$, helium bubble diameter $d_b=0.508$ $mm$, and sparger volume
    $V=1.4$ $m^3$. The liquid phase mass transfer coefficient is varied
    within its validity range $[0.0847, 8.4667]$ $mm/s$.}
\begin{tabularx}{\textwidth}{L X R R R R R R}
\hline
\textbf{Element}&\textbf{Henry's}&
\multicolumn{6}{c}{\textbf{Efficiency of}} \\
& \textbf{law}& \multicolumn{3}{c}{\textbf{migration to He bubbles
($\epsilon_m$)}}& \multicolumn{3}{c}{\textbf{total separation
($\epsilon$)$^{\star}$}} \\
&\textbf{constant} & \multicolumn{3}{c}{\textbf{for $K_L$ [mm/s]}}
&\multicolumn{3}{c}{\textbf{for $K_L$ [mm/s]}}\\
&\textbf{($K_H$)$[-]$} & 8.4667&2.1167&0.0847&8.4667&2.1167&0.0847\\
\hline
Xe &5.7E-5 \cite{blander_solubility_1959}&0.9630&0.5639&0.0327&
0.9149&0.5357&0.0310\\
Kr &2.8E-4 \cite{blander_solubility_1959}&0.9595&0.5630&0.0327&
0.9115&0.5349&0.0310\\
H &3.9E-3 \cite{tomkins_gases_2016}&0.9066&0.5499&0.0326&
0.8613&0.5224&0.0309\\
\hline
\end{tabularx}
\begin{tablenotes}
\footnotesize
\item$^{\star}$With axial-flow centrifugal bubble separator by
Gabbard \emph{et al.}, which allows the bubble separation efficiency
$\epsilon_{es}$=0.95 \cite{gabbard_development_1974}. Thus, total
gas removal efficiency ($\epsilon$) can be calculated as follows:
$\epsilon=\epsilon_m\times \epsilon_{es}$.
\end{tablenotes}
\label{tab:gas_removal_efficiency}
\vspace{-0.9em}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Effective multiplication factor dynamics}
Figures~\ref{fig:keff-eps-var} and \ref{fig:keff-eps-var-zoom} demonstrate the
effective multiplication factor ($k_{eff}$) dynamics
during 25 years of operation with 15 different moderator rod configurations
(cycles) described in Appendix~A. SaltProc v1.0
coupled to Serpent calculated
$k_{eff}$ after removing fission products and feeding 5\% \gls{LEU} at the end
of each depletion step (3 days, as determined in
Section~\ref{sec:time-refinement}). Notably, the core went subcritical during
the first cycle (startup moderator rod configuration) after 330 and 318 days
for $K_L=8.4667$ and 0.0847 $mm/s$, respectively.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/eps/keff.png}
\caption{Effective multiplication factor dynamics for the full-core
    \gls{TAP} core model during 25 years of operation for the case with
    realistic fission-product removal efficiency and various mass transfer
    coefficients. The confidence interval $\sigma=28$ $pcm$ is shaded.}
\label{fig:keff-eps-var}
\end{figure}
\begin{figure}[htbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.93\textwidth]{ch4/eps/keff_zoomed_1.png}\\
\vspace{-6mm}
\hspace{+1mm}
\includegraphics[width=0.93\textwidth]{ch4/eps/keff_zoomed_2.png}
\vspace{-3mm}
\caption{Zoomed effective multiplication factor dynamics while switching
from Cycle \#1 (startup geometry configuration, 347 moderator rods,
\gls{SVF}=0.917) to Cycle \#2 (\gls{SVF}=0.887) (upper panel) and
from Cycle \#2 to Cycle \#3 (\gls{SVF}=0.881)
(lower panel) for various mass transfer coefficients ($K_L$).
Confidence interval $\sigma=28$ $pcm$ is shaded.}
\label{fig:keff-eps-var-zoom}
\end{figure}
A reduced mass transfer coefficient worsens the neutron poison removal
efficiency, which shortens the interval between
shutdowns for moderator rod updates. Additionally, the presence of unremoved
poisons in the core suppresses the effective multiplication factor after
moderator reconfiguration ($\approx500$ $pcm$ lower for $K_L=0.0847$ $mm/s$
than for $K_L=8.4667$ $mm/s$ at the \gls{BOL} and $\approx1100$ $pcm$ at
the \gls{EOL}). Overall, noble gas removal provides significant neutronics
benefits (fewer neutrons are lost to strong absorbers such as
$^{135}$Xe), better fuel utilization, and longer moderator rod
reconfiguration intervals.
\subsubsection{Neutron spectrum}
Figure~\ref{fig:spectrum-eps-var} shows the normalized neutron flux spectrum
for the full-core \gls{TAP} core model in the energy range from 10$^{-9}$ to
15 MeV. The neutron energy spectrum at the \gls{EOL} is harder than at
the \gls{BOL} due to the moderator-to-fuel ratio growth during reactor
operation caused by periodic moderator rod reconfigurations. The \gls{TAP}
reactor spectrum is harder than in a typical \gls{LWR} and correlates well
(Pearson correlation coefficient $>0.8$) with the \gls{TAP} neutronics white
paper \cite{transatomic_power_corporation_neutronics_2016} and ORNL
reports \cite{betzler_assessment_2017-1, betzler_two-dimensional_2017}.
The liquid phase mass transfer coefficient ($K_L$) and, consequently, the
noble gas removal efficiency ($\epsilon$) have a negligible effect on the
spectrum in the fast range (between 10$^{-2}$ and 10 MeV) at the \gls{EOL}.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.7\textwidth]{ch4/eps/spectrum.png}
\vspace{-3mm}
\caption{The neutron flux energy spectrum normalized per unit lethargy at
    the \gls{BOL} and \gls{EOL} for the case with realistic fission-product
    removal efficiency and various mass transfer coefficients.}
\label{fig:spectrum-eps-var}
\end{figure}
However, Figure~\ref{fig:spectrum-th-eps-var} demonstrates a notable
difference in the thermal range of the spectrum due to the enormous $^{135}$Xe
absorption cross section ($\sigma_{a,^{135}Xe}=2.6\times10^6$ b).
Figure~\ref{fig:xe135-eps-var-zoomes} shows that the $^{135}$Xe mass in the
core at the \gls{EOL} for the case with low noble gas removal efficiency
($K_L=0.0847$ $mm/s$) is significantly larger than for the case with high
removal efficiency ($K_L=8.4667$ $mm/s$), which leads to a higher neutron loss
due to absorption in xenon. Overall, noble gas removal from the fuel salt
visibly alters the thermal part of the neutron spectrum.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.7\textwidth]{ch4/eps/spectrum_th_zoomed.png}
\vspace{-3mm}
\caption{The neutron flux energy spectrum normalized per unit lethargy at
    the \gls{EOL}, zoomed into the thermal energy range.}
\label{fig:spectrum-th-eps-var}
\end{figure}
\subsubsection{Fuel salt isotopic composition evolution}
The time-dependent isotopic compositions obtained with different noble gas
extraction efficiencies behave very similarly. For the predicted $^{235}$U
mass, the difference between $K_L=8.4667$ $mm/s$ (91.5\% of $^{135}$Xe
removed) and $K_L=0.0847$ $mm/s$ (3.1\% of $^{135}$Xe removed) is within
0.2\% for the first 14 years and rises to 1.15\% over the remaining 10 years
(Figure~\ref{fig:u235-eps-var}). The simulations with a mass transfer
coefficient smaller than 8.4667 $mm/s$ retain more
$^{235}$U during operation because more neutrons are parasitically
absorbed by the noble gases, which leads to a lower fission rate. The
relative mass difference in $^{238}$U is small
(Figure~\ref{fig:u238-eps-var}), but the absolute difference is approximately
50 kg at the \gls{EOL}, with low removal efficiency corresponding to a reduced
\gls{EOL} inventory of $^{238}$U.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.8\textwidth]{ch4/eps/u235.png}
\vspace{-4mm}
\caption{SaltProc-calculated mass of $^{235}$U in the fuel salt during
25 years of operation for $K_L=8.4667$ $mm/s$ compared with less
effective noble gas removal.}
\label{fig:u235-eps-var}
\end{figure}
\begin{figure}[hbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.85\textwidth]{ch4/eps/u238.png}
\vspace{-4mm}
\caption{SaltProc-calculated mass of $^{238}$U in the fuel salt during
25 years of operation for $K_L=8.4667$ $mm/s$ compared with less
effective noble gas removal.}
\label{fig:u238-eps-var}
\end{figure}
Differences in the plutonium production between cases with different gas
removal efficiencies are much greater. Over 3\% more $^{239}$Pu mass is
generated in
the case with $K_L=0.0847$ $mm/s$ than with $K_L=8.4667$ $mm/s$
(Figure~\ref{fig:pu239-eps-var}). The greater mass of neutron poison
($^{135}$Xe) in the core leads to a harder spectrum
(Figure~\ref{fig:spectrum-th-eps-var}), which results in a faster rate of
destruction of $^{238}$U and increased breeding of fissile $^{239}$Pu.
\begin{figure}[hbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.8\textwidth]{ch4/eps/pu239.png}
\caption{SaltProc-calculated mass of $^{239}$Pu in the fuel salt during
25 years of operation for $K_L=8.4667$ $mm/s$ (91.5\% of $^{135}$Xe is
removed) compared with less effective noble gas removal.}
\label{fig:pu239-eps-var}
\end{figure}
Figure~\ref{fig:xe135-eps-var} demonstrates the $^{135}$Xe mass dynamics in
the \gls{TAP} core during 25 years of operation for various mass transfer
coefficients. Jumps in $^{135}$Xe mass every few years reflect the
spectral shifts due to moderator rod reconfigurations. In
contrast, the mass of $^{135}$I, the primary direct precursor of
$^{135}$Xe, is approximately 18 g and stays almost constant over the 25 years.
Figure~\ref{fig:xe135-eps-var-zoomes} shows the $^{135}$Xe mass at the
end of each depletion time step before and after performing the fuel salt
reprocessing procedure in SaltProc v1.0. The $^{135}$Xe concentration in the
core after performing \gls{FP} removals behaves as expected and is consistent
with the calculated extraction efficiencies in
Table~\ref{tab:gas_removal_efficiency}.
Notably, the $^{135}$Xe inventory increases during the first seven years of
operation and then decreases to approximately 17 g over the remaining years
as the spectrum thermalizes during operation.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.85\textwidth]{ch4/eps/xe135.png}
\caption{SaltProc-calculated mass of $^{135}$Xe in the fuel salt during
    25 years of operation for the case with realistic fission-product removal
    efficiency and various mass transfer coefficients ($K_L$).}
\label{fig:xe135-eps-var}
\end{figure}
I also performed an analytic verification of SaltProc v1.0 correctness by
comparing the SaltProc-calculated mass of $^{135}$Xe after each depletion
step with the mass expected after removal at the realistic efficiency
(Table~\ref{tab:gas_removal_efficiency}). The expected mass of a reprocessed
isotope is calculated as follows:
\begin{align}
\qquad\qquad\qquad & m_{a} = m_{b} \times
(1-\epsilon_{m}) \times (1-\epsilon_{es})
\intertext{where}
m_{a} &= \mbox{mass of the isotope after applying removals and feeds $[g]$}
\nonumber \\
m_{b} &= \mbox{mass of the isotope right before reprocessing $[g]$}
\nonumber \\
\epsilon_{m} &= \mbox{efficiency of the isotope migration to helium bubbles
$[-]$}
\nonumber \\
\epsilon_{es} &= \mbox{entrainment separator extraction efficiency $[-]$.}
\nonumber
\end{align}
This simple check showed that the SaltProc-calculated mass of $^{135}$Xe
(Figure~\ref{fig:xe135-eps-var-zoomes}) matches the expected mass exactly.
Thus, the SaltProc v1.0 extraction module correctly removes target isotopes
with the specified extraction efficiency. Finally, I added this correctness
check as a SaltProc v1.0 unit test.
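A minimal version of that analytic check, implementing the expression above
exactly as stated (an illustrative sketch; the actual unit test lives in the
SaltProc test suite):
\begin{verbatim}
def mass_after_removal(m_before, eps_m, eps_es):
    """Expected isotope mass after one removal step (equation above)."""
    return m_before * (1.0 - eps_m) * (1.0 - eps_es)

# e.g., 135Xe with K_L = 8.4667 mm/s (Table of extraction efficiencies)
assert abs(mass_after_removal(100.0, 0.9630, 0.95)
           - 100.0 * 0.037 * 0.05) < 1e-9
\end{verbatim}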
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/eps/xe135_zoomed_3.png}
\caption{SaltProc-calculated mass of $^{135}$Xe in the fuel salt during
the last 18 months of operation for various mass transfer coefficients
($K_L$) at the end of each depletion step before and after performing the
salt treatment.}
\label{fig:xe135-eps-var-zoomes}
\end{figure}
\FloatBarrier
\section{Safety and operational parameters} \label{sec:safety-param}
The previous section (Section~\ref{sec:long-term}) reported the fuel salt
composition evolution during 25 years of \gls{TAP} \gls{MSR} operation.
The inventory of fissile $^{235}$U decreased with time, while the inventories
of fissile $^{239}$Pu and $^{241}$Pu increased. At
the same time, many poisonous actinides (e.g., $^{236}$U, $^{240}$Pu,
$^{242}$Pu) built up in the core, shifting the neutron energy
spectrum. Moreover, the \gls{TAP} design assumes an intentional spectrum shift
by adding more moderator rods during operation. In this section, I analyze how
such a neutron spectrum shift affects major safety and operational parameters
such as the temperature and void coefficients of reactivity, the total control
rod worth, and other reactor kinetic parameters.
\subsection{Temperature coefficient of reactivity}
The main physical principle underlying reactor temperature feedback is the
expansion of heated material. When the fuel salt temperature increases, the
density of the salt decreases, while the total volume of fuel
salt in the core remains constant because it is bounded by the vessel. When
the moderator rod temperature increases, the density of zirconium hydride
decreases, and the expanding rods displace fuel salt from
the core. Another physical principle underlying temperature feedback is the
Doppler broadening of the resonance capture cross section of $^{238}$U
due to the thermal motion of target nuclei in the fuel. The Doppler effect
arises from the dependence of the capture cross sections on the relative
velocity between neutron and nucleus. The Doppler coefficient of reactivity of
thermal reactors is always negative, and its effect is practically
instantaneous.
The temperature coefficient of reactivity, $\alpha$, quantifies the reactivity
change due to a temperature change in the fuel or moderator components of a
reactor core. $\alpha_{T,j}$ denotes the temperature coefficient of
reactivity of component $j$ (fuel, moderator, or isothermal) and can be
calculated as:
\begin{align}\label{eq:th-feedback}
\alpha_{T,j} &= \frac{\partial \rho}{\partial T_j} \quad[pcm/K]
\intertext{where}
\rho &= \frac{k_{eff}-1}{k_{eff}} \times 10^5 \quad[pcm] \\
k_{eff} &= \mbox{effective multiplication factor corresponding to $T$ of
component $j$ [$-$]} \nonumber \\
\partial T_j &= \mbox{change in average temperature of component $j$
[$K$]}.\nonumber
\end{align}
If the temperature change is assumed to be uniform throughout the core, the
temperature coefficient of reactivity is usually called the total or
isothermal temperature coefficient (ITC), $\alpha_{T,ISO}$, defined as the
change in reactivity per unit temperature change:
\begin{align}\label{eq:itc-feedback}
\alpha_{T,ISO} &= \frac{\Delta\rho}{\Delta T} \quad[pcm/K]
\intertext{where}
\Delta\rho &= \mbox{change in reactivity [$pcm$]} \nonumber \\
\Delta T &= \mbox{change in average temperature of the core [$K$]}.\nonumber
\end{align}
However, fuel and moderator temperature are rarely equal because fuel heats up
much faster than the moderator; thus, the fuel temperature coefficient
($\alpha_{T,F}$ or FTC) and the moderator temperature coefficient
($\alpha_{T,M}$ or MTC) must be calculated
separately. In the base case simulation in this work, the fuel salt and the
moderator temperatures are fixed at 900K. To determine $\alpha_{T,F}$, I
perturbed the fuel salt temperature from 800K to 1000K in
increments of 50K while fixing the moderator temperature at 900K (base case).
Likewise, I calculated $\alpha_{T,M}$ by perturbing the moderator temperature
from 800K to 1000K with 50K increments, while fixing the fuel temperature at
900K.
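In practice, each coefficient is obtained as the slope of a linear fit of
$\rho(T)$ over the perturbed points; a minimal Python sketch of this
procedure (the $k_{eff}$ values below are placeholders, not results of this
work):
\begin{verbatim}
import numpy as np

def reactivity_pcm(keff):
    return (keff - 1.0) / keff * 1e5

temps = np.array([800.0, 850.0, 900.0, 950.0, 1000.0])      # [K]
keff  = np.array([1.0020, 1.0017, 1.0015, 1.0013, 1.0010])  # placeholders

slope, intercept = np.polyfit(temps, reactivity_pcm(keff), 1)
print(slope)  # temperature coefficient [pcm/K]
\end{verbatim}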
The range of temperature perturbations for the temperature coefficient
calculation was selected based on operational parameters. The \gls{TAP}
\gls{MSR} operates in the range of 773-973K (500-700$^{\circ}$C), which is far
below the salt boiling point of approximately 1473K
\cite{transatomic_power_corporation_technical_2016}. The salt freezes below
773K \cite{barton_phase_1958}. At the other end of the temperature range, a
temperature above 973K passively melts a freeze plug, which drains
the fuel salt from the reactor vessel to the drain tanks. The drain tanks have
a subcritical configuration with a large free surface area to readily
dissipate heat by passive cooling
\cite{transatomic_power_corporation_technical_2016}.
Thus, calculating the temperature coefficients in the range from 800
to 1000K captures the outcomes of most accident transients.
To determine the temperature coefficients, the cross section temperatures for
the fuel and moderator were changed in the range of 800-1000K. For
$\alpha_{T,F}$ calculation, changes in the fuel temperature impact cross
section resonances (Doppler effect) as well as the fuel salt density. The
density of fuel salt changes with respect to temperature as follows
\cite{janz_molten_1974}:
\begin{align}\label{eq:salt-den}
\rho_{salt}(T) &= 6.105 - 12.720\times10^{-4}\, T[K] \quad [g/cm^3]
\end{align}
The uncertainty in the salt density calculated using
Equation~\ref{eq:salt-den} is approximately 0.036 $g/cm^3$ at 900K.
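A one-line sketch of Equation~\ref{eq:salt-den}; at the nominal 900K it gives
approximately 4.96 $g/cm^3$:
\begin{verbatim}
def rho_salt(T):
    """Fuel salt density [g/cm^3] as a function of temperature [K]."""
    return 6.105 - 12.720e-4 * T

print(rho_salt(900.0))  # ~4.96 g/cm^3
\end{verbatim}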
In contrast, when the moderator temperature changes, the density, the cross
section temperature, and the geometry all change due to the thermal
expansion of the solid zirconium hydride (ZrH$_{1.66}$) rods. Accordingly, the
new moderator density and dimensions are calculated using the linear thermal
expansion coefficient \cite{yamanaka_thermal_1999}:
\begin{align}
\alpha_L &= 2.734\times10^{-5} \quad [K^{-1}]
\end{align}
Using these thermal expansion data, I took into account the displacement of
the moderator surfaces by generating corresponding geometry definitions for
each Serpent calculation (a small sketch of this update follows the list
below). That is, the $\alpha_{T,M}$ calculation takes into account the
following factors:
\begin{itemize}[noitemsep, topsep=0pt]
\item thermal Doppler broadening of the resonance capture cross sections
in ZrH$_{1.66}$;
\item hydrogen S($\alpha$, $\beta$) thermal scattering data shift due to
moderator temperature change;
\item density change due to moderator thermal expansion/contraction;
\item corresponding geometric changes in the moderator rod diameter and
length.
\end{itemize}
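A sketch of the geometry and density update under isotropic linear expansion
(an assumption of this sketch, not necessarily the exact SaltProc/Serpent
model):
\begin{verbatim}
ALPHA_L = 2.734e-5  # linear expansion coefficient of ZrH_1.66 [1/K]

def expand_rod(r0, l0, rho0, dT):
    """Rod radius, length, and density after a temperature change dT [K]."""
    f = 1.0 + ALPHA_L * dT
    return r0 * f, l0 * f, rho0 / f**3
\end{verbatim}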
By propagating the $k_{eff}$ statistical error provided by Serpent 2, the
corresponding uncertainty in each temperature coefficient is obtained using
the formula:
\begin{align}
\delta\alpha_T &= \abs{\frac{1}{T_{i+1} - T_i}} \sqrt{\frac{\delta
k_{eff}^2(T_{i+1})}{k_{eff}^4(T_{i+1})}
+ \frac{\delta k_{eff}^2(T_i)}{k_{eff}^4(T_i)}}
\intertext{where}
k_{eff} &= \mbox{effective multiplication factor corresponding to $T_i$ [-]}
\nonumber \\
\delta k_{eff} &= \mbox{statistical error for $k_{eff}$ from Serpent output
[$pcm$]} \nonumber \\
T_i &= \mbox{perturbed temperature in the range of 800-1000K.} \nonumber
\end{align}
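The same error propagation in code form (a direct transcription of the
formula above):
\begin{verbatim}
import math

def delta_alpha(t1, t2, k1, dk1, k2, dk2):
    """Statistical uncertainty of a temperature coefficient."""
    return abs(1.0 / (t2 - t1)) * math.sqrt(dk1**2 / k1**4
                                            + dk2**2 / k2**4)
\end{verbatim}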
Notably, other sources of uncertainty are neglected, such as design parameter
uncertainty, cross section measurement error\footnote{Chapter 7 of the current
work presents an uncertainty quantification method for propagating cross
section measurement uncertainty through depletion calculations. While
estimating the nuclear-data-related uncertainty of the temperature feedback
coefficients is out of the scope of this work, the method from Chapter 7 can
be adopted in future work for such calculations.}, and approximations inherent
in the equations of state providing the salt and moderator density
dependence on temperature.
Figure~\ref{fig:tc-bol-eol} shows reactivity as a function of fuel, moderator,
and total temperature for the \gls{TAP} \gls{MSR} at the \gls{BOL} and
\gls{EOL}. At startup, the reactivity change with temperature clearly fits a
linear regression (R-squared\footnote{The coefficient of determination ($R^2$)
is a statistical measure of how well the measured data fit the linear
regression line.} is 0.9, 0.99, and 0.98 for the fuel, moderator, and
isothermal cases, respectively).
Also, while the linear relationship between reactivity and moderator
temperature worsens toward the \gls{EOL}, $R^2>0.7$ still indicates a
strong linear association between $\rho$ and $T$ ($R^2$ is 0.99, 0.87, and
0.74 for the fuel,
moderator, and isothermal cases, respectively). I determined the temperature
coefficient of reactivity separately for each case (fuel, moderator,
and isothermal) using the slope of the corresponding linear regression.
\begin{figure}[htbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.97\textwidth]{ch4/saf_par/tc_bol.png}\\
\vspace{-12mm}
\hspace{+0.05mm}
\includegraphics[width=0.97\textwidth]{ch4/saf_par/tc_eol.png}
\vspace{-3mm}
\caption{Serpent-calculated reactivity as a function of fuel salt
(blue), moderator (orange), and both fuel/moderator (green)
temperature
at \gls{BOL} (upper) and \gls{EOL} (lower). The uncertainty
$\pm\sigma$ region is shaded.}
\label{fig:tc-bol-eol}
\end{figure}
Table~\ref{tab:tcoef_tap} summarizes the temperature coefficients of
reactivity in the
\gls{TAP} core calculated at the \gls{BOL} and \gls{EOL}. The fuel
temperature coefficient is negative throughout operation and becomes stronger
toward the \gls{EOL} as the spectrum thermalizes due to the retained
fission products and actinides building up in the fuel salt.
The MTC and ITC are both strongly negative at startup. However, the MTC
becomes weakly positive toward the \gls{EOL} due to the same spectral shift.
To better understand the dynamics of the temperature coefficient evolution, I
calculated the temperature coefficients at 15 distinct moments during
operation to cover all moderator rod configurations described in Appendix~A.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[ht!]
\caption{Temperature coefficients for the \gls{TAP} reactor at the
\gls{BOL} and \gls{EOL}.}
\centering
\begin{tabularx}{0.6\textwidth}{ X r r } \hline
\textbf{Coefficient} & \textbf{\gls{BOL} [pcm/K]} & \textbf{\gls{EOL}
[pcm/K]} \tabularnewline
[5pt] \hline
FTC & $-0.350\pm0.050$ & $-0.868\pm0.045$
\tabularnewline [3pt] \hline
MTC & $-1.134\pm0.050$ & $+0.746\pm0.045$
\tabularnewline [3pt] \hline
ITC & $-1.570\pm0.050$ & $-0.256\pm0.045$
\tabularnewline [3pt] \hline
\end{tabularx}
\label{tab:tcoef_tap}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Figure~\ref{fig:tc-evo} shows the temperature coefficient evolution for the
\gls{TAP} reactor during 25 years of operation, taking into account the
spectral shift due to moderator rod reconfigurations. The fuel temperature
coefficient is almost
constant for 19 years but decreases during the last 6 years (configurations
with 1498 and 1668 moderator rods in the core). In contrast, the moderator
temperature coefficient decreases from $-1.134$ $pcm$/$K$ to $-2.280$
$pcm$/$K$ during the first 11 years and then increases up to $+0.746$
$pcm$/$K$ at the \gls{EOL}.
A moderator temperature increase at startup pushes thermal neutrons to
higher energies, nearly up to the lowest
$^{238}$U capture resonances. After 11 years, a similar
moderator temperature increase shifts neutrons into the same
energy region, but by then that energy range is populated not only with
$^{238}$U resonances but also with low-lying resonances of the accumulated
actinides and fission products.
Additionally, the moderator temperature coefficient increases after 11 years
of operation because there is twice as much moderator in the core at that
point as at the \gls{BOL}. A moderator temperature increase causes fuel
salt displacement due to the thermal expansion of the moderator rods, which
has a particularly strong effect when the salt volume fraction is less than
75\%. That is, when the moderator heats up, the moderator-to-fuel ratio
increases due to the thermal expansion of zirconium hydride, which in turn
leads to a positive change in reactivity. %These
%observations from estimating reactivity coefficients demonstrate the
%importance of proper characterization of thermal scattering data for the
%zirconium hydride moderator.
Finally, the isothermal temperature coefficient dynamics are similar to those
of the MTC:
the ITC decreases from $-1.57$ $pcm/K$ to $-2.66$ $pcm/K$ during the first 13
years of operation. After that, the ITC increases rapidly to $-0.256$ $pcm/K$
at the \gls{EOL}. Overall, the ITC remains negative throughout operation but
becomes relatively weak after 25 years of operation (compared with a
conventional \gls{PWR}, which has an isothermal temperature coefficient of
$\alpha_{T,ISO}\approx-3.08$ $pcm/K$ \cite{forget_integral_2018}).
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/saf_par/tc_evo.png}
\caption{Serpent-calculated fuel, moderator, and isothermal temperature
coefficients of reactivity as a function of time and number of
moderator
rods in the \gls{TAP} core. The uncertainty $\pm\sigma$ region is
shaded.}
\label{fig:tc-evo}
\end{figure}
\subsection{Void coefficient of reactivity}
The effect of fuel voids (i.e., bubbles) on reactivity is evaluated by
reducing the fuel salt density from its base value (0\% void), assuming that
the helium volume fraction in the salt varies between 0 and 2\%. The
temperatures of both the fuel salt and the moderator are held constant at
900K.
Because a decrease in the salt density increases the moderator-to-fuel
ratio, an increase in the helium volume fraction (void) increases reactivity,
as shown in Figure~\ref{fig:void-bol-eol}.
However, the slope of the line (the void coefficient of reactivity,
$\alpha_{V}$) decreases toward the \gls{EOL} due to the gradually increasing
volume of moderator in the core (the fuel salt volume fraction at the
\gls{EOL} is less than 54\%).
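The density perturbation underlying this scan is a simple volume-fraction
scaling (a sketch of the assumption, with $v$ the helium volume fraction):
\begin{verbatim}
def salt_density_with_void(rho_nominal, v):
    """Fuel salt density reduced by a helium void fraction v (0 to 0.02)."""
    return rho_nominal * (1.0 - v)
\end{verbatim}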
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.85\textwidth]{ch4/saf_par/void_coeff_bol_eol.png}
\vspace{-4mm}
\caption{Serpent-calculated reactivity as a function of void volume
fraction [\%] in the fuel salt. The uncertainty $\pm\sigma$ region is
shaded.}
\label{fig:void-bol-eol}
\end{figure}
Figure~\ref{fig:void-evo} shows the void coefficient evolution during 25 years
of operation, taking into account 15 moderator rod reconfigurations. The
positive void coefficient of reactivity, though not ideal, does not compromise
reactor safety as long as fuel density changes are coupled to
changes in temperature. And, while some void fraction fluctuations may occur
due to gaseous fission product production, the generation rates are usually
almost constant. However, a large volume of sparging gas (helium) could be
accidentally introduced into the \gls{TAP} core in case of a bubble separator
malfunction. Thus, \emph{the bubble separator must have a backup safety
mechanism} to avoid a
sudden positive reactivity insertion in case of separator failure,
particularly at the \gls{BOL}. These
observations from calculating the reactivity coefficients should be taken into
account in the \gls{TAP} \gls{MSR} accident analysis and safety justification.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.94\textwidth]{ch4/saf_par/void_evo.png}
\caption{Serpent-calculated void coefficient of reactivity as a function
of time and number of moderator rods in the \gls{TAP} core. The
uncertainty $\pm\sigma$ region is shaded.}
\label{fig:void-evo}
\end{figure}
\subsection{Reactivity control rod worth}
In the \gls{TAP} concept, control rods perform two main functions: shutting
down the reactor at any point during operation by negative reactivity
insertion, and compensating for the excess reactivity after moderator
rod reconfiguration during regular maintenance. In an accident, the control
rods would be dropped into the core. The total control rod worth
($\rho_{CRW}$ or CRW) is calculated at various moments during the 25 years of
operation to evaluate the influence of the neutron spectrum shift on the CRW.
The reactivity worth of all control rods is defined as:
\begin{align}
\rho_{CRW}(pcm) &= \frac{k_{eff}^W - k_{eff}^I}{k_{eff}^W k_{eff}^I}\times
10^5 \\
\rho_{CRW}(\$) &= \frac{1}{\beta_{eff}} \frac{k_{eff}^W - k_{eff}^I}{k_{eff}^W
k_{eff}^I}
\intertext{where}
k_{eff}^W &= \mbox{effective multiplication factor when all rods are fully
withdrawn} \nonumber \\
k_{eff}^I &= \mbox{effective multiplication factor when all rods are fully
inserted} \nonumber \\
\beta_{eff} &= \mbox{effective delayed neutron fraction.} \nonumber
\end{align}
The statistical error of the reactivity worth is obtained using the following formulas:
\begin{align}
\delta\rho_{CRW}(pcm) &= \sqrt{\frac{(\delta k_{eff}^W)^2}{(k_{eff}^W)^4} +
\frac{(\delta k_{eff}^I)^2}{(k_{eff}^I)^4}} \\
\delta\rho_{CRW}(\$) &= \frac{1}{\beta_{eff}}\sqrt{\frac{(\delta
k_{eff}^W)^2}{(k_{eff}^W)^4} + \frac{(\delta k_{eff}^I)^2}{(k_{eff}^I)^4} +
\frac{(\delta \beta_{eff})^2 (k_{eff}^W-k_{eff}^I)^2}{\beta_{eff}^2
(k_{eff}^W k_{eff}^I)^2}}
\intertext{where}
\delta k_{eff}^{W}, \delta k_{eff}^{I}, \delta \beta_{eff} &=
\mbox{statistical errors from Serpent output.}
\nonumber
\end{align}
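A direct transcription of these expressions (a sketch; the $k_{eff}$ and
$\beta_{eff}$ inputs come from Serpent output):
\begin{verbatim}
import math

def crw_pcm(k_w, k_i):
    return (k_w - k_i) / (k_w * k_i) * 1e5

def crw_dollar(k_w, k_i, beta_eff):
    return (k_w - k_i) / (k_w * k_i) / beta_eff

def crw_pcm_error(k_w, dk_w, k_i, dk_i):
    return math.sqrt(dk_w**2 / k_w**4 + dk_i**2 / k_i**4)
\end{verbatim}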
Figure~\ref{fig:crw-evo} demonstrates the control rod worth evolution during
25 years of \gls{TAP} reactor operation. The cluster of 25 control rods made
of boron carbide (B$_4$C) provides a reactivity worth of $5.059\pm0.014$ \$ at
the \gls{BOL}.
However, the spectral shift due to the additional moderator rods toward the
\gls{EOL} leads to a significant change in $\rho_{CRW}$. Adding more
moderation near the control rods increases $\rho_{CRW}$ due to the local
neutron spectrum thermalization (see the transition from 347 to 427 moderator
rods, Figure~\ref{fig:tap-406-681}).
In contrast, adding moderator rods far away from the control rod positions
degrades $\rho_{CRW}$ (see the transition from 427 to 505
moderator rods, Figure~\ref{fig:tap-406-681}). On the one hand, the spectrum
thermalizes, and many fission product poisons exhibit larger absorption cross
sections in the thermal energy range. On the other hand, higher actinides
(particularly isotopes of plutonium) accumulate in the fuel salt, which
deteriorates the control rod worth. Overall, $\rho_{CRW}$ decreases to
$4.472\pm0.015$ \$ at the \gls{EOL}.
\begin{figure}[htp!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.85\textwidth]{ch4/saf_par/crw_evo_dollar.png}
\vspace{-5mm}
\caption{Serpent-calculated total control rod worth as a function
of time and number of moderator rods in the \gls{TAP} core. The
uncertainty $\pm\sigma$ region is shaded.}
\label{fig:crw-evo}
\end{figure}
Overall, the design of the reactivity control system is sufficient to shut
down the \gls{TAP} reactor at the \gls{BOL}. However, the spectral shift, the
moderator rod reconfigurations, and the change in the salt composition during
operation drive the total control rod worth below the excess reactivity,
violating reactor safety requirements (insufficient shutdown margin). Thus,
the number of control rods, their positions, or the absorber material must be
revised to ensure that the \gls{TAP} reactor can be safely shut down at any
moment during operation. For example, europium oxide (Eu$_2$O$_3$) might be a
better absorbing material for the control rods \cite{ashraf_preliminary_2020}.
\subsection{Reactor kinetic parameters}
Most of the neutrons produced in fission are prompt ($>99$\%). Less than
1\% of neutrons are emitted later by fission products called
delayed neutron precursors (DNP). The term ``delayed'' means that the neutron
is emitted following the $\beta$-decay of a precursor, with half-lives in the
range from a few milliseconds up to about a minute. Even though the number of
delayed neutrons per fission neutron is quite small ($<1$\% for most fissile
isotopes), they play an essential role in nuclear reactor control. The
presence of delayed neutrons changes the dynamic time response of a reactor to
a reactivity change from $10^{-7}$ s to 10 s, making the reactor controllable
by a reactivity control system such as control rods. In the JEFF-3.1.2 nuclear
data library, delayed neutron precursors are divided into 8 groups, each with
a characteristic decay constant, $\lambda_i$. The delayed neutron fraction,
$\beta_i$, is defined as the fraction of all fission neutrons that appear as
delayed neutrons in the $i^{th}$ group.
It is crucial to study the kinetic parameter dynamics because the fuel salt
composition changes with time and new actinides appear in the fuel, which
alters the emission of delayed neutrons. Figures~\ref{fig:beta-eol}
and \ref{fig:lamda-eol} show the precursor-group-wise delayed neutron fraction
(DNF, $\beta_i$) and decay constant ($\lambda_i$) evolution during 25 years of
\gls{TAP} \gls{MSR} operation. The effective delayed neutron fraction
($\beta_{eff}$) in the \gls{TAP} core decreased dramatically from
$7.245\times10^{-3}$ ($\pm0.5$\%) at the \gls{BOL} to
$4.564\times10^{-3}$ ($\pm0.6$\%) at the \gls{EOL} ($-37$\%).
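The quoted relative decline follows directly (simple arithmetic with the
values above):
\begin{verbatim}
beta_bol, beta_eol = 7.245e-3, 4.564e-3
print((beta_eol - beta_bol) / beta_bol * 100)  # about -37%
\end{verbatim}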
\begin{figure}[htbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/saf_par/betas_evo.png}
\vspace{-5mm}
\caption{Evolution of the precursor-group-wise delayed neutron fraction
($\beta_i$) as a function of time for the \gls{TAP} \gls{MSR}. The
uncertainty $\pm\sigma$
region is shaded.}
\label{fig:beta-eol}
\end{figure}
\begin{figure}[hbbp!] % replace 't' with 'b' to
\centering
\includegraphics[width=\textwidth]{ch4/saf_par/lambdas_evo.png}
\vspace{-5mm}
\caption{Evolution of the precursor-group-wise decay constant
($\lambda_i$) as a function of time for the \gls{TAP} \gls{MSR}. The
uncertainty $\pm\sigma$ region is shaded.}
\label{fig:lamda-eol}
\end{figure}
Similarly, the effective precursor decay constant ($\lambda_{eff}$) decreased
slightly from $0.481$ $s^{-1}$ ($\pm0.8$\%) to $0.468$ $s^{-1}$ ($\pm1.1$\%)
during the 25 years of operation. During operation, the concentration of
$^{235}$U decreases, while the concentration of fissile plutonium isotopes
(e.g., $^{239}$Pu)
increases. Notably, $^{239}$Pu emits about 2.5 times fewer delayed neutrons
than $^{235}$U; the delayed neutron yields are 0.00664 and 0.01650 for
$^{239}$Pu and $^{235}$U, respectively. Thus, as the fuel salt burnup
increases, delayed neutron emission becomes dominated by plutonium isotopes
(e.g., $^{239}$Pu and $^{241}$Pu) and decreases with time. All decay constants
show a slight decrease toward the \gls{EOL} due to the reactor spectrum
hardening. This 37\% decline in the effective delayed neutron fraction and
3\% decline in the effective precursor decay constant must be taken into
account in the \gls{TAP} design accident analysis and safety justification.
\newpage
\section{Concluding remarks}
This chapter demonstrated SaltProc v1.0 capabilities for lifetime-long fuel
salt depletion simulations applied to the \gls{TAP} \gls{MSR}.
Section~\ref{sec:tap_design_sum} summarized the \gls{TAP} \gls{MSR} core and
fuel salt reprocessing system details that inform the SaltProc model
(Section~\ref{sec:tap_model}).
%I validated SaltProc v1.0 capabilities for lifetime-long (25 years) depletion
%calculation with assumed gas ideal removal efficiency against
%SCALE/ChemTriton
%simulation by Betzler \emph{et al.} \cite{betzler_assessment_2017-1}.
%Additionally, this chapter presented isotopic composition evolution for
%various, non-ideal gas removal efficiency (e.g, $<100$\% of the target
%isotope
%is removed).
Section~\ref{sec:ben-valid} presented lifetime-long depletion simulations with
SaltProc v1.0. The 25-year simulation assumed ideal removal efficiency
(i.e., 100\% of the target neutron poison is removed at the end of each
depletion step). This validation effort demonstrated good agreement with the
reference ORNL report \cite{betzler_assessment_2017-1}. The full-core 3D
SaltProc/Serpent analysis showed that spectrum hardening over the first 13
years of operation produces a sufficient amount of fissile plutonium to
achieve a fuel salt burnup of 76.3 MWd/kgU after 22.5 years of operation.
The SaltProc-calculated inventories of major heavy isotopes at the \gls{EOL}
are consistent with results in the literature. The difference in mass between
SaltProc and the reference was only 3\% and 4\% for fissile ($^{235}$U,
$^{239}$Pu, $^{241}$Pu) and non-fissile ($^{236}$U, $^{238}$U, $^{238}$Pu,
$^{240}$Pu, $^{242}$Pu) isotopes, respectively. Finally, the
SaltProc-calculated feed rate is 460.8 kg of UF$_4$ per year, which is
consistent with the 480 kg/y reported by Betzler \emph{et al.}
\cite{betzler_assessment_2017-1}.
The time step refinement study in Section~\ref{sec:time-refinement} showed
that accurate uranium isotopic content predictions can be obtained with a
relatively long depletion time step (6 or 12 days). However, the significant
absolute difference in plutonium mass at the \gls{EOL} ($\approx10$ kg for a
6-day step) could be a safeguards issue, as it exceeds one significant
quantity (8 kg) over the reactor lifetime. Overall, to obtain accurate
plutonium isotopic content without raising proliferation concerns, a 3-day
depletion time step must be used.
Section~\ref{sec:long-term-real} of this chapter demonstrated SaltProc v1.0
for a 25-year depletion simulation with a realistic, physics-based noble gas
removal efficiency. When identifying a reasonable mathematical model
for the realistic gas removal efficiency ($\epsilon$), the liquid phase mass
transfer coefficient ($K_L$) demonstrated a strong correlation with
$\epsilon$. SaltProc simulations using different $K_L$ in the validity range
from 0.0847 to 8.4667 $mm/s$ (corresponding to a $^{135}$Xe removal efficiency
$\epsilon\in [0.031,0.915]$) showed that a larger liquid phase mass transfer
coefficient, and the correspondingly higher noble gas extraction efficiency,
provides significant neutronics benefits, better fuel utilization, and a
longer time between shutdowns for moderator rod reconfiguration. Notably, the
larger mass transfer coefficient also yields a slightly more thermal neutron
spectrum because poisonous \glspl{FP} ($^{135}$Xe) absorb fewer thermal
neutrons. In the following chapters, the results of these realistic depletion
simulations will be used for short-term transient simulations and safety
parameter analysis.
Finally, this chapter demonstrated the safety and operational parameter
evolution during 25 years of \gls{TAP} \gls{MSR} operation. In general, the
safety characteristics of the reactor worsen with time due to the actinides
and \glspl{FP} accumulating in the fuel salt. Shifting the neutron spectrum
from epithermal to thermal by periodically adding more moderator rods also
negatively influences crucial safety and operational characteristics. These
observations must be taken into account in the \gls{TAP} \gls{MSR} design,
accident analysis, and safety justification.
function isgn = i4row_compare ( m, n, a, i, j )
%*****************************************************************************80
%
%% I4ROW_COMPARE compares two rows of an I4ROW.
%
% Example:
%
% Input:
%
% M = 3, N = 4, I = 2, J = 3
%
% A = (
% 1 2 3 4
% 5 6 7 8
% 9 10 11 12 )
%
% Output:
%
% ISGN = -1
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 23 April 2005
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer M, N, the number of rows and columns.
%
% Input, integer A(M,N), an array of M rows of vectors of length N.
%
% Input, integer I, J, the rows to be compared.
% I and J must be between 1 and M.
%
% Output, integer ISGN, the results of the comparison:
% -1, row I < row J,
% 0, row I = row J,
% +1, row J < row I.
%
%
% Check that I and J are legal.
%
if ( i < 1 )
fprintf ( 1, '\n' );
fprintf ( 1, 'I4ROW_COMPARE - Fatal error!\n' );
fprintf ( 1, ' Row index I is less than 1.\n' );
fprintf ( 1, ' I = %d\n', i );
error ( 'I4ROW_COMPARE - Fatal error!' );
elseif ( m < i )
fprintf ( 1, '\n' );
fprintf ( 1, 'I4ROW_COMPARE - Fatal error!\n' );
fprintf ( 1, ' Row index I is out of bounds.\n' );
fprintf ( 1, ' I = %d\n', i );
fprintf ( 1, ' Maximum legal value is M = %d\n', m );
error ( 'I4ROW_COMPARE - Fatal error!' );
end
if ( j < 1 )
fprintf ( 1, '\n' );
fprintf ( 1, 'I4ROW_COMPARE - Fatal error!\n' );
fprintf ( 1, ' Row index J is less than 1.\n' );
fprintf ( 1, ' J = %d\n', j );
error ( 'I4ROW_COMPARE - Fatal error!' );
elseif ( m < j )
fprintf ( 1, '\n' );
fprintf ( 1, 'I4ROW_COMPARE - Fatal error!\n' );
fprintf ( 1, ' Row index J is out of bounds.\n' );
fprintf ( 1, ' J = %d\n', j );
fprintf ( 1, ' Maximum legal value is M = %d\n', m );
error ( 'I4ROW_COMPARE - Fatal error!' );
end
isgn = 0;
if ( i == j )
return
end
k = 1;
while ( k <= n )
if ( a(i,k) < a(j,k) )
isgn = -1;
return
elseif ( a(j,k) < a(i,k) )
isgn = +1;
return
end
k = k + 1;
end
return
end
|
[STATEMENT]
lemma bigtheta_powr_0 [landau_simp]:
"eventually (\<lambda>x. (f x :: real) \<noteq> 0) F \<Longrightarrow> (\<lambda>x. f x powr 0) \<in> \<Theta>[F](\<lambda>_. 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<^sub>F x in F. f x \<noteq> 0 \<Longrightarrow> (\<lambda>x. f x powr 0) \<in> \<Theta>[F](\<lambda>_. 1)
[PROOF STEP]
by (intro bigthetaI_cong) (auto elim!: eventually_mono) |
20090514 20:32:04 Welcome to the Wiki. The same account you use here works on wiki:sacramento:Front Page Sacramento Wiki. Perhaps the Salon Arata page belongs there? Alternatively perhaps just using an interwiki link ({{{wiki:sacramento:Page Name Display Name}}}) would be appropriate? Users/JasonAller
|
Formal statement is: lemma pderiv_1 [simp]: "pderiv 1 = 0" Informal statement is: The derivative of the constant polynomial $1$ is $0$. |
Formal statement is: lemma content_dvd_coeff [simp]: "content p dvd coeff p n" Informal statement is: The content of a polynomial divides each of its coefficients.
[PlusNatSemi] Semigroup Nat where
(<+>) x y = x + y
[MultNatSemi] Semigroup Nat where
(<+>) x y = x * y
[PlusNatMonoid] Monoid Nat using PlusNatSemi where
neutral = 0
[MultNatMonoid] Monoid Nat using MultNatSemi where
neutral = 1
test : Monoid a => a -> a
test x = x <+> x <+> neutral
[CmpLess] Ord Int where
compare x y = if (x == y) then EQ else
if (boolOp prim__sltInt x y) then GT else LT
foo : Int -> Int -> Bool
foo x y = x < y
using implementation CmpLess
  foo' : Int -> Int -> Bool
  foo' x y = x < y

using implementation PlusNatMonoid
  main : IO ()
  main = do printLn (test (the Nat 6))
            printLn (test @{MultNatMonoid} 6)
            printLn (foo 3 4)
            printLn (foo' 3 4)
|
# .libPaths() #this command gives the R library
func1 <- function(){
# Create vector objects.
city <- c("Tampa","Seattle","Hartford","Denver")
state <- c("FL","WA","CT","CO")
zipcode <- c(33602,98104,06161,80294) # note: numeric literals drop the leading zero of 06161
# Combine the above three vectors; cbind() returns a character matrix here.
addresses <- cbind(city,state,zipcode)
# Print a header.
cat("# # # # The First data frame\n")
# Print the data frame.
print(addresses)
# Create another data frame with similar columns
new.address <- data.frame(
city = c("Lowry","Charlotte"),
state = c("CO","FL"),
zipcode = c("80230","33949"),
stringsAsFactors = FALSE
)
# Print a header.
cat("# # # The Second data frame\n")
# Print the data frame.
print(new.address)
# Combine rows from both the data frames.
all.addresses <- rbind(addresses,new.address)
# Print a header.
cat("# # # The combined data frame\n")
# Print the result.
print(all.addresses)
#We can merge two data frames by using the merge() function.
#The data frames must have same column names on which the merging happens.
library(MASS)
merged.Pima <- merge(x = Pima.te, y = Pima.tr,
by.x = c("bp", "bmi"),
by.y = c("bp", "bmi")
)
print(merged.Pima)
nrow(merged.Pima)
}
# func1()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# reshaping the data
func2 <- function(){
library(MASS)
print(ships)
library(reshape2)
#Now we melt the data to organize it, converting all columns other than type and year into multiple rows
molten.ships <- melt(ships, id = c("type","year"))
# print(molten.ships)
#Use ‘acast’ or ‘dcast’ depending on whether you want vector/matrix/array output or data frame output.
#We can cast the molten data into a new form where the aggregate of each type of ship for each year is created.
#It is done using the cast() function.(for each id=(type,year) the aggregate of other columns is calculated)
recasted.ship <- dcast(molten.ships, type+year~variable,sum)
print(recasted.ship)
}
func2()
|
# # An equation of second order in one dimension
#
# Consider the following problem. Given \\(f \in C([0, 1])\\), find a function \\(u\\) satisfying
#     -u'' + u = f   in (0, 1)
#     u(0) = u(1) = 0
using SymFEL
using SymPy
using LinearAlgebra
using SparseArrays
using PyPlot
close("all")
# discretization parameters
N = 101; # number of nodes
dx = 1 / (N - 1); # discretization step
nodes = range(0, stop=1, length=N);
bound_nodes = [1, N];
# exact solution
u_exact = nodes .* sin.(pi * nodes);
# right hand
f = (1 + pi^2) * nodes .* sin.(pi * nodes) - 2 * pi * cos.(pi * nodes);
# elementary matrices
elem_K = SymFEL.get_lagrange_em(1, 1, 1);
elem_M = SymFEL.get_lagrange_em(1, 0, 0);
elem_K_dx = convert(Matrix{Float64}, elem_K.subs(h, dx));
elem_M_dx = convert(Matrix{Float64}, elem_M.subs(h, dx));
K = SymFEL.assemble_1d_FE_matrix(elem_K_dx, N, intNodes1=0, intNodes2=0, dof1=1, dof2=1);
M = SymFEL.assemble_1d_FE_matrix(elem_M_dx, N, intNodes1=0, intNodes2=0, dof1=1, dof2=1);
F = M * f;
# boundary conditions
tgv = 1e100
A = K + M
A[bound_nodes, bound_nodes] += tgv*sparse(Matrix{Float64}(I, 2, 2));
F[bound_nodes] = zeros(2);
u = A \ F;
plot(nodes, u_exact);
plot(nodes, u);
legend(["Exact solution", "Approximate solution"]);
err = u - u_exact;
println("L2 error = ", sqrt(err' * (M * err)))
println("H1 error = ", sqrt(err' * (K * err)))
# error convergence
NV = 10 * 2 .^ (0:10)
EL2 = zeros(11)
EH1 = zeros(11)
i = 1
for N = NV
global i
local dx = 1 / (N - 1); # discretization step
local nodes = range(0, stop=1, length=N);
local bound_nodes = [1, N];
# exact solution
local u_exact = nodes .* sin.(pi * nodes);
# right hand
local f = (1 + pi^2) * nodes .* sin.(pi * nodes) - 2 * pi * cos.(pi * nodes);
local elem_K_dx = convert(Matrix{Float64}, elem_K.subs(h, dx));
local elem_M_dx = convert(Matrix{Float64}, elem_M.subs(h, dx));
local K = SymFEL.assemble_1d_FE_matrix(elem_K_dx, N, intNodes1=0, intNodes2=0, dof1=1, dof2=1);
local M = SymFEL.assemble_1d_FE_matrix(elem_M_dx, N, intNodes1=0, intNodes2=0, dof1=1, dof2=1);
local F = M * f;
# boundary conditions
local tgv = 1e100
local A = K + M
A[bound_nodes, bound_nodes] += tgv*sparse(Matrix{Float64}(I, 2, 2));
F[bound_nodes] = zeros(2);
local u = A \ F;
local err = u - u_exact;
EL2[i] = sqrt(err' * M * err)
EH1[i] = sqrt(err' * K * err)
i += 1
end
figure()
loglog(1 ./ NV, EL2, "d")
loglog(1 ./ NV, EH1)
loglog(1 ./ NV, (1 ./ NV).^2)
xlabel(L"$\Delta x$")
legend(["L2 Error", "H1 Error", L"$\Delta x^2$"])
show()
|
[STATEMENT]
lemma set_spmf_assert_spmf_eq_empty [simp]: "set_spmf (assert_spmf b) = {} \<longleftrightarrow> \<not> b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (set_spmf (assert_spmf b) = {}) = (\<not> b)
[PROOF STEP]
by(cases b) simp_all |
Module option.
(* Define the polymorphic option type, which will be used for the list hd function. *)
Inductive option (X:Type) : Type :=
| Some: X -> option X
| None : option X.
End option.
|
[STATEMENT]
lemma fps_divide_shift_denom:
fixes f g :: "'a::{inverse,comm_monoid_add,uminus,mult_zero} fps"
assumes "n \<le> subdegree g" "subdegree g \<le> subdegree f"
shows "f / fps_shift n g = Abs_fps (\<lambda>k. if k<n then 0 else (f/g) $ (k-n))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f / fps_shift n g = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n))
[PROOF STEP]
proof (intro fps_ext)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>na. (f / fps_shift n g) $ na = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ na
[PROOF STEP]
fix k
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>na. (f / fps_shift n g) $ na = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ na
[PROOF STEP]
from assms(1)
[PROOF STATE]
proof (chain)
picking this:
n \<le> subdegree g
[PROOF STEP]
have LHS:
"(f / fps_shift n g) $ k = (f * inverse (unit_factor g)) $ (k + (subdegree g - n))"
[PROOF STATE]
proof (prove)
using this:
n \<le> subdegree g
goal (1 subgoal):
1. (f / fps_shift n g) $ k = (f * inverse (unit_factor g)) $ (k + (subdegree g - n))
[PROOF STEP]
using fps_unit_factor_shift[of n g]
[PROOF STATE]
proof (prove)
using this:
n \<le> subdegree g
n \<le> subdegree g \<Longrightarrow> unit_factor (fps_shift n g) = unit_factor g
goal (1 subgoal):
1. (f / fps_shift n g) $ k = (f * inverse (unit_factor g)) $ (k + (subdegree g - n))
[PROOF STEP]
by (simp add: fps_divide_def)
[PROOF STATE]
proof (state)
this:
(f / fps_shift n g) $ k = (f * inverse (unit_factor g)) $ (k + (subdegree g - n))
goal (1 subgoal):
1. \<And>na. (f / fps_shift n g) $ na = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ na
[PROOF STEP]
show "(f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k<n then 0 else (f/g) $ (k-n)) $ k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
proof (cases "k<n")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. k < n \<Longrightarrow> (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
2. \<not> k < n \<Longrightarrow> (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
k < n
goal (2 subgoals):
1. k < n \<Longrightarrow> (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
2. \<not> k < n \<Longrightarrow> (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
with assms LHS
[PROOF STATE]
proof (chain)
picking this:
n \<le> subdegree g
subdegree g \<le> subdegree f
(f / fps_shift n g) $ k = (f * inverse (unit_factor g)) $ (k + (subdegree g - n))
k < n
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
n \<le> subdegree g
subdegree g \<le> subdegree f
(f / fps_shift n g) $ k = (f * inverse (unit_factor g)) $ (k + (subdegree g - n))
k < n
goal (1 subgoal):
1. (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
using fps_mult_nth_eq0[of _ f]
[PROOF STATE]
proof (prove)
using this:
n \<le> subdegree g
subdegree g \<le> subdegree f
(f / fps_shift n g) $ k = (f * inverse (unit_factor g)) $ (k + (subdegree g - n))
k < n
?n < subdegree f + subdegree ?g \<Longrightarrow> (f * ?g) $ ?n = (0::'a)
goal (1 subgoal):
1. (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
goal (1 subgoal):
1. \<not> k < n \<Longrightarrow> (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> k < n \<Longrightarrow> (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> k < n
goal (1 subgoal):
1. \<not> k < n \<Longrightarrow> (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
hence "(f/g) $ (k-n) = (f * inverse (unit_factor g)) $ ((k-n) + subdegree g)"
[PROOF STATE]
proof (prove)
using this:
\<not> k < n
goal (1 subgoal):
1. (f / g) $ (k - n) = (f * inverse (unit_factor g)) $ (k - n + subdegree g)
[PROOF STEP]
by (simp add: fps_divide_def)
[PROOF STATE]
proof (state)
this:
(f / g) $ (k - n) = (f * inverse (unit_factor g)) $ (k - n + subdegree g)
goal (1 subgoal):
1. \<not> k < n \<Longrightarrow> (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
with False LHS assms(1)
[PROOF STATE]
proof (chain)
picking this:
\<not> k < n
(f / fps_shift n g) $ k = (f * inverse (unit_factor g)) $ (k + (subdegree g - n))
n \<le> subdegree g
(f / g) $ (k - n) = (f * inverse (unit_factor g)) $ (k - n + subdegree g)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<not> k < n
(f / fps_shift n g) $ k = (f * inverse (unit_factor g)) $ (k + (subdegree g - n))
n \<le> subdegree g
(f / g) $ (k - n) = (f * inverse (unit_factor g)) $ (k - n + subdegree g)
goal (1 subgoal):
1. (f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(f / fps_shift n g) $ k = Abs_fps (\<lambda>k. if k < n then 0::'a else (f / g) $ (k - n)) $ k
goal:
No subgoals!
[PROOF STEP]
qed |
If two topological spaces are homotopy equivalent, then one is contractible if and only if the other is contractible. |
#ifndef _MATH_UTIL_EULER_H_
#define _MATH_UTIL_EULER_H_
/* space group numbers for structures known by this program */
/* these are from the International Tables, allowed values are [1-230] */
/*
* #define FCC 225
* #define BCC 229
* #define DIA 227
* #define SIMPLE_CUBIC 221
* #define HEX 194
* #define SAPPHIRE 167
* #define TRICLINIC 1
*/
#ifndef CHECK_FREE
#define CHECK_FREE(A) { if(A) free(A); (A)=NULL;}
#endif
#ifdef _MSC_VER /* identifies this as a Microsoft compiler */
#include <gsl/gsl_math.h> /* added RX2011 for NaN handling */
#define round(A) ceil((A)-0.5) /* added RX2011 for round() function; note: rounds halfway cases toward -infinity, unlike C99 round() */
#define NAN gsl_nan() /* added RX2011 */
#define isNAN(A) gsl_isnan(A) /* added RX2011 */
#endif
#ifndef NAN /* probably only needed for pcs */
#define NAN nan("") /* probably only needed for cygwin on pc */
#endif
#ifndef isNAN /* a good test for NAN */
#define isNAN(A) ( (A) != (A) )
#endif
#define VECTOR_COPY3(A,B) {A[0]=B[0]; A[1]=B[1]; A[2]=B[2];}
void DeletePoints(size_t len, void *ptr, size_t pntLen, size_t numDel);
void EulerMatrix(double alpha,double beta,double gamma,double M_Euler[3][3]);
void rot2EulerAngles(double A[3][3], double *alpha, double *beta, double *gamma);
void MatrixRz(double Rz[3][3],double angle);
void MatrixRy(double Ry[3][3],double angle);
/* void lowestOrderHKL(int hkl[3]); */
/* void lowestAllowedHKL(int hkl[3], int structure); */
/* long allowedHKL(long h, long k, long l, int structure); */
int gcf(int n1, int n2, int n3);
double normalize3(double a[3]);
void cross(double a[3], double b[3], double c[3]);
void vector3cons(double a[3], double x);
double dot3(double a[3],double b[3]);
double determinant33(double a[3][3]);
void MatrixMultiply31(double a[3][3], double v[3], double c[3]);
void MatrixMultiply33(double a[3][3], double b[3][3], double c[3][3]);
void MatrixTranspose33(double A[3][3]); /* transpose the 3x3 matrix A */
void MatrixCopy33(double dest[3][3], double source[3][3]);
double diff3(double a[3], double b[3]);
double matsDelta(double a[3][3], double b[3][3]);
char *num2sexigesmal(char str[40], double seconds, long places);
#endif
|
With the advent of the computer age, typographers began deprecating double spacing, even in monospaced text. In 1989, Desktop Publishing by Design stated that "typesetting requires only one space after periods, question marks, exclamation points, and colons", and identified single sentence spacing as a typographic convention. Stop Stealing Sheep & Find Out How Type Works (1993) and Designing with Type: The Essential Guide to Typography (2006) both indicate that uniform spacing should be used between words, including between sentences.
|
# Mathematics for Machine Learning: Linear Algebra
## Week3
## Module 3:
### Matrices, vectors, and solving simultaneous equation problems
#### Motivations for linear algebra
```python
from sympy import solve, Poly, Eq, Function, exp
from sympy.abc import x, y, z, a, b
```
$2a + 3b = 8$
$10a +1b = 13$
```python
solve((2 * a + 3* b- 8, 10 * a + b - 13), a, b)
```
{a: 31/28, b: 27/14}
$\begin{bmatrix} 2 & 3 \\ 10 & 1 \end{bmatrix} \begin{bmatrix} a \\ b \end{bmatrix} = \begin{bmatrix} 8 \\ 13 \end{bmatrix}$
## How matrices transform space
```python
import numpy as np
m = np.array([[2 ,3],[10,1]])
v = np.array([[31/28], [27/14]])
```
```python
m@v
```
array([[ 8.],
[13.]])
```python
v.T
```
array([[1.10714286, 1.92857143]])
```python
m = np.array([[7 ,-6],[12,8]])
```
```python
v1 = np.array([7, 12]); v2 = np.array([-6,8])
```
```python
5*v1 + 6 * v2
```
array([ -1, 108])
```python
m @ [5,6]
```
array([ -1, 108])
# Types of Matrix Transformations
```python
I = np.eye(2);I # Identity matrix
```
array([[1., 0.],
[0., 1.]])
```python
Scale = np.diag([3,2]); Scale # scale matrix
```
array([[3, 0],
[0, 2]])
```python
Scale2 = np.diag([-1,2]); Scale2 # scale matrix
```
array([[-1, 0],
[ 0, 2]])
### Rotation matrix
```python
theta = np.pi/6
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s], [s, c]]);R
```
array([[ 0.8660254, -0.5 ],
[ 0.5 , 0.8660254]])
### Composition or combination of matrix transformations
```python
A1 = R
```
```python
A2 = np.array([[-1, 0.0], [0.0, 1]]) #vertical mirror
```
```python
A21=A2@A1
np.where(abs(A21)>1e-15,A21,0).round()
```
array([[ 0., -1.],
[-1., 0.]])
```python
A12 = A1@A2
np.where(abs(A12)>1e-15,A12,0).round()
```
array([[0., 1.],
[1., 0.]])
#### Matrix multiplication is not commutative
#### but matrix multiplication is associative
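A quick numerical check of both properties (a minimal sketch; `B` and `C` are arbitrary example matrices, and `A1` is the rotation matrix from above):
```python
B = np.array([[0, 1], [1, 0]])
C = np.array([[2, 0], [0, 3]])
# rotating then swapping differs from swapping then rotating,
# but regrouping the same ordered product changes nothing
np.allclose(A1 @ B, B @ A1), np.allclose((A1 @ B) @ C, A1 @ (B @ C))
```
(False, True)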
# Using matrices to make transformations
### Practice Quiz
```python
A = np.array([[1/2, -1], [0, 3/4]]); A
```
array([[ 0.5 , -1. ],
[ 0. , 0.75]])
```python
A@[-2, 4]
```
array([-5., 3.])
```python
M= np.array([[1, 0], [0, 8]])@ np.array([[1, 0], [-0.5, 1]]);M
```
array([[ 1., 0.],
[-4., 8.]])
# Solving the apples and bananas problem: Gaussian elimination
```python
M = np.array([[1,1,3],[1, 2, 4],[1, 1, 2]]); M
```
array([[1, 1, 3],
[1, 2, 4],
[1, 1, 2]])
```python
from scipy.linalg import lu
pl, u = lu(M, permute_l=True); u
```
array([[ 1., 1., 3.],
[ 0., 1., 1.],
[ 0., 0., -1.]])
```python
pl@u
```
array([[1., 1., 3.],
[1., 2., 4.],
[1., 1., 2.]])
```python
b = np.array([15, 21, 13])
x = np.linalg.solve(M, b);x
```
array([5., 4., 2.])
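Solving $M x = e_i$ for each column $e_i$ of the identity matrix gives the corresponding column of $M^{-1}$, which is how the next three cells build the inverse: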
```python
I1 = np.array([1, 0, 0])
b1 = np.linalg.solve(M, I1);b1
```
array([ 0., -2., 1.])
```python
I2 = np.array([0, 1, 0])
b2 = np.linalg.solve(M, I2);b2
```
array([-1., 1., -0.])
```python
I3 = np.array([0, 0, 1])
b3 = np.linalg.solve(M, I3);b3
```
array([ 2., 1., -1.])
```python
Minv = np.array([b1,b2,b3]).T;Minv
```
array([[ 0., -1., 2.],
[-2., 1., 1.],
[ 1., -0., -1.]])
```python
M@Minv
```
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
```python
np.linalg.inv(M) == Minv
```
array([[ True, True, True],
[ True, True, True],
[ True, True, True]])
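The exact elementwise comparison happens to hold here, but floating-point results are not guaranteed to match bit-for-bit in general; `np.allclose` is the more robust check:
```python
np.allclose(np.linalg.inv(M), Minv)
```
True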
### Practice Quiz
```python
M = np.array([[4,6,2],[3, 4, 1],[2, 8, 13]]); M
```
array([[ 4, 6, 2],
[ 3, 4, 1],
[ 2, 8, 13]])
```python
b = np.array([9, 7, 2])
x = np.linalg.solve(M, b);x.round(1)
```
array([ 3. , -0.5, -0. ])
5)
```python
A = np.array([[1, 1, 1],
[3, 2, 1],
[2, 1, 2]])
s = np.array([15, 28, 23])
p = np.linalg.solve(A, s);p.round(1)
```
array([3., 7., 5.])
```python
np.linalg.inv(A).round(2)
```
array([[-1.5, 0.5, 0.5],
[ 2. , 0. , -1. ],
[ 0.5, -0.5, 0.5]])
# LAB
# Identifying special matrices
## Instructions
In this assignment, you shall write a function that tests whether a 4×4 matrix is singular, i.e. determines whether an inverse exists, before calculating it.
You shall use the method of converting the matrix to echelon form, testing whether this fails by leaving zeros on the leading diagonal that can’t be removed.
Don't worry if you've not coded before; a framework for the function has already been written.
Look through the code, and you'll be instructed where to make changes.
We'll do the first two rows, and you can use this as a guide to do the last two.
### Matrices in Python
In the *numpy* package in Python, matrices are indexed using zero for the top-most row and left-most column.
I.e., the matrix structure looks like this:
```python
A[0, 0] A[0, 1] A[0, 2] A[0, 3]
A[1, 0] A[1, 1] A[1, 2] A[1, 3]
A[2, 0] A[2, 1] A[2, 2] A[2, 3]
A[3, 0] A[3, 1] A[3, 2] A[3, 3]
```
You can access the value of each element individually using,
```python
A[n, m]
```
which will give the element in the n'th row and m'th column (starting with zero).
You can also access a whole row at a time using,
```python
A[n]
```
which, as you will see, is useful when calculating linear combinations of rows.
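For example, a typical row operation can be written in one line (an illustrative sketch; the coefficient is arbitrary):
```python
A[1] = A[1] - 2 * A[0]  # subtract twice row 0 from row 1, in place
```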
A final note - Python is sensitive to indentation.
All the code you should complete will be at the same level of indentation as the instruction comment.
### How to submit
Edit the code in the cell below to complete the assignment.
Once you are finished and happy with it, press the *Submit Assignment* button at the top of this notebook.
Please don't change any of the function names, as these will be checked by the grading script.
If you have further questions about submissions or programming assignments, here is a [list](https://www.coursera.org/learn/linear-algebra-machine-learning/discussions/weeks/1/threads/jB4klkn5EeibtBIQyzFmQg) of Q&A. You can also raise an issue on the discussion forum. Good luck!
```python
# GRADED FUNCTION
import numpy as np
# Our function will go through the matrix replacing each row in order turning it into echelon form.
# If at any point it fails because it can't put a 1 in the leading diagonal,
# we will return the value True, otherwise, we will return False.
# There is no need to edit this function.
def isSingular(A) :
B = np.array(A, dtype=np.float_) # Make B as a copy of A, since we're going to alter its values.
try:
fixRowZero(B)
fixRowOne(B)
fixRowTwo(B)
fixRowThree(B)
except MatrixIsSingular:
return True
return False
# This next line defines our error flag, for when things go wrong because the matrix is singular.
# There is no need to edit this line.
class MatrixIsSingular(Exception): pass
# For Row Zero, all we require is the first element is equal to 1.
# We'll divide the row by the value of A[0, 0].
# This will get us in trouble though if A[0, 0] equals 0, so first we'll test for that,
# and if this is true, we'll add one of the lower rows to the first one before the division.
# We'll repeat the test going down each lower row until we can do the division.
# There is no need to edit this function.
def fixRowZero(A) :
if A[0,0] == 0 :
A[0] = A[0] + A[1]
if A[0,0] == 0 :
A[0] = A[0] + A[2]
if A[0,0] == 0 :
A[0] = A[0] + A[3]
if A[0,0] == 0 :
raise MatrixIsSingular()
A[0] = A[0] / A[0,0]
return A
# First we'll set the sub-diagonal element to zero, i.e. A[1,0].
# Next we want the diagonal element to be equal to one.
# We'll divide the row by the value of A[1, 1].
# Again, we need to test if this is zero.
# If so, we'll add a lower row and repeat setting the sub-diagonal elements to zero.
# There is no need to edit this function.
def fixRowOne(A) :
A[1] = A[1] - A[1,0] * A[0]
if A[1,1] == 0 :
A[1] = A[1] + A[2]
A[1] = A[1] - A[1,0] * A[0]
if A[1,1] == 0 :
A[1] = A[1] + A[3]
A[1] = A[1] - A[1,0] * A[0]
if A[1,1] == 0 :
raise MatrixIsSingular()
A[1] = A[1] / A[1,1]
return A
# This is the first function that you should complete.
# Follow the instructions inside the function at each comment.
def fixRowTwo(A) :
# Insert code below to set the sub-diagonal elements of row two to zero (there are two of them).
A[2] = A[2] - A[2,0] * A[0]
A[2] = A[2] - A[2,1] * A[1]
# Next we'll test that the diagonal element is not zero.
if A[2,2] == 0 :
# Insert code below that adds a lower row to row 2.
A[2] = A[2] + A[3]
# Now repeat your code which sets the sub-diagonal elements to zero.
A[2] = A[2] - A[2,0] * A[0]
A[2] = A[2] - A[2,1] * A[1]
if A[2,2] == 0 :
raise MatrixIsSingular()
# Finally set the diagonal element to one by dividing the whole row by that element.
A[2] = A[2] / A[2,2]
return A
# You should also complete this function
# Follow the instructions inside the function at each comment.
def fixRowThree(A) :
# Insert code below to set the sub-diagonal elements of row three to zero.
A[3] = A[3] - A[3,0] * A[0]
A[3] = A[3] - A[3,1] * A[1]
A[3] = A[3] - A[3,2] * A[2]
# Complete the if statement to test if the diagonal element is zero.
if A[3,3] == 0 :
raise MatrixIsSingular()
# Transform the row to set the diagonal element to one.
A[3] = A[3] / A[3,3]
return A
```
## Test your code before submission
To test the code you've written above, run the cell (select the cell above, then press the play button [ ▶| ] or press shift-enter).
You can then use the code below to test out your function.
You don't need to submit this cell; you can edit and run it as much as you like.
Try out your code on tricky test cases!
```python
A = np.array([
[2, 0, 0, 0],
[0, 3, 0, 0],
[0, 0, 4, 4],
[0, 0, 5, 5]
], dtype=np.float_)
isSingular(A)
```
True
```python
A = np.array([
[0, 7, -5, 3],
[2, 8, 0, 4],
[3, 12, 0, 5],
[1, 3, 1, 3]
], dtype=np.float_)
fixRowZero(A)
```
array([[ 1. , 7.5, -2.5, 3.5],
[ 2. , 8. , 0. , 4. ],
[ 3. , 12. , 0. , 5. ],
[ 1. , 3. , 1. , 3. ]])
```python
fixRowOne(A)
```
array([[ 1. , 7.5 , -2.5 , 3.5 ],
[-0. , 1. , -0.71428571, 0.42857143],
[ 3. , 12. , 0. , 5. ],
[ 1. , 3. , 1. , 3. ]])
```python
fixRowTwo(A)
```
array([[ 1. , 7.5 , -2.5 , 3.5 ],
[-0. , 1. , -0.71428571, 0.42857143],
[ 0. , 0. , 1. , 1.5 ],
[ 1. , 3. , 1. , 3. ]])
```python
fixRowThree(A)
```
array([[ 1. , 7.5 , -2.5 , 3.5 ],
[-0. , 1. , -0.71428571, 0.42857143],
[ 0. , 0. , 1. , 1.5 ],
[ 0. , 0. , 0. , 1. ]])
```python
```
|
#include <boost/spirit/home/karma/format.hpp>
|
function [r,normr,nre,s] = reorth(Q,r,normr,index,alpha,method)
%REORTH Reorthogonalize a vector using iterated Gram-Schmidt
%
% [R_NEW,NORMR_NEW,NRE] = reorth(Q,R,NORMR,INDEX,ALPHA,METHOD)
% reorthogonalizes R against the subset of columns of Q given by INDEX.
% If INDEX==[] then R is reorthogonalized against all columns of Q.
% If the result R_NEW has a small norm, i.e. if norm(R_NEW) < ALPHA*NORMR,
% then a second reorthogonalization is performed. If the norm of R_NEW
% is once more decreased by more than a factor of ALPHA then R is
% numerically in span(Q(:,INDEX)) and a zero-vector is returned for R_NEW.
%
% If method==0 then iterated modified Gram-Schmidt is used.
% If method==1 then iterated classical Gram-Schmidt is used.
%
% The default value for ALPHA is 0.5.
% NRE is the number of reorthogonalizations performed (1 or 2).
% References:
% Aake Bjorck, "Numerical Methods for Least Squares Problems",
% SIAM, Philadelphia, 1996, pp. 68-69.
%
% J.~W. Daniel, W.~B. Gragg, L. Kaufman and G.~W. Stewart,
% ``Reorthogonalization and Stable Algorithms Updating the
% Gram-Schmidt QR Factorization'', Math. Comp., 30 (1976), no.
% 136, pp. 772-795.
%
% B. N. Parlett, ``The Symmetric Eigenvalue Problem'',
% Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109
% Rasmus Munk Larsen, DAIMI, 1998.
% Check input arguments.
% warning('PROPACK:NotUsingMex','Using slow matlab code for reorth.')
if nargin<2
error('Not enough input arguments.')
end
[n k1] = size(Q);
if nargin<3 || isempty(normr)
% normr = norm(r);
normr = sqrt(r'*r);
end
if nargin<4 || isempty(index)
k=k1;
index = [1:k]';
simple = 1;
else
k = length(index);
if k==k1 & index(:)==[1:k]'
simple = 1;
else
simple = 0;
end
end
if nargin<5 || isempty(alpha)
alpha=0.5; % This choice guarantees that
% || Q^T*r_new - e_{k+1} ||_2 <= 2*eps*||r_new||_2,
% cf. Kahan's ``twice is enough'' statement proved in
% Parlett's book.
end
if nargin<6 || isempty(method)
method = 0;
end
if k==0 | n==0
return
end
if nargout>3
s = zeros(k,1);
end
normr_old = 0;
nre = 0;
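% Repeat the orthogonalization sweep while the norm of r drops by more than a factor of alpha; the nre==0 test ensures at least one sweep.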
while normr < alpha*normr_old | nre==0
if method==1
if simple
t = Q'*r;
r = r - Q*t;
else
t = Q(:,index)'*r;
r = r - Q(:,index)*t;
end
else
for i=index,
t = Q(:,i)'*r;
r = r - Q(:,i)*t;
end
end
if nargout>3
s = s + t;
end
normr_old = normr;
% normr = norm(r);
normr = sqrt(r'*r);
nre = nre + 1;
if nre > 4
% r is in span(Q) to full accuracy => accept r = 0 as the new vector.
r = zeros(n,1);
normr = 0;
return
end
end
|
module Issue384 where
postulate
D : (A : Set) → A → Set
data I : Set where
i : I
D′ : (A : Set) → A → I → Set
D′ A x i = D A x
postulate
Q : (A : Set) → A → Set
q : ∀ j A (x : A) → D′ A x j → Q A x
A : Set
x : A
d : D A x
P : (A : Set) → A → Set
p : P (Q _ _) (q _ _ _ d)
|
/* Copyright 2017-2018 PaGMO development team
This file is part of the PaGMO library.
The PaGMO library is free software; you can redistribute it and/or modify
it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free Software
Foundation; either version 3 of the License, or (at your option) any
later version.
or both in parallel, as here.
The PaGMO library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the PaGMO library. If not,
see https://www.gnu.org/licenses/. */
#ifndef PAGMO_IPOPT_HPP
#define PAGMO_IPOPT_HPP
#include <pagmo/config.hpp>
#if defined(PAGMO_WITH_IPOPT)
#include <algorithm>
#include <boost/any.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <exception>
#include <functional>
#include <initializer_list>
#include <iomanip>
#include <iterator>
#include <limits>
#include <map>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
// NOTE: on some Debian-derived distros, this definition
// is apparently necessary:
// https://github.com/casadi/casadi/issues/1010
#define HAVE_CSTDDEF
#include <IpIpoptApplication.hpp>
#include <IpIpoptCalculatedQuantities.hpp>
#include <IpIpoptData.hpp>
#include <IpReturnCodes.hpp>
#include <IpSmartPtr.hpp>
#include <IpTNLP.hpp>
#undef HAVE_CSTDDEF
#include <pagmo/algorithm.hpp>
#include <pagmo/algorithms/not_population_based.hpp>
#include <pagmo/exceptions.hpp>
#include <pagmo/io.hpp>
#include <pagmo/population.hpp>
#include <pagmo/problem.hpp>
#include <pagmo/serialization.hpp>
#include <pagmo/threading.hpp>
#include <pagmo/types.hpp>
#include <pagmo/utils/constrained.hpp>
namespace pagmo
{
namespace detail
{
// Usual trick with global read-only data useful to the Ipopt wrapper.
template <typename = void>
struct ipopt_data {
// A map to link a human-readable description to Ipopt return codes.
// NOTE: in C++11 hashing of enums might not be available. Provide our own.
struct res_hasher {
std::size_t operator()(Ipopt::ApplicationReturnStatus res) const noexcept
{
return std::hash<int>{}(static_cast<int>(res));
}
};
using result_map_t = std::unordered_map<Ipopt::ApplicationReturnStatus, std::string, res_hasher>;
static const result_map_t results;
};
#define PAGMO_DETAIL_IPOPT_RES_ENTRY(name) \
{ \
Ipopt::name, #name " (value = " + std::to_string(static_cast<int>(Ipopt::name)) + ")" \
}
// Static init.
template <typename T>
const typename ipopt_data<T>::result_map_t ipopt_data<T>::results
= {PAGMO_DETAIL_IPOPT_RES_ENTRY(Solve_Succeeded),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Solved_To_Acceptable_Level),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Infeasible_Problem_Detected),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Search_Direction_Becomes_Too_Small),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Diverging_Iterates),
PAGMO_DETAIL_IPOPT_RES_ENTRY(User_Requested_Stop),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Feasible_Point_Found),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Maximum_Iterations_Exceeded),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Restoration_Failed),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Error_In_Step_Computation),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Not_Enough_Degrees_Of_Freedom),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Invalid_Problem_Definition),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Invalid_Option),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Invalid_Number_Detected),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Unrecoverable_Exception),
PAGMO_DETAIL_IPOPT_RES_ENTRY(NonIpopt_Exception_Thrown),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Insufficient_Memory),
PAGMO_DETAIL_IPOPT_RES_ENTRY(Internal_Error)};
#undef PAGMO_DETAIL_IPOPT_RES_ENTRY
// The NLP implementation required by Ipopt's C++ interface.
struct ipopt_nlp final : Ipopt::TNLP {
// Single entry of the log (objevals, objval, n of unsatisfied const, constr. violation, feasibility).
using log_line_type = std::tuple<unsigned long, double, vector_double::size_type, double, bool>;
// The log.
using log_type = std::vector<log_line_type>;
// Some shortcuts from the Ipopt namespace.
using Index = Ipopt::Index;
using Number = Ipopt::Number;
static_assert(std::is_same<Number, double>::value, "");
using SolverReturn = Ipopt::SolverReturn;
using IpoptData = Ipopt::IpoptData;
using IpoptCalculatedQuantities = Ipopt::IpoptCalculatedQuantities;
// Ctor from problem, initial guess, verbosity.
ipopt_nlp(const problem &prob, vector_double start, unsigned verbosity)
: m_prob(prob), m_start(std::move(start)), m_verbosity(verbosity)
{
assert(m_start.size() == prob.get_nx());
// Check the problem is single-objective.
if (m_prob.get_nobj() > 1u) {
pagmo_throw(std::invalid_argument,
std::to_string(m_prob.get_nobj()) + " objectives were detected in the input problem named '"
+ m_prob.get_name()
+ "', but the ipopt algorithm can solve only single-objective problems");
}
// We need the gradient.
if (!m_prob.has_gradient()) {
pagmo_throw(std::invalid_argument, "the ipopt algorithm needs the gradient, but the problem named '"
+ m_prob.get_name() + "' does not provide it");
}
// Prepare the dv used for fitness computation.
m_dv.resize(m_start.size());
// This will contain the solution.
m_sol.resize(m_start.size());
// Final values of the constraints.
m_final_eq.resize(m_prob.get_nec());
m_final_ineq.resize(m_prob.get_nic());
// Conversion of the sparsity information to the format required by Ipopt.
// Gradients first.
{
// NOTE: our format for the gradient sparsity matches almost exactly Ipopt's. The only difference
// is that we also report the sparsity for the objective function's gradient, while Ipopt's jacobian
// contains only constraints' gradients. Thus, we will need to discard the objfun's sparsity
// information and to decrease by one the row indices in the pattern (i.e., a first index of zero in
// a pattern element must refer to the first constraint).
// https://www.coin-or.org/Ipopt/documentation/node22.html
const auto sp = prob.gradient_sparsity();
// Determine where the gradients of the constraints start.
const auto it = std::lower_bound(sp.begin(), sp.end(), sparsity_pattern::value_type(1u, 0u));
// Transform it into the Ipopt format: convert to Index and decrease the first index of each pair by one.
std::transform(it, sp.end(), std::back_inserter(m_jac_sp), [](const sparsity_pattern::value_type &p) {
return std::make_pair(boost::numeric_cast<Index>(p.first - 1u), boost::numeric_cast<Index>(p.second));
});
if (m_prob.has_gradient_sparsity()) {
// Store the objfun gradient sparsity, if user-provided.
std::copy(sp.begin(), it, std::back_inserter(m_obj_g_sp));
}
}
// Hessians.
{
// NOTE: Ipopt requires a single sparsity pattern for the hessian of the lagrangian (that is,
// the pattern must be valid for objfun and all constraints), but we provide a separate sparsity pattern for
// objfun and every constraint. We will thus need to merge our sparsity patterns in a single sparsity
// pattern.
// https://www.coin-or.org/Ipopt/documentation/node22.html
sparsity_pattern merged_sp;
if (m_prob.has_hessians_sparsity()) {
// Store the original hessians sparsity only if it is user-provided.
m_h_sp = m_prob.hessians_sparsity();
for (const auto &sp : m_h_sp) {
// NOTE: we need to create a separate copy each time as std::set_union() requires distinct ranges.
const auto old_merged_sp(merged_sp);
merged_sp.clear();
std::set_union(old_merged_sp.begin(), old_merged_sp.end(), sp.begin(), sp.end(),
std::back_inserter(merged_sp));
}
} else {
// If the hessians sparsity is not user-provided, we don't need the merge operation:
// all patterns are dense and identical. Like this, we avoid using a huge amount of memory
// by calling hessians_sparsity().
merged_sp = detail::dense_hessian(m_prob.get_nx());
}
// Convert into Index pairs.
std::transform(merged_sp.begin(), merged_sp.end(), std::back_inserter(m_lag_sp),
[](const sparsity_pattern::value_type &p) {
return std::make_pair(boost::numeric_cast<Index>(p.first),
boost::numeric_cast<Index>(p.second));
});
}
}
// Default dtor.
~ipopt_nlp() = default;
// Delete everything else.
ipopt_nlp(const ipopt_nlp &) = delete;
ipopt_nlp(ipopt_nlp &&) = delete;
ipopt_nlp &operator=(const ipopt_nlp &) = delete;
ipopt_nlp &operator=(ipopt_nlp &&) = delete;
// Method to return some info about the nlp.
virtual bool get_nlp_info(Index &n, Index &m, Index &nnz_jac_g, Index &nnz_h_lag,
IndexStyleEnum &index_style) override final
{
// NOTE: these try catches and the mechanism to handle exceptions outside the Ipopt
// callbacks are needed because, apparently, Ipopt does not handle gracefully exceptions
// thrown from the callbacks (I suspect this has something to do with the support of
// interfaces for languages other than C++?). This is the same approach we adopt in the
// NLopt wrapper: trap everything in a try/catch block, and store the exception for re-throw
// in ipopt::evolve(). In case of errors we return "false" from the callback, as this
// signals to the Ipopt API that something went wrong.
try {
// Number of dimensions of the problem.
n = boost::numeric_cast<Index>(m_prob.get_nx());
// Total number of constraints.
m = boost::numeric_cast<Index>(m_prob.get_nc());
// Number of nonzero entries in the jacobian.
nnz_jac_g = boost::numeric_cast<Index>(m_jac_sp.size());
// Number of nonzero entries in the hessian of the lagrangian.
nnz_h_lag = boost::numeric_cast<Index>(m_lag_sp.size());
// We use C style indexing (0-based).
index_style = TNLP::C_STYLE;
return true;
// LCOV_EXCL_START
} catch (...) {
m_eptr = std::current_exception();
return false;
// LCOV_EXCL_STOP
}
}
// Method to return the bounds of the problem.
virtual bool get_bounds_info(Index n, Number *x_l, Number *x_u, Index m, Number *g_l, Number *g_u) override final
{
try {
assert(n == boost::numeric_cast<Index>(m_prob.get_nx()));
assert(m == boost::numeric_cast<Index>(m_prob.get_nc()));
(void)n;
// Box bounds.
const auto bounds = m_prob.get_bounds();
// Lower bounds.
std::copy(bounds.first.begin(), bounds.first.end(), x_l);
// Upper bounds.
std::copy(bounds.second.begin(), bounds.second.end(), x_u);
// Equality constraints: lb == ub == 0.
std::fill(g_l, g_l + m_prob.get_nec(), 0.);
std::fill(g_u, g_u + m_prob.get_nec(), 0.);
// Inequality constraints: lb == -inf, ub == 0.
std::fill(g_l + m_prob.get_nec(), g_l + m,
std::numeric_limits<double>::has_infinity ? -std::numeric_limits<double>::infinity()
: std::numeric_limits<double>::lowest());
std::fill(g_u + m_prob.get_nec(), g_u + m, 0.);
return true;
// LCOV_EXCL_START
} catch (...) {
m_eptr = std::current_exception();
return false;
// LCOV_EXCL_STOP
}
}
// Method to return the starting point for the algorithm.
virtual bool get_starting_point(Index n, bool init_x, Number *x, bool init_z, Number *, Number *, Index m,
bool init_lambda, Number *) override final
{
try {
assert(n == boost::numeric_cast<Index>(m_prob.get_nx()));
assert(n == boost::numeric_cast<Index>(m_start.size()));
assert(m == boost::numeric_cast<Index>(m_prob.get_nc()));
(void)n;
(void)m;
if (init_x) {
std::copy(m_start.begin(), m_start.end(), x);
}
// LCOV_EXCL_START
if (init_z) {
pagmo_throw(std::runtime_error,
"we are being asked to provide initial values for the bounds multiplier by "
"the Ipopt API, but in pagmo we do not support them");
}
if (init_lambda) {
pagmo_throw(std::runtime_error,
"we are being asked to provide initial values for the constraints multiplier by "
"the Ipopt API, but in pagmo we do not support them");
}
return true;
} catch (...) {
m_eptr = std::current_exception();
return false;
// LCOV_EXCL_STOP
}
}
// Method to return the objective value.
virtual bool eval_f(Index n, const Number *x, bool new_x, Number &obj_value) override final
{
try {
assert(n == boost::numeric_cast<Index>(m_prob.get_nx()));
// NOTE: the new_x boolean flag will be false if the last call to any of the eval_* function
// used the same x value. Probably we can ignore this in favour of the upcoming caches work.
(void)new_x;
std::copy(x, x + n, m_dv.begin());
const auto fitness = m_prob.fitness(m_dv);
obj_value = fitness[0];
// Update the log if requested.
if (m_verbosity && !(m_objfun_counter % m_verbosity)) {
// Constraints bits.
const auto ctol = m_prob.get_c_tol();
const auto c1eq = detail::test_eq_constraints(fitness.data() + 1, fitness.data() + 1 + m_prob.get_nec(),
ctol.data());
const auto c1ineq
= detail::test_ineq_constraints(fitness.data() + 1 + m_prob.get_nec(),
fitness.data() + fitness.size(), ctol.data() + m_prob.get_nec());
// This will be the total number of violated constraints.
const auto nv = m_prob.get_nc() - c1eq.first - c1ineq.first;
// This will be the norm of the violation.
const auto l = c1eq.second + c1ineq.second;
// Test feasibility.
const auto feas = m_prob.feasibility_f(fitness);
if (!(m_objfun_counter / m_verbosity % 50u)) {
// Every 50 lines print the column names.
print("\n", std::setw(10), "objevals:", std::setw(15), "objval:", std::setw(15),
"violated:", std::setw(15), "viol. norm:", '\n');
}
// Print to screen the log line.
print(std::setw(10), m_objfun_counter + 1u, std::setw(15), obj_value, std::setw(15), nv, std::setw(15),
l, feas ? "" : " i", '\n');
// Record the log.
m_log.emplace_back(m_objfun_counter + 1u, obj_value, nv, l, feas);
}
// Update the counter.
++m_objfun_counter;
return true;
} catch (...) {
m_eptr = std::current_exception();
return false;
}
}
// Method to return the gradient of the objective.
virtual bool eval_grad_f(Index n, const Number *x, bool new_x, Number *grad_f) override final
{
try {
assert(n == boost::numeric_cast<Index>(m_prob.get_nx()));
(void)new_x;
std::copy(x, x + n, m_dv.begin());
// Compute the full gradient (this includes the constraints as well).
const auto gradient = m_prob.gradient(m_dv);
if (m_prob.has_gradient_sparsity()) {
// Sparse gradient case.
auto g_it = gradient.begin();
// First we fill the dense output gradient with zeroes.
std::fill(grad_f, grad_f + n, 0.);
// Then we iterate over the sparsity pattern of the objfun, and fill in the
// nonzero bits in grad_f.
for (auto it = m_obj_g_sp.begin(); it != m_obj_g_sp.end(); ++it, ++g_it) {
assert(it->first == 0u);
assert(g_it != gradient.end());
grad_f[it->second] = *g_it;
}
} else {
// Dense gradient.
std::copy(gradient.data(), gradient.data() + n, grad_f);
}
return true;
} catch (...) {
m_eptr = std::current_exception();
return false;
}
}
// Value of the constraints.
virtual bool eval_g(Index n, const Number *x, bool new_x, Index m, Number *g) override final
{
try {
assert(n == boost::numeric_cast<Index>(m_prob.get_nx()));
assert(m == boost::numeric_cast<Index>(m_prob.get_nc()));
(void)new_x;
std::copy(x, x + n, m_dv.begin());
const auto fitness = m_prob.fitness(m_dv);
// Eq. constraints.
std::copy(fitness.data() + 1, fitness.data() + 1 + m_prob.get_nec(), g);
// Ineq. constraints.
std::copy(fitness.data() + 1 + m_prob.get_nec(), fitness.data() + 1 + m, g + m_prob.get_nec());
return true;
} catch (...) {
m_eptr = std::current_exception();
return false;
}
}
// Method to return:
// 1) The structure of the jacobian (if "values" is NULL)
// 2) The values of the jacobian (if "values" is not NULL)
virtual bool eval_jac_g(Index n, const Number *x, bool new_x, Index m, Index nele_jac, Index *iRow, Index *jCol,
Number *values) override final
{
try {
assert(n == boost::numeric_cast<Index>(m_prob.get_nx()));
assert(m == boost::numeric_cast<Index>(m_prob.get_nc()));
assert(nele_jac == boost::numeric_cast<Index>(m_jac_sp.size()));
(void)new_x;
if (values) {
std::copy(x, x + n, m_dv.begin());
const auto gradient = m_prob.gradient(m_dv);
// NOTE: here we need the gradients of the constraints only, so we need to discard the gradient of the
// objfun. If the gradient sparsity is user-provided, then the size of the objfun sparse gradient is
// m_obj_g_sp.size(), otherwise the gradient is dense and its size is nx.
std::copy(gradient.data() + (m_prob.has_gradient_sparsity() ? m_obj_g_sp.size() : m_prob.get_nx()),
gradient.data() + gradient.size(), values);
} else {
for (decltype(m_jac_sp.size()) k = 0; k < m_jac_sp.size(); ++k) {
iRow[k] = m_jac_sp[k].first;
jCol[k] = m_jac_sp[k].second;
}
}
return true;
} catch (...) {
m_eptr = std::current_exception();
return false;
}
}
// Method to return:
// 1) The structure of the hessian of the lagrangian (if "values" is NULL)
// 2) The values of the hessian of the lagrangian (if "values" is not NULL)
virtual bool eval_h(Index n, const Number *x, bool new_x, Number obj_factor, Index m, const Number *lambda,
bool new_lambda, Index nele_hess, Index *iRow, Index *jCol, Number *values) override final
{
try {
assert(n == boost::numeric_cast<Index>(m_prob.get_nx()));
assert(m == boost::numeric_cast<Index>(m_prob.get_nc()));
assert(nele_hess == boost::numeric_cast<Index>(m_lag_sp.size()));
(void)new_x;
(void)new_lambda;
if (!m_prob.has_hessians()) {
pagmo_throw(
std::invalid_argument,
"the exact evaluation of the Hessian of the Lagrangian was requested, but the problem named '"
+ m_prob.get_name()
+ "' does not provide it. Please consider providing the Hessian or, alternatively, "
"set the option 'hessian_approximation' to 'limited-memory' in the ipopt algorithm options");
}
if (values) {
std::copy(x, x + n, m_dv.begin());
const auto hessians = m_prob.hessians(m_dv);
if (m_prob.has_hessians_sparsity()) {
// Sparse case.
// Objfun first.
assert(hessians[0].size() <= m_lag_sp.size());
auto it_h_sp = m_h_sp[0].begin();
auto it = hessians[0].begin();
assert(hessians[0].size() == m_h_sp[0].size());
// NOTE: the idea here is that we need to fill up values with m_lag_sp.size()
// numbers. Some of these numbers will be zero because, in general, our hessians
// may contain fewer elements. In order to establish which elements to take
// from our hessians and which elements to set to zero, we need to iterate at the
// same time on the original sparsity pattern and compare the indices pairs.
for (decltype(m_lag_sp.size()) i = 0; i < m_lag_sp.size(); ++i) {
// NOTE: static_cast is ok as we already converted via numeric_cast
// earlier.
if (it_h_sp != m_h_sp[0].end() && static_cast<Index>(it_h_sp->first) == m_lag_sp[i].first
&& static_cast<Index>(it_h_sp->second) == m_lag_sp[i].second) {
// This means that we are at a sparsity entry which is both in our original sparsity
// pattern and in the merged one.
assert(it != hessians[0].end());
values[i] = (*it) * obj_factor;
++it;
++it_h_sp;
} else {
// This means we are at a sparsity entry which is in the merged patterns but not in our
// original sparsity pattern. Thus, set the value to zero.
values[i] = 0.;
}
}
// Constraints.
for (decltype(hessians.size()) j = 1; j < hessians.size(); ++j) {
assert(hessians[j].size() <= m_lag_sp.size());
it_h_sp = m_h_sp[j].begin();
it = hessians[j].begin();
assert(hessians[j].size() == m_h_sp[j].size());
// NOTE: the lambda factors refer to the constraints only, hence we need
// to decrease i by 1.
const auto lam = lambda[j - 1u];
for (decltype(m_lag_sp.size()) i = 0; i < m_lag_sp.size(); ++i) {
if (it_h_sp != m_h_sp[j].end() && static_cast<Index>(it_h_sp->first) == m_lag_sp[i].first
&& static_cast<Index>(it_h_sp->second) == m_lag_sp[i].second) {
assert(it != hessians[j].end());
values[i] += (*it) * lam;
++it;
++it_h_sp;
}
}
}
} else {
// Dense case.
// First the objfun.
assert(hessians[0].size() == m_lag_sp.size());
std::transform(hessians[0].begin(), hessians[0].end(), values,
[obj_factor](double a) { return obj_factor * a; });
// The constraints (to be added iteratively to the existing values).
for (decltype(hessians.size()) i = 1; i < hessians.size(); ++i) {
assert(hessians[i].size() == m_lag_sp.size());
// NOTE: the lambda factors refer to the constraints only, hence we need
// to decrease i by 1.
const auto lam = lambda[i - 1u];
std::transform(hessians[i].begin(), hessians[i].end(), values, values,
[lam](double a, double b) { return b + lam * a; });
}
}
} else {
// Fill in the sp of the hessian of the lagrangian.
for (decltype(m_lag_sp.size()) k = 0; k < m_lag_sp.size(); ++k) {
iRow[k] = m_lag_sp[k].first;
jCol[k] = m_lag_sp[k].second;
}
}
return true;
} catch (...) {
m_eptr = std::current_exception();
return false;
}
}
// Solution Methods.
// This method is called when the algorithm is complete so the TNLP can store/write the solution.
// NOTE: no need for try/catch here, nothing can throw.
virtual void finalize_solution(SolverReturn status, Index n, const Number *x, const Number *, const Number *,
Index m, const Number *g, const Number *, Number obj_value, const IpoptData *,
IpoptCalculatedQuantities *) override final
{
assert(n == boost::numeric_cast<Index>(m_prob.get_nx()));
assert(m == boost::numeric_cast<Index>(m_prob.get_nc()));
// Store the solution.
std::copy(x, x + n, m_sol.begin());
// Store the values of the constraints.
std::copy(g, g + m_prob.get_nec(), m_final_eq.begin());
std::copy(g + m_prob.get_nec(), g + m, m_final_ineq.begin());
// Store the final value of the objfun.
m_final_objfun = obj_value;
// Store the status.
m_status = status;
}
// Data members.
// The pagmo problem.
const problem &m_prob;
// Initial guess.
const vector_double m_start;
// Temporary dv used for fitness computation.
vector_double m_dv;
// Dv of the solution.
vector_double m_sol;
// Final values of the constraints.
vector_double m_final_eq;
vector_double m_final_ineq;
// Final value of the objfun.
double m_final_objfun;
// Status at the end of the optimisation.
SolverReturn m_status;
// Sparsity pattern of the gradient of the objfun. We need this for the evaluation
// of the gradient in eval_grad_f(). If the gradient sparsity is not user-provided,
// it will be empty.
sparsity_pattern m_obj_g_sp;
// The original hessians sp from pagmo. We need this if the hessians sparsity
// is user-provided, as we must rebuild the hessian of the lagrangian in Ipopt format.
// If the hessians sparsity is not user-provided, it will be empty.
std::vector<sparsity_pattern> m_h_sp;
// Jacobian sparsity pattern as required by Ipopt: sparse
// rectangular matrix represented as a list of (Row,Col)
// pairs.
// https://www.coin-or.org/Ipopt/documentation/node38.html
std::vector<std::pair<Index, Index>> m_jac_sp;
// Same format for the hessian of the lagrangian (but it's a square matrix).
std::vector<std::pair<Index, Index>> m_lag_sp;
// Verbosity.
const unsigned m_verbosity;
// Objfun counter.
unsigned long m_objfun_counter = 0;
// Log.
log_type m_log;
// This exception pointer will be null, unless an error is raised in one of the virtual methods. If not null, it
// will be re-thrown in the ipopt::evolve() method.
std::exception_ptr m_eptr;
};
} // namespace detail
/// Ipopt.
/**
* \image html ipopt.png "COIN-OR logo." width=3cm
*
* \verbatim embed:rst:leading-asterisk
* .. versionadded:: 2.2
* \endverbatim
*
* This class is a user-defined algorithm (UDA) that wraps the Ipopt (Interior Point OPTimizer) solver,
* a software package for large-scale nonlinear optimization. Ipopt is a powerful solver that
* is able to handle constrained nonlinear optimization problems robustly and efficiently at high dimensionalities.
*
* Ipopt supports only single-objective minimisation, and it requires the availability of the gradient in the
* optimisation problem. If possible, for best results the Hessians should be provided as well (but Ipopt
* can estimate numerically the Hessians if needed).
*
* In order to support pagmo's population-based optimisation model, ipopt::evolve() will select
* a single individual from the input pagmo::population to be optimised.
* If the optimisation produces a better individual (as established by pagmo::compare_fc()),
* the optimised individual will be inserted back into the population.
* The selection and replacement strategies can be configured via set_selection(const std::string &),
* set_selection(population::size_type), set_replacement(const std::string &) and
* set_replacement(population::size_type).
*
* Configuring the optimisation run
* --------------------------------
*
* Ipopt supports a large amount of options for the configuration of the optimisation run. The options
* are divided into three categories:
* - *string* options (i.e., the type of the option is ``std::string``),
* - *integer* options (i.e., the type of the option is ``Ipopt::Index`` - an alias for some integer type, typically
* ``int``),
* - *numeric* options (i.e., the type of the option is ``double``).
*
* The full list of options is available on the
* <a href="https://www.coin-or.org/Ipopt/documentation/node40.html">Ipopt website</a>. pagmo::ipopt allows you to configure
* any Ipopt option via methods such as ipopt::set_string_options(), ipopt::set_string_option(),
* ipopt::set_integer_options(), etc., which need to be used before invoking ipopt::evolve().
*
* If the user does not set any option, pagmo::ipopt will use Ipopt's default values for the options (see the
* <a href="https://www.coin-or.org/Ipopt/documentation/node40.html">documentation</a>), with the following
* modifications:
* - if the ``"print_level"`` integer option is **not** set by the user, it will be set to 0 by pagmo::ipopt (this will
* suppress most screen output produced by the solver - note that we support an alternative form of logging via
* the ipopt::set_verbosity() method);
* - if the ``"hessian_approximation"`` string option is **not** set by the user and the optimisation problem does
* **not** provide the Hessians, then the option will be set to ``"limited-memory"`` by pagmo::ipopt. This makes it
* possible to optimise problems without Hessians out-of-the-box (i.e., Ipopt will approximate numerically the
* Hessians for you);
* - if the ``"constr_viol_tol"`` numeric option is **not** set by the user and the optimisation problem is constrained,
* then pagmo::ipopt will compute the minimum value ``min_tol`` in the vector returned by pagmo::problem::get_c_tol()
* for the optimisation problem at hand. If ``min_tol`` is nonzero, then the ``"constr_viol_tol"`` Ipopt option will
* be set to ``min_tol``, otherwise the default Ipopt value (1E-4) will be used for the option. This ensures that,
* if the constraint tolerance is not explicitly set by the user, a solution deemed feasible by Ipopt is also
* deemed feasible by pagmo (but the opposite is not necessarily true).
*
* \verbatim embed:rst:leading-asterisk
* .. warning::
*
* A moved-from :cpp:class:`pagmo::ipopt` is destructible and assignable. Any other operation will result
* in undefined behaviour.
*
* .. note::
*
* This user-defined algorithm is available only if pagmo was compiled with the ``PAGMO_WITH_IPOPT`` option
* enabled (see the :ref:`installation instructions <install>`).
*
* .. seealso::
*
* https://projects.coin-or.org/Ipopt.
*
* \endverbatim
*/
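// A minimal usage sketch (illustrative only; "my_udp" is a hypothetical
// user-defined problem that provides a gradient):
//
//   pagmo::problem prob{my_udp{}};
//   pagmo::population pop{prob, 20u};
//   pagmo::ipopt uda;
//   uda.set_numeric_option("tol", 1e-9);      // any Ipopt numeric option
//   uda.set_integer_option("print_level", 5); // re-enable Ipopt's own screen output
//   pop = uda.evolve(pop);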
class ipopt : public not_population_based
{
template <typename Pair>
static void opt_checker(bool status, const Pair &p, const std::string &op_type)
{
if (!status) {
pagmo_throw(std::invalid_argument, "failed to set the ipopt " + op_type + " option '" + p.first
+ "' to the value: " + detail::to_string(p.second));
}
}
public:
/// Single data line for the algorithm's log.
/**
* A log data line is a tuple consisting of:
* - the number of objective function evaluations made so far,
* - the objective function value for the current decision vector,
* - the number of constraints violated by the current decision vector,
* - the constraints violation norm for the current decision vector,
* - a boolean flag signalling the feasibility of the current decision vector.
*/
using log_line_type = std::tuple<unsigned long, double, vector_double::size_type, double, bool>;
/// Log type.
/**
* The algorithm log is a collection of ipopt::log_line_type data lines, stored in chronological order
* during the optimisation if the verbosity of the algorithm is set to a nonzero value
* (see ipopt::set_verbosity()).
*/
using log_type = std::vector<log_line_type>;
private:
static_assert(std::is_same<log_line_type, detail::ipopt_nlp::log_line_type>::value, "Invalid log line type.");
public:
/// Evolve population.
/**
* This method will select an individual from \p pop, optimise it with Ipopt, replace an individual in \p pop with
* the optimised individual, and finally return \p pop.
* The individual selection and replacement criteria can be set via set_selection(const std::string &),
* set_selection(population::size_type), set_replacement(const std::string &) and
* set_replacement(population::size_type). The return status of the Ipopt optimisation run will be recorded (it can
* be fetched with get_last_opt_result()).
*
* @param pop the population to be optimised.
*
* @return the optimised population.
*
* @throws std::invalid_argument in the following cases:
* - the population's problem is multi-objective or it does not provide the gradient,
* - the setup of the Ipopt solver options fails (e.g., an invalid option was specified by the user),
* - the components of the individual selected for optimisation contain NaNs or they are outside
* the problem's bounds,
* - the exact evaluation of the Hessians was requested, but the problem does not support it.
* @throws std::runtime_error if the initialization of the Ipopt solver fails.
* @throws unspecified any exception thrown by the public interface of pagmo::problem or
* pagmo::not_population_based.
*/
population evolve(population pop) const
{
if (!pop.size()) {
// In case of an empty pop, just return it.
return pop;
}
auto &prob = pop.get_problem();
// Setup of the initial guess. Store also the original fitness
// of the selected individual, old_f, for later use.
auto sel_xf = select_individual(pop);
vector_double initial_guess(std::move(sel_xf.first)), old_f(std::move(sel_xf.second));
// Check the initial guess.
// NOTE: this should be guaranteed by the population's invariants.
assert(initial_guess.size() == prob.get_nx());
const auto bounds = prob.get_bounds();
for (decltype(bounds.first.size()) i = 0; i < bounds.first.size(); ++i) {
if (std::isnan(initial_guess[i])) {
pagmo_throw(std::invalid_argument,
"the value of the initial guess at index " + std::to_string(i) + " is NaN");
}
if (initial_guess[i] < bounds.first[i] || initial_guess[i] > bounds.second[i]) {
pagmo_throw(std::invalid_argument, "the value of the initial guess at index " + std::to_string(i)
+ " is outside the problem's bounds");
}
}
// Initialize the Ipopt machinery, following the tutorial.
Ipopt::SmartPtr<Ipopt::TNLP> nlp = ::new detail::ipopt_nlp(pop.get_problem(), initial_guess, m_verbosity);
// Store a reference to the derived class for later use.
detail::ipopt_nlp &inlp = dynamic_cast<detail::ipopt_nlp &>(*nlp);
Ipopt::SmartPtr<Ipopt::IpoptApplication> app = ::IpoptApplicationFactory();
app->RethrowNonIpoptException(true);
// Logic for the handling of constraints tolerances. The logic is as follows:
// - if the user provides the "constr_viol_tol" option, use that *unconditionally*. Otherwise,
// - compute the minimum tolerance min_tol among those provided by the problem. If zero, ignore
// it and use the ipopt default value for "constr_viol_tol" (1e-4). Otherwise, use min_tol as the value for
// "constr_viol_tol".
if (prob.get_nc() && !m_numeric_opts.count("constr_viol_tol")) {
const auto c_tol = prob.get_c_tol();
assert(!c_tol.empty());
const double min_tol = *std::min_element(c_tol.begin(), c_tol.end());
if (min_tol > 0.) {
const auto tmp_p = std::make_pair(std::string("constr_viol_tol"), min_tol);
opt_checker(app->Options()->SetNumericValue(tmp_p.first, tmp_p.second), tmp_p, "numeric");
}
}
// Logic for the hessians computation:
// - if the problem does *not* provide the hessians, and the "hessian_approximation" is *not*
// set, then we set it to "limited-memory".
// This way, problems without hessians will work out of the box.
if (!prob.has_hessians() && !m_string_opts.count("hessian_approximation")) {
const auto tmp_p = std::make_pair(std::string("hessian_approximation"), std::string("limited-memory"));
opt_checker(app->Options()->SetStringValue(tmp_p.first, tmp_p.second), tmp_p, "string");
}
// Logic for print_level: change the default to zero.
if (!m_integer_opts.count("print_level")) {
const auto tmp_p = std::make_pair(std::string("print_level"), Ipopt::Index(0));
opt_checker(app->Options()->SetIntegerValue(tmp_p.first, tmp_p.second), tmp_p, "integer");
}
// Set the other options.
for (const auto &p : m_string_opts) {
opt_checker(app->Options()->SetStringValue(p.first, p.second), p, "string");
}
for (const auto &p : m_numeric_opts) {
opt_checker(app->Options()->SetNumericValue(p.first, p.second), p, "numeric");
}
for (const auto &p : m_integer_opts) {
opt_checker(app->Options()->SetIntegerValue(p.first, p.second), p, "integer");
}
// NOTE: Initialize() can take a filename as input, defaults to "ipopt.opt". This is a file
// which is supposed to contain ipopt's options. Since we can set the options from the code,
// let's disable this functionality by passing an empty string.
const Ipopt::ApplicationReturnStatus status = app->Initialize("");
if (status != Ipopt::Solve_Succeeded) {
// LCOV_EXCL_START
pagmo_throw(std::runtime_error,
"the initialisation of the ipopt algorithm failed. The return status code is: "
+ detail::ipopt_data<>::results.at(status));
// LCOV_EXCL_STOP
}
// Run the optimisation.
m_last_opt_res = app->OptimizeTNLP(nlp);
if (m_verbosity) {
// Print to screen the result of the optimisation, if we are being verbose.
std::cout << "\nOptimisation return status: " << detail::ipopt_data<>::results.at(m_last_opt_res) << '\n';
}
// Replace the log.
m_log = std::move(inlp.m_log);
// Handle any exception that might've been thrown.
if (inlp.m_eptr) {
std::rethrow_exception(inlp.m_eptr);
}
// Compute the new fitness vector.
const auto new_f = prob.fitness(inlp.m_sol);
// Store the new individual into the population, but only if better.
if (compare_fc(new_f, old_f, prob.get_nec(), prob.get_c_tol())) {
replace_individual(pop, inlp.m_sol, new_f);
}
// Return the evolved pop.
return pop;
}
/// Get the result of the last optimisation.
/**
* @return the result of the last evolve() call, or ``Ipopt::Solve_Succeeded`` if no optimisations have been
* run yet.
*/
Ipopt::ApplicationReturnStatus get_last_opt_result() const
{
return m_last_opt_res;
}
/// Get the algorithm's name.
/**
* @return <tt>"Ipopt: Interior Point Optimization"</tt>.
*/
std::string get_name() const
{
return "Ipopt: Interior Point Optimization";
}
/// Get extra information about the algorithm.
/**
* @return a human-readable string containing useful information about the algorithm's properties
* (e.g., the Ipopt optimisation options, the selection/replacement policies, etc.).
*/
std::string get_extra_info() const
{
return "\tLast optimisation return code: " + detail::ipopt_data<>::results.at(m_last_opt_res)
+ "\n\tVerbosity: " + std::to_string(m_verbosity) + "\n\tIndividual selection "
+ (boost::any_cast<population::size_type>(&m_select)
? "idx: " + std::to_string(boost::any_cast<population::size_type>(m_select))
: "policy: " + boost::any_cast<std::string>(m_select))
+ "\n\tIndividual replacement "
+ (boost::any_cast<population::size_type>(&m_replace)
? "idx: " + std::to_string(boost::any_cast<population::size_type>(m_replace))
: "policy: " + boost::any_cast<std::string>(m_replace))
+ (m_string_opts.size() ? "\n\tString options: " + detail::to_string(m_string_opts) : "")
+ (m_integer_opts.size() ? "\n\tInteger options: " + detail::to_string(m_integer_opts) : "")
+ (m_numeric_opts.size() ? "\n\tNumeric options: " + detail::to_string(m_numeric_opts) : "") + "\n";
}
/// Set verbosity.
/**
* This method will set the algorithm's verbosity. If \p n is zero, no output is produced during the optimisation
* and no logging is performed. If \p n is nonzero, then every \p n objective function evaluations the status
* of the optimisation will be both printed to screen and recorded internally. See ipopt::log_line_type and
* ipopt::log_type for information on the logging format. The internal log can be fetched via get_log().
*
* Example (verbosity 1):
* @code{.unparsed}
* objevals: objval: violated: viol. norm:
* 1 48.9451 1 1.25272 i
* 2 30.153 1 0.716591 i
* 3 26.2884 1 1.04269 i
* 4 14.6958 2 7.80753 i
* 5 14.7742 2 5.41342 i
* 6 17.093 1 0.0905025 i
* 7 17.1772 1 0.0158448 i
* 8 17.0254 2 0.0261289 i
* 9 17.0162 2 0.00435195 i
* 10 17.0142 2 0.000188461 i
* 11 17.014 1 1.90997e-07 i
* 12 17.014 0 0
* @endcode
* The ``i`` at the end of some rows indicates that the decision vector is infeasible. Feasibility
* is checked against the problem's tolerance.
*
* By default, the verbosity level is zero.
*
* \verbatim embed:rst:leading-asterisk
* .. warning::
*
* The number of constraints violated, the constraints violation norm and the feasibility flag stored in the log
* are all determined via the facilities and the tolerances specified within :cpp:class:`pagmo::problem`. That
* is, they might not necessarily be consistent with Ipopt's notion of feasibility. See the explanation
* of how the ``"constr_viol_tol"`` numeric option is handled in :cpp:class:`pagmo::ipopt`.
*
* .. note::
*
* Ipopt supports its own logging format and protocol, including the ability to print to screen and write to
* file. Ipopt's screen logging is disabled by default (i.e., the Ipopt verbosity setting is set to 0 - see
* :cpp:class:`pagmo::ipopt`). On-screen logging can be enabled via the ``"print_level"`` integer option.
*
* \endverbatim
*
* @param n the desired verbosity level.
*/
void set_verbosity(unsigned n)
{
m_verbosity = n;
}
/// Get the optimisation log.
/**
* See ipopt::log_type for a description of the optimisation log. Logging is turned on/off via
* set_verbosity().
*
* @return a const reference to the log.
*/
const log_type &get_log() const
{
return m_log;
}
/// Save to archive.
/**
* @param ar the target archive.
*
* @throws unspecified any exception thrown by the serialization of primitive types or pagmo::not_population_based.
*/
template <typename Archive>
void save(Archive &ar) const
{
ar(cereal::base_class<not_population_based>(this), m_string_opts, m_integer_opts, m_numeric_opts,
m_last_opt_res, m_verbosity, m_log);
}
/// Load from archive.
/**
* In case of exceptions, \p this will be reset to a default-constructed state.
*
* @param ar the source archive.
*
* @throws unspecified any exception thrown by the deserialization of primitive types or
* pagmo::not_population_based.
*/
template <typename Archive>
void load(Archive &ar)
{
try {
ar(cereal::base_class<not_population_based>(this), m_string_opts, m_integer_opts, m_numeric_opts,
m_last_opt_res, m_verbosity, m_log);
// LCOV_EXCL_START
} catch (...) {
*this = ipopt{};
throw;
// LCOV_EXCL_STOP
}
}
/// Set string option.
/**
* This method will set the optimisation string option \p name to \p value.
* The optimisation options are passed to the Ipopt API when calling evolve().
*
* @param name of the option.
* @param value of the option.
*/
void set_string_option(const std::string &name, const std::string &value)
{
m_string_opts[name] = value;
}
/// Set integer option.
/**
* This method will set the optimisation integer option \p name to \p value.
* The optimisation options are passed to the Ipopt API when calling evolve().
*
* @param name of the option.
* @param value of the option.
*/
void set_integer_option(const std::string &name, Ipopt::Index value)
{
m_integer_opts[name] = value;
}
/// Set numeric option.
/**
* This method will set the optimisation numeric option \p name to \p value.
* The optimisation options are passed to the Ipopt API when calling evolve().
*
* @param name of the option.
* @param value of the option.
*/
void set_numeric_option(const std::string &name, double value)
{
m_numeric_opts[name] = value;
}
/// Set string options.
/**
* This method will set the optimisation string options contained in \p m.
* It is equivalent to calling set_string_option() passing all the name-value pairs in \p m
* as arguments.
*
* @param m the name-value map that will be used to set the options.
*/
void set_string_options(const std::map<std::string, std::string> &m)
{
for (const auto &p : m) {
set_string_option(p.first, p.second);
}
}
/// Set integer options.
/**
* This method will set the optimisation integer options contained in \p m.
* It is equivalent to calling set_integer_option() passing all the name-value pairs in \p m
* as arguments.
*
* @param m the name-value map that will be used to set the options.
*/
void set_integer_options(const std::map<std::string, Ipopt::Index> &m)
{
for (const auto &p : m) {
set_integer_option(p.first, p.second);
}
}
/// Set numeric options.
/**
* This method will set the optimisation numeric options contained in \p m.
* It is equivalent to calling set_numeric_option() passing all the name-value pairs in \p m
* as arguments.
*
* @param m the name-value map that will be used to set the options.
*/
void set_numeric_options(const std::map<std::string, double> &m)
{
for (const auto &p : m) {
set_numeric_option(p.first, p.second);
}
}
/// Get string options.
/**
* @return the name-value map of optimisation string options.
*/
std::map<std::string, std::string> get_string_options() const
{
return m_string_opts;
}
/// Get integer options.
/**
* @return the name-value map of optimisation integer options.
*/
std::map<std::string, Ipopt::Index> get_integer_options() const
{
return m_integer_opts;
}
/// Get numeric options.
/**
* @return the name-value map of optimisation numeric options.
*/
std::map<std::string, double> get_numeric_options() const
{
return m_numeric_opts;
}
/// Clear all string options.
void reset_string_options()
{
m_string_opts.clear();
}
/// Clear all integer options.
void reset_integer_options()
{
m_integer_opts.clear();
}
/// Clear all numeric options.
void reset_numeric_options()
{
m_numeric_opts.clear();
}
/// Thread safety level.
/**
* According to the official Ipopt documentation, it is not safe to use Ipopt in a multithreaded environment.
*
* @return thread_safety::none.
*
* \verbatim embed:rst:leading-asterisk
* .. seealso::
* https://projects.coin-or.org/Ipopt/wiki/FAQ
*
* \endverbatim
*/
thread_safety get_thread_safety() const
{
return thread_safety::none;
}
private:
// Options maps.
std::map<std::string, std::string> m_string_opts;
std::map<std::string, Ipopt::Index> m_integer_opts;
std::map<std::string, double> m_numeric_opts;
// Solver return status.
mutable Ipopt::ApplicationReturnStatus m_last_opt_res = Ipopt::Solve_Succeeded;
// Verbosity/log.
unsigned m_verbosity = 0;
mutable log_type m_log;
};
} // namespace pagmo
PAGMO_REGISTER_ALGORITHM(pagmo::ipopt)
#else // PAGMO_WITH_IPOPT
#error The ipopt.hpp header was included, but pagmo was not compiled with Ipopt support
#endif // PAGMO_WITH_IPOPT
#endif
|
GALLIPOLIS, Ohio — The Cliffside Men’s Senior Golf League will begin its 2018 season on Tuesday, April 24, at Cliffside Golf Course. Registration will be held weekly at 8:15 a.m., followed by a shotgun start at 9 a.m. There is a $5 fee for each competitor during each week of participation.
Players will be grouped into two or more flights, depending on the number of players each week. Weekly pairings are determined by a blind draw.
The top three players in each flight will receive weekly prize money, and each player will earn points towards the overall league championship. Each player plays their own ball and must play at least 10 of the 19 weeks of competitive play to be eligible for end-of-season prize money.
For more information on the Cliffside Men’s Senior Golf League, contact the Cliffside Golf Course at 740-446-4653. |
/- -----------------------------------------------------------------------
Misc facts about Lean.
----------------------------------------------------------------------- -/
namespace qp
namespace stdaux
/-! #brief funext, but for heterogeneous equality.
-/
theorem {ℓ₁ ℓ₂} hfunext
: ∀ {A₁ A₂ : Type ℓ₁}
{B : Type ℓ₂}
{f₁ : A₁ → B} {f₂ : A₂ → B}
(ωA : A₁ = A₂)
(ω : ∀ (a : A₁), f₁ a = f₂ (cast ωA a))
, f₁ == f₂
| A .(A) B f₁ f₂ (eq.refl .(A)) ω
:= begin
apply heq_of_eq,
apply funext,
intro a,
exact ω a
end
/-! #brief The punit type is uniquely inhabited.
-/
theorem {ℓ} punit.uniq
: ∀ (u₁ u₂ : punit.{ℓ})
, u₁ = u₂
| punit.star punit.star := rfl
/-! #brief Equality helper for products.
-/
theorem {ℓ₁ ℓ₂} prod.eq {A : Type ℓ₁} {B : Type ℓ₂}
: ∀ {ab₁ ab₂ : prod A B}
, ab₁^.fst = ab₂^.fst
→ ab₁^.snd = ab₂^.snd
→ ab₁ = ab₂
| (prod.mk a b) (prod.mk .(a) .(b)) (eq.refl .(a)) (eq.refl .(b)) := rfl
/-! #brief The diagonal map.
-/
definition {ℓ} prod.diag {X : Type ℓ} (x : X)
: prod X X
:= (x, x)
/-! #brief Max of two natural numbers.
-/
definition nat.max (a b : ℕ) : ℕ
:= if a ≤ b then b else a
theorem nat.le_max_left (a b : ℕ)
: a ≤ nat.max a b
:= if ω : a ≤ b
then begin
unfold nat.max,
rw if_pos ω,
exact ω
end
else begin
unfold nat.max,
rw if_neg ω
end
theorem nat.le_max_right (a b : ℕ)
: b ≤ nat.max a b
:= if ω : a ≤ b
then begin
unfold nat.max,
rw if_pos ω
end
else begin
unfold nat.max,
rw if_neg ω,
exact le_of_not_le ω
end
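/-! #brief Illustrative sanity checks for nat.max (not in the original file).
-/
example : 3 ≤ nat.max 3 5 := nat.le_max_left 3 5
example : 5 ≤ nat.max 3 5 := nat.le_max_right 3 5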
/-! #brief Handy absurd lemma.
-/
theorem nat.not_lt_add_left
{P : Prop} (n : ℕ)
: ∀ (m : ℕ) (ω : n + m < n)
, P
| 0 ω := absurd ω (nat.lt_irrefl n)
| (nat.succ m) ω := nat.not_lt_add_left m
(by calc n + m < n + nat.succ m : nat.self_lt_succ _
... < n : ω)
/-! #brief Handy absurd lemma.
-/
theorem nat.not_lt_add_right
{P : Prop} (n : ℕ)
: ∀ (m : ℕ) (ω : n + m < m)
, P
| 0 ω := by cases ω
| (nat.succ m) ω := nat.not_lt_add_right m (nat.lt_of_succ_lt_succ ω)
/-! #brief The axiom of choice, for unique existence.
-/
noncomputable definition {ℓ} unique_choice {A : Type ℓ} {P : A → Prop}
: (∃! (a : A), P a) → A
:= λ pr, classical.some (exists_of_exists_unique pr)
/-! #brief The axiom of unique choice satisfies the indicated property.
-/
theorem {ℓ} unique_choice.has_prop {A : Type ℓ} {P : A → Prop}
(pr : ∃! (a : A), P a)
: P (unique_choice pr)
:= classical.some_spec (exists_of_exists_unique pr)
/-! #brief The axiom of unique choice does what you'd think.
-/
theorem {ℓ} unique_choice.simp {A : Type ℓ} {P : A → Prop}
{a : A} {ωa : P a} {ωP : ∀ (b : A), P b → b = a}
: unique_choice (exists_unique.intro a ωa ωP) = a
:= begin
apply ωP,
apply unique_choice.has_prop
end
/-! #brief Mere existence.
-/
definition {ℓ} MerelyExists (A : Type ℓ) : Prop
:= ∃ (a : A), true
/-! #brief Introducing something which merely exists.
-/
definition {ℓ} MerelyExists.intro {A : Type ℓ}
(a : A)
: MerelyExists A
:= exists.intro a true.intro
/-! #brief Elimination of mere existence.
-/
definition {ℓ} MerelyExists.elim {A : Type ℓ}
: ∀ (ex : MerelyExists A)
(P : Prop)
(ω : A → P)
, P
| (exists.intro a ωa) P ω := ω a
/-! #brief The axiom of choice allows us to unbox something which merely exists.
-/
noncomputable definition {ℓ} MerelyExists.choice {A : Type ℓ}
(ex : MerelyExists A)
: A
:= classical.some ex
end stdaux
end qp
|
"""
RBF Kernel
all parameters are in log-scale
"""
struct ArdGaussKernel{T, VT<:AbstractVector{T}} <: AbstractKernel
ll::VT
lσ::VT
end
@functor ArdGaussKernel
function (GK::ArdGaussKernel{T, VT})(x; λ=T(1e-6)) where {T, VT}
length(GK.ll) == size(x, 1) || throw(DimensionMismatch("size of length scale parameter should be the same as number of features"))
n = size(x, 2)
scaled_x = @. x*exp(-GK.ll)
d = square_metric(scaled_x)
σ_square = exp(2*GK.lσ[1])
(@. σ_square*exp(-0.5*d)) + Diagonal(λ*ones(n))
end
function (GK::ArdGaussKernel{T, VT})(x, xo) where {T, VT}
σ_square = exp(2*GK.lσ[1])
scaled_x = @. x*exp(-GK.ll)
scaled_xo = @. xo*exp(-GK.ll)
d = square_metric(scaled_x, scaled_xo)
@. σ_square*exp(-0.5*d)
end
function ArdGaussKernel(n_features::Int)
ll = rand(n_features)
lσ = rand(1)
ArdGaussKernel(ll, lσ)
end
reset(GK::ArdGaussKernel) = ArdGaussKernel(length(GK.ll))
function Base.show(io::IO, kernel::ArdGaussKernel)
print(io, "ArdGaussKernel(")
print(io, "ll=", kernel.ll, ", ", "lσ=", kernel.lσ)
print(io, ")")
end
## Isotropic kernel: a single length scale shared across all features
struct IsoGaussKernel{T, VT<:AbstractVector{T}} <: AbstractKernel
ll::VT
lσ::VT
end
@functor IsoGaussKernel
function (IsoGK::IsoGaussKernel{T, VT})(x; λ=T(1e-6)) where {T, VT}
n = size(x, 2)
scaled_x = x .* exp(-IsoGK.ll[1])
d = square_metric(scaled_x)
σ_square = exp(2*IsoGK.lσ[1])
(@. σ_square*exp(-0.5*d)) + Diagonal(λ*ones(n))
end
function (IsoGK::IsoGaussKernel{T, VT})(x, xo) where {T, VT}
σ_square = exp(2*IsoGK.lσ[1])
scaled_x = x .* exp(-IsoGK.ll[1])
scaled_xo = xo .* exp(-IsoGK.ll[1])
d = square_metric(scaled_x, scaled_xo)
@. σ_square*exp(-0.5*d)
end
function IsoGaussKernel()
ll = rand(1)
lσ = rand(1)
IsoGaussKernel(ll, lσ)
end
reset(IsoGK::IsoGaussKernel) = IsoGaussKernel()
function Base.show(io::IO, kernel::IsoGaussKernel)
print(io, "IsoGaussKernel(")
print(io, "ll=", kernel.ll[1], ", ", "lσ=", kernel.lσ[1])
print(io, ")")
end
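## Illustrative usage sketch (assumes `square_metric` and `AbstractKernel` are
## defined elsewhere in this package, and that LinearAlgebra's `Diagonal` and
## Functors' `@functor` are in scope, as the code above requires):
#
#   k  = ArdGaussKernel(3)         # kernel over 3 input features
#   X  = rand(3, 10)               # 10 points, one column per point
#   K  = k(X)                      # 10×10 covariance matrix (with jitter λ)
#   Kc = k(X, rand(3, 5))          # 10×5 cross-covariance matrix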
|
Require Export Pullback Limits.
Require Import Common.
Set Implicit Arguments.
Generalizable All Variables.
Set Asymmetric Patterns.
Set Universe Polymorphism.
Section subobject_classifier.
(** Quoting Wikipedia:
For the general definition, we start with a category [C] that has
a terminal object, which we denote by [1]. The object [Ω] of [C]
is a subobject classifier for [C] if there exists a morphism [m :
1 → Ω] with the following property: for each monomorphism [j : U →
X] there is a unique morphism [χj : X → Ω] such that the following
commutative diagram:
[[
U ----> 1
| |
j | | m
↓ ↓
X ----> Ω
χj
]]
is a pullback diagram — that is, [U] is the limit of the diagram:
[[
1
|
| m
↓
X ----> Ω
χj
]]
The morphism [χj] is then called the classifying morphism for the
subobject represented by [j].
*)
(** Quoting nCatLab:
Definition 1. In a category [C] with finite limits, a subobject
classifier is a monomorphism [true : * → Ω] out of the terminal
object, such that for every monomorphism [U → X] in [C] there is a unique
morphism [χU : X → Ω] such that there is a pullback diagram
[[
U ----> *
| |
| | true
↓ ↓
X ----> Ω
χU
]]
See for instance (MacLane-Moerdijk, p. 22).
*)
Context `(C : @SpecializedCategory objC).
Local Reserved Notation "'Ω'".
Record SubobjectClassifier :=
{
SubobjectClassifierOne : TerminalObject C where "1" := (TerminalObject_Object SubobjectClassifierOne);
ObjectOfTruthValues : C where "'Ω'" := ObjectOfTruthValues;
TrueValue : C.(Morphism) 1 Ω;
TrueIsMonomorphism : IsMonomorphism TrueValue;
SubobjectClassifyingMap : forall U X (j : C.(Morphism) U X),
IsMonomorphism j
-> { χj : Morphism C X Ω &
{ H : Compose χj j =
Compose TrueValue (TerminalObject_Morphism SubobjectClassifierOne U)
& IsPullbackGivenMorphisms
X 1 Ω
χj TrueValue
U j
(TerminalObject_Morphism SubobjectClassifierOne U)
H } }
}.
End subobject_classifier.
|
\documentclass[author-year, review, 11pt]{components/elsarticle} %review=doublespace preprint=single 5p=2 column
%%% Begin My package additions %%%%%%%%%%%%%%%%%%%
\usepackage[hyphens]{url}
\usepackage{lineno} % add
\linenumbers % turns line numbering on
\bibliographystyle{elsarticle-harv}
\biboptions{sort&compress} % For natbib
\usepackage{graphicx}
\usepackage{booktabs} % book-quality tables
%% Redefines the elsarticle footer
\makeatletter
\def\ps@pprintTitle{%
\let\@oddhead\@empty
\let\@evenhead\@empty
\def\@oddfoot{\it \hfill\today}%
\let\@evenfoot\@oddfoot}
\makeatother
% A modified page layout
\textwidth 6.75in
\oddsidemargin -0.15in
\evensidemargin -0.15in
\textheight 9in
\topmargin -0.5in
%%%%%%%%%%%%%%%% end my additions to header
\usepackage[T1]{fontenc}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\usepackage{fontspec}
\ifxetex
\usepackage{xltxtra,xunicode}
\fi
\defaultfontfeatures{Mapping=tex-text,Scale=MatchLowercase}
\newcommand{\euro}{€}
\fi
% use microtype if available
\IfFileExists{microtype.sty}{\usepackage{microtype}}{}
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{{#1}}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{{#1}}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{{#1}}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{{#1}}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{{#1}}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{{#1}}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{{#1}}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{{#1}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{{#1}}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{{#1}}}
\newcommand{\RegionMarkerTok}[1]{{#1}}
\newcommand{\ErrorTok}[1]{\textbf{{#1}}}
\newcommand{\NormalTok}[1]{{#1}}
\ifxetex
\usepackage[setpagesize=false, % page size defined by xetex
unicode=false, % unicode breaks when used with xetex
xetex]{hyperref}
\else
\usepackage[unicode=true]{hyperref}
\fi
\hypersetup{breaklinks=true,
bookmarks=true,
pdfauthor={},
pdftitle={RNeXML: a package for reading and writing richly annotated phylogenetic, character, and trait data in R},
colorlinks=true,
urlcolor=blue,
linkcolor=magenta,
pdfborder={0 0 0}}
\urlstyle{same} % don't use monospace font for urls
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\setcounter{secnumdepth}{0}
% Pandoc toggle for numbering sections (defaults to be off)
\setcounter{secnumdepth}{0}
% Pandoc header
\begin{document}
\begin{frontmatter}
\title{RNeXML: a package for reading and writing richly annotated phylogenetic,
character, and trait data in R}
\author[ucb]{Carl Boettiger\corref{c1}}
\ead{cboettig(at)gmail.com}
\cortext[c1]{Corresponding author}
\author[ropensci]{Scott Chamberlain}
\author[NBC]{Rutger Vos}
\author[dukeplus]{Hilmar Lapp}
\address[ucb]{University of California, Berkeley, 130 Mulford Hall \#3114, Berkeley,
CA 94720-3114, USA}
\address[ropensci]{University of California, Berkeley, CA, USA}
\address[NBC]{Naturalis Biodiversity Center, Leiden, the Netherlands}
\address[dukeplus]{Center for Genomic and Computational Biology, Duke University, and
National Evolutionary Synthesis Center, Durham, NC, USA}
\begin{abstract}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
NeXML is a powerful and extensible exchange standard recently proposed
to better meet the expanding needs for phylogenetic data and metadata
sharing. Here we present the RNeXML package, which provides users of
the R programming language with easy-to-use tools for reading and
writing NeXML documents, including rich metadata, in a way that
interfaces seamlessly with the extensive library of phylogenetic tools
already available in the R ecosystem.
\item
Wherever possible, we designed RNeXML to map NeXML document contents,
whose arrangement is influenced by the format's XML Schema definition,
to their most intuitive or useful representation in R. To make NeXML's
powerful facility for recording semantically rich machine-readable
metadata accessible to R users, we designed a functional programming
interface to it that hides the semantic web standards leveraged by
NeXML from R users who are unfamiliar with them.
\item
RNeXML can read any NeXML document that validates, and it generates
valid NeXML documents from phylogeny and character data in various R
representations in use. The metadata programming interface at a basic
level aids fulfilling data documentation best practices, and at an
advanced level preserves NeXML's nearly limitless extensibility, for
which we provide a fully working demonstration. Furthermore, to lower
the barriers to sharing well-documented phylogenetic data, RNeXML has
started to integrate with taxonomic metadata augmentation services on
the web, and with online repositories for data archiving.
\item
RNeXML allows R's rich ecosystem to read and write data in the NeXML
format through an interface that is no more involved than reading or
writing data from other, less powerful data formats. It also provides
an interface designed to feel familiar to R programmers and to be
consistent with recommended practices for R package development, yet
that retains the full power for users to add their own custom data and
metadata to the phylogenies they work with, without introducing
potentially incompatible changes to the exchange standard.
\end{enumerate}
\end{abstract}
\end{frontmatter}
\section{Introduction}\label{introduction}
Users of the popular statistical and mathematical computing platform R
(R Core Team 2014) enjoy a wealth of readily installable comparative
phylogenetic methods and tools (O'Meara 2014). Exploiting the
opportunities arising from this wealth for complex and integrative
comparative research questions relies on the ability to reuse and
integrate previously generated or published data and metadata. The
expanding data exchange needs of the evolutionary research community are
rapidly outpacing the capabilities of most current and widely used data
exchange standards (Vos \emph{et al.} 2012), which were all developed a
decade or more ago. This has resulted in a radiation of different data
representations and exchange standard ``flavors'' that are no longer
interoperable at the very time when the growth of available data and
methods has made that interoperability most valuable. In response to the
unmet needs for standardized data exchange in phylogenetics, a modern
XML-based exchange standard, called NeXML, has recently been developed
(Vos \emph{et al.} 2012). NeXML comprehensively supports current data
exchange needs, is predictably machine-readable, and is forward
compatible.
The exchange problem for phylogenetic data is particularly acute in
light of the challenges in finding and sharing phylogenetic data without
the otherwise common loss of most data and metadata semantics (Stoltzfus
\emph{et al.} 2012; Drew \emph{et al.} 2013; Cranston \emph{et al.}
2014). For example, the still popular NEXUS file format (Maddison
\emph{et al.} 1997) cannot consistently represent horizontal gene
transfer or ambiguity in reading a character (such as a DNA sequence
base pair). This and other limitations have led to modifications of
NEXUS in different ways for different needs, with the unfortunate result
that NEXUS files generated by one program can be incompatible with
another (Vos \emph{et al.} 2012). Without a formal grammar, software
based on NEXUS files may also make inconsistent assumptions about
tokens, quoting, or element lengths. Vos et al. (2012) estimate that as
many as 15\% of the NEXUS files in the CIPRES portal contain
unrecoverable but hard-to-diagnose errors.
A detailed account of how the NeXML standard addresses these and other
relevant challenges can be found in Vos et al. (2012). In brief, NeXML
was designed with the following important properties. First, NeXML is
defined by a precise grammar that can be programmatically
\textbf{validated}; i.e., it can be verified whether a file precisely
follows this grammar, which makes it predictable whether a file can be
read (parsed) without errors by software that uses the NeXML grammar
(e.g.~RNeXML).
representations of new, previously unanticipated information (as we will
illustrate) without violating its defining grammar. Third and most
importantly, NeXML is rich in \textbf{computable semantics}: it is
designed for expressing metadata such that machines can understand their
meaning and make inferences from it. For example, OTUs in a tree or
character matrix for frog species can be linked to concepts in a
formally defined hierarchy of taxonomic concepts such as the Vertebrate
Taxonomy Ontology (Midford \emph{et al.} 2013), which enables a machine
to infer that a query for amphibia is to include the frog data in what
is returned. (For a broader discussion of the value of such
capabilities for evolutionary and biodiversity science we refer the
reader to Parr et al. (2011).)
To make the capabilities of NeXML available to R users in an easy-to-use
form, and to lower the hurdles to adoption of the standard, we present
RNeXML, an R package that aims to provide easy programmatic access to
reading and writing NeXML documents, tailored for the kinds of use-cases
that will be common for users and developers of the wealth of
evolutionary analysis methods within the R ecosystem.
\section{The RNeXML package}\label{the-rnexml-package}
The \texttt{RNeXML} package is written entirely in R and available under
a Creative Commons Zero public domain waiver. The current development
version can be found on Github at
\url{https://github.com/ropensci/RNeXML}, and the stable
version can be installed from the CRAN repository. \texttt{RNeXML} is
part of the rOpenSci project. Users of \texttt{RNeXML} are encouraged to
submit bug reports or feature requests in the issues log on Github, or
the phylogenetics R users group list at
\texttt{[email protected]} for help. Vignettes with more
detailed examples of specific features of RNeXML are distributed with
the R package and serve as a supplement to this manuscript. Each of the
vignettes can be found at
\url{http://ropensci.github.io/RNeXML/}.
\subsection{Representation of NeXML documents in
R}\label{representation-of-nexml-documents-in-r}
Conceptually, a NeXML document has the following components: (1)
phylogeny topology and branch length data, (2) character or trait data
in matrix form, (3) operational taxonomic units (OTUs), and (4)
metadata. To represent the contents of a NeXML document (currently in
memory), \texttt{RNeXML} defines the \texttt{nexml} object type. This
type therefore holds phylogenetic trees as well as character or trait
matrices, and all metadata, which is similar to the phylogenetic data
object types defined in the \texttt{phylobase} package (NESCENT R
Hackathon Team 2014), but contrasts with the more widely used ones
defined in the \texttt{ape} package (Paradis \emph{et al.} 2004), which
represent trees alone.
When reading and writing NeXML documents, \texttt{RNeXML} aims to map
their components to and from, respectively, their most widely used
representations in R. As a result, the types of objects accepted or
returned by the package's methods are the \texttt{phylo} and
\texttt{multiPhylo} objects from the \texttt{ape} package (Paradis
\emph{et al.} 2004) for phylogenies, and R's native \texttt{data.frame}
list structure for data matrices.
\subsection{Reading phylogenies and character
data}\label{reading-phylogenies-and-character-data}
The method \texttt{nexml\_read()} reads NeXML files, either from a local
file, or from a remote location via its URL, and returns an object of
type \texttt{nexml}:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{nex <-}\StringTok{ }\KeywordTok{nexml_read}\NormalTok{(}\StringTok{"components/trees.xml"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
The method \texttt{get\_trees\_list()} can be used to extract the
phylogenies as an \texttt{ape::multiPhylo} object, which can be treated
as a list of \texttt{ape::phylo} objects:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{phy <-}\StringTok{ }\KeywordTok{get_trees_list}\NormalTok{(nex)}
\end{Highlighting}
\end{Shaded}
The \texttt{get\_trees\_list()} method is designed for use in scripts,
providing a consistent and predictable return type regardless of the
number of phylogenies a NeXML document contains. For greater convenience
in interactive use, the method \texttt{get\_trees()} returns the R
object most intuitive given the arrangement of phylogeny data in the
source NeXML document. For example, the method returns an
\texttt{ape::phylo} object if the NeXML document contains a single
phylogeny, an \texttt{ape::multiPhylo} object if it contains multiple
phylogenies arranged in a single \texttt{trees} block, and a list of
\texttt{ape::multiPhylo} objects if it contains multiple \texttt{trees}
blocks (the capability for which NeXML inherits from NEXUS).
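For instance, the following call (shown for illustration) applied to the
document read above returns whichever of these representations matches the
document's arrangement:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{phy <-}\StringTok{ }\KeywordTok{get_trees}\NormalTok{(nex)}
\end{Highlighting}
\end{Shaded}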
If the location parameter with which the \texttt{nexml\_read()} method
is invoked is recognized as a URL, the method will automatically
download the document to the local working directory and read it from
there. This gives convenient and rapid access to phylogenetic data
published in NeXML format on the web, such as the content of the
phylogenetic data repository TreeBASE (Piel \emph{et al.} 2002, 2009).
For example, the following plots a tree in TreeBASE (using ape's plot
function):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{tb_nex <-}\StringTok{ }\KeywordTok{nexml_read}\NormalTok{(}
\StringTok{"https://raw.github.com/TreeBASE/supertreebase/master/data/treebase/S100.xml"}\NormalTok{)}
\NormalTok{tb_phy <-}\StringTok{ }\KeywordTok{get_trees_list}\NormalTok{(tb_nex)}
\KeywordTok{plot}\NormalTok{(tb_phy[[}\DecValTok{1}\NormalTok{]]) }
\end{Highlighting}
\end{Shaded}
The method \texttt{get\_characters()} obtains character data matrices
from a \texttt{nexml} object, and returns them as a standard
\texttt{data.frame} R object with columns as characters and rows as
taxa:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{nex <-}\StringTok{ }\KeywordTok{nexml_read}\NormalTok{(}\StringTok{"components/comp_analysis.xml"}\NormalTok{)}
\KeywordTok{get_characters}\NormalTok{(nex)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
log snout-vent length reef-dwelling
taxon_8 -3.2777799 0
taxon_9 2.0959433 1
taxon_10 3.1373971 0
taxon_1 4.7532824 1
taxon_2 -2.7624146 0
taxon_3 2.1049413 0
taxon_4 -4.9504770 0
taxon_5 1.2714718 1
taxon_6 6.2593966 1
taxon_7 0.9099634 1
\end{verbatim}
A NeXML data matrix can be of molecular (for molecular sequence
alignments), discrete (for most morphological character data), or
continuous type (for many trait data). To enable strict validation of
data types, NeXML allows, and if their data types differ requires,
multiple data matrices to be separated into different ``blocks''. Since
the \texttt{data.frame} data structure in R has no such constraints, the
\texttt{get\_characters()} method combines such blocks as separate
columns into a single \texttt{data.frame} object, provided they
correspond to the same taxa. Otherwise, a list of \texttt{data.frame}s
is returned, with list elements corresponding to characters blocks.
Similar to the methods for obtaining trees, there is also a method
\texttt{get\_characters\_list()}, which always returns a list of
\texttt{data.frame}s, one for each character block.
\subsection{Writing phylogenies and character
data}\label{writing-phylogenies-and-character-data}
The method \texttt{nexml\_write()} generates a NeXML file from its input
parameters. In its simplest invocation, the method writes a tree to a
file:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{data}\NormalTok{(bird.orders)}
\KeywordTok{nexml_write}\NormalTok{(bird.orders, }\DataTypeTok{file =} \StringTok{"birds.xml"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
The first argument to \texttt{nexml\_write()} is either an object of
type \texttt{nexml}, or any object that can be coerced to it, such as in
the above example an \texttt{ape::phylo} phylogeny. Alternatively,
passing a \texttt{multiPhylo} object would write a list of phylogenies
to the file.
In addition to trees, the \texttt{nexml\_write()} method also allows one to
specify character data as another parameter. The following example uses
data from the comparative phylogenetics R package \texttt{geiger}
(Pennell \emph{et al.} 2014).
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(}\StringTok{"geiger"}\NormalTok{)}
\KeywordTok{data}\NormalTok{(geospiza)}
\KeywordTok{nexml_write}\NormalTok{(}\DataTypeTok{trees =} \NormalTok{geospiza$phy, }
\DataTypeTok{characters =} \NormalTok{geospiza$dat,}
\DataTypeTok{file=}\StringTok{"geospiza.xml"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
Note that the NeXML format is well-suited for incomplete data: for
instance, here it does not assume the character matrix has data for
every tip in the tree.
\subsection{Validating NeXML}\label{validating-nexml}
File validation is a central feature of the NeXML format which ensures
that any properly implemented NeXML parser will always be able to read
the NeXML file. The \texttt{nexml\_validate()} function takes the path to any NeXML file and
returns \texttt{TRUE} to indicate a valid file, or \texttt{FALSE}
otherwise, along with a display of any error messages generated by the
validator.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{nexml_validate}\NormalTok{(}\StringTok{"geospiza.xml"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
[1] TRUE
\end{verbatim}
The \texttt{nexml\_validate()} function performs this validation using
the online NeXML validator (when a network connection is available),
which performs additional checks not expressed in the NeXML schema
itself (Vos \emph{et al.} 2012). If a network connection is not
available, the function falls back on the schema validation method from
the \texttt{XML} package (Lang 2013).
\subsection{Creating and populating \texttt{nexml}
objects}\label{creating-and-populating-nexml-objects}
Instead of packaging the various components for a NeXML file at the time
of writing the file, \texttt{RNeXML} also allows users to create and
iteratively populate in-memory \texttt{nexml} objects. The methods to do
this are \texttt{add\_characters()}, \texttt{add\_trees()}, and
\texttt{add\_meta()}, for adding characters, trees, and metadata,
respectively. Each of these functions will automatically create a new
nexml object if not supplied with an existing one as the last (optional)
argument.
For example, here we use \texttt{add\_trees()} to first create a
\texttt{nexml} object with the phylogeny data, and then add the
character data to it:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{nexObj <-}\StringTok{ }\KeywordTok{add_trees}\NormalTok{(geospiza$phy)}
\NormalTok{nexObj <-}\StringTok{ }\KeywordTok{add_characters}\NormalTok{(geospiza$dat, nexObj)}
\end{Highlighting}
\end{Shaded}
The data with which a \texttt{nexml} object is populated need not share
the same OTUs. \texttt{RNeXML} automatically adds new, separate OTU
blocks into the NeXML file for each data matrix and tree that uses a
different set of OTUs.
Other than storage size, there is no limit to the number of phylogenies
and character matrices that can be included in a single NeXML document.
This allows, for example, to capture samples from a posterior
probability distribution of inferred or simulated phylogenies and
character states in a single NeXML file.
\subsection{Data documentation and annotation with built-in
metadata}\label{data-documentation-and-annotation-with-built-in-metadata}
NeXML allows attaching (``\emph{annotating}'') metadata to any data
element, and even to metadata themselves. Whether at the level of the
document as a whole or an individual data matrix or phylogeny, metadata
can provide bibliographic and provenance information, for example about
the study as part of which the phylogeny was generated or applied, which
data matrix and which methods were used to generate it. Metadata can
also be attached to very specific elements of the data, such as specific
traits, individual OTUs, nodes, or even edges of the phylogeny.
As described in Vos et al. (2012), to encode metadata annotations NeXML
uses the ``Resource Description Framework in Annotations'' (RDFa)
(Prud'hommeaux 2014). This standard provides for a strict
machine-readable format yet enables future backwards compatibility with
compliant NeXML parsers (and thus \texttt{RNeXML}), because the capacity
of a tool to \emph{parse} annotations is not predicated on
\emph{understanding} the meaning of annotations it has not seen before.
To lower the barriers to sharing well-documented phylogenetic data,
\texttt{RNeXML} aims to make recording useful and machine-readable
metadata easier at several levels.
First, when writing a NeXML file the package adds certain basic metadata
automatically if they are absent, using default values consistent with
recommended best practices (Cranston \emph{et al.} 2014). Currently,
this includes naming the software generating the NeXML, a time-stamp of
when a tree was produced, and an open data license. These are merely
default arguments to \texttt{add\_basic\_meta()} and can be configured.
Second, \texttt{RNeXML} provides a simple method, called
\texttt{add\_basic\_meta()}, to set metadata attributes commonly
recommended for inclusion with data to be publicly archived or shared
(Cranston \emph{et al.} 2014). The currently accepted parameters include
\texttt{title}, \texttt{description}, \texttt{creator},
\texttt{pubdate}, \texttt{rights}, \texttt{publisher}, and
\texttt{citation}. Behind the scenes the method automatically anchors
these attributes in common vocabularies (such as Dublin Core).
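For example, the following illustrative call (which, like the other
\texttt{add\_} methods described above, takes an existing \texttt{nexml}
object as its optional last argument) sets two of these attributes:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{nexObj <-}\StringTok{ }\KeywordTok{add_basic_meta}\NormalTok{(}\DataTypeTok{title =} \StringTok{"Geospiza phylogeny and traits"}\NormalTok{,}
\DataTypeTok{creator =} \StringTok{"Example Author"}\NormalTok{, nexObj)}
\end{Highlighting}
\end{Shaded}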
Third, \texttt{RNeXML} integrates with the R package \texttt{taxize}
(Chamberlain \& Sz{ö}cs 2013) to mitigate one of the most common
obstacles to reuse of phylogenetic data, namely the misspellings and
inconsistent taxonomic naming with which OTU labels are often fraught.
The \texttt{taxize\_nexml()} method in \texttt{RNeXML} uses
\texttt{taxize} to match OTU labels against the NCBI database, and,
where a unique match is found, it annotates the respective OTU with the
matching NCBI identifier.
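An illustrative invocation (we sketch only the simplest form here; the
package documentation describes the full signature):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{nexObj <-}\StringTok{ }\KeywordTok{taxize_nexml}\NormalTok{(nexObj)}
\end{Highlighting}
\end{Shaded}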
\subsection{Data annotation with custom
metadata}\label{data-annotation-with-custom-metadata}
The \texttt{RNeXML} interface described above for built-in metadata
allows users to create precise and semantically rich annotations without
confronting any of the complexity of namespaces and ontologies.
Nevertheless, advanced users may desire the explicit control over these
semantic tools that takes full advantage of the flexibility and
extensibility of the NeXML specification (Parr \emph{et al.} 2011; Vos
\emph{et al.} 2012). In this section we detail how to accomplish these
more complex uses in RNeXML.
Using a vocabulary or ontology terms rather than simple text strings to
describe data is crucial for allowing machines to not only parse but
also interpret and potentially reason over their semantics. To achieve
this benefit for custom metadata extensions, the user necessarily needs
to handle certain technical details from which the \texttt{RNeXML}
interface shields her otherwise, in particular the globally unique
identifiers (normally HTTP URIs) of metadata terms and vocabularies. To
be consistent with XML terminology, \texttt{RNeXML} calls vocabulary
URIs \emph{namespaces}, and their abbreviations \emph{prefixes}. For
example, the namespace for the Dublin Core Metadata Terms vocabulary is
``\url{http://purl.org/dc/elements/1.1/}''. Using its common
abbreviation ``dc'', a metadata property ``dc:title'' expands to the
identifier ``\url{http://purl.org/dc/elements/1.1/title}''. This URI
resolves to a human and machine-readable (depending on access)
definition of precisely what the term \texttt{title} in Dublin Core
means. In contrast, just using the text string ``title'' could also mean
the title of a person, a legal title, the verb title, etc. URI
identifiers of metadata vocabularies and terms are not mandated to
resolve, but if machines are to derive the maximum benefit from them,
they should resolve to a definition of their semantics in RDF.
\texttt{RNeXML} includes methods to obtain and manipulate metadata
properties, values, identifiers, and namespaces. The
\texttt{get\_namespaces()} method accepts a \texttt{nexml} object and
returns a named list of namespace prefixes and their corresponding
identifiers known to the object:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{birds <-}\StringTok{ }\KeywordTok{nexml_read}\NormalTok{(}\StringTok{"birds.xml"}\NormalTok{)}
\NormalTok{prefixes <-}\StringTok{ }\KeywordTok{get_namespaces}\NormalTok{(birds)}
\NormalTok{prefixes[}\StringTok{"dc"}\NormalTok{]}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
dc
"http://purl.org/dc/elements/1.1/"
\end{verbatim}
The \texttt{get\_metadata()} method returns, as a named list, the
metadata annotations for a given \texttt{nexml} object at a given level,
with the whole NeXML document being the default level (\texttt{"all"}
extracts all metadata objects):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{meta <-}\StringTok{ }\KeywordTok{get_metadata}\NormalTok{(birds) }
\NormalTok{otu_meta <-}\StringTok{ }\KeywordTok{get_metadata}\NormalTok{(birds, }\DataTypeTok{level=}\StringTok{"otu"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
The returned list does not include the data elements to which the
metadata are attached. Therefore, a different approach, documented in
the metadata vignette, is recommended for accessing the metadata
attached to data elements.
The \texttt{meta()} method creates a new metadata object from a property
name and content (value). For example, the following creates a
modification date metadata object, using a property in the PRISM
vocabulary:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modified <-}\StringTok{ }\KeywordTok{meta}\NormalTok{(}\DataTypeTok{property =} \StringTok{"prism:modificationDate"}\NormalTok{, }\DataTypeTok{content =} \StringTok{"2013-10-04"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
Metadata annotations in \texttt{NeXML} can be nested within another
annotation, which the \texttt{meta()} method accommodates by accepting a
parameter \texttt{children}, with the list of nested metadata objects
(which can themselves be nested) as value.
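For example, the modification date created above could be nested under a
broader annotation (an illustrative sketch using a Dublin Core property):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{provenance <-}\StringTok{ }\KeywordTok{meta}\NormalTok{(}\DataTypeTok{property =} \StringTok{"dc:description"}\NormalTok{,}
\DataTypeTok{children =} \KeywordTok{list}\NormalTok{(modified))}
\end{Highlighting}
\end{Shaded}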
The \texttt{add\_meta()} function adds metadata objects as annotations
to a \texttt{nexml} object at a specified level, with the default level
being the NeXML document as a whole:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{birds <-}\StringTok{ }\KeywordTok{add_meta}\NormalTok{(modified, birds) }
\end{Highlighting}
\end{Shaded}
If the prefix used by the metadata property is not among the built-in
ones (which can be obtained using \texttt{get\_namespaces()}), it has to
be provided along with its URI as the \texttt{namespaces} parameter. For
example, the following uses the
``\href{http://www.w3.org/TR/skos-reference/}{Simple Knowledge
Organization System}'' (SKOS) vocabulary to add a note to the trees in
the \texttt{nexml} object:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{history <-}\StringTok{ }\KeywordTok{meta}\NormalTok{(}\DataTypeTok{property =} \StringTok{"skos:historyNote"}\NormalTok{,}
\DataTypeTok{content =} \StringTok{"Mapped from the bird.orders data in the ape package using RNeXML"}\NormalTok{)}
\NormalTok{birds <-}\StringTok{ }\KeywordTok{add_meta}\NormalTok{(history, }
\NormalTok{birds, }
\DataTypeTok{level =} \StringTok{"trees"}\NormalTok{,}
\DataTypeTok{namespaces =} \KeywordTok{c}\NormalTok{(}\DataTypeTok{skos =} \StringTok{"http://www.w3.org/2004/02/skos/core#"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
Alternatively, additional namespaces can also be added in batch using
the \texttt{add\_namespaces()} method.
By virtue of subsetting the S4 \texttt{nexml} object, \texttt{RNeXML}
also offers fine control of where a \texttt{meta} element is added, for
which the package vignette on S4 subsetting of \texttt{nexml} contains
examples.
Because NeXML expresses all metadata using the RDF standard, and stores
them compliant with RDFa, they can be extracted as an RDF graph,
queried, analyzed, and mashed up with other RDF data, local or on the
web, using a wealth of off-the-shelf tools for working with RDF (see
Prud'hommeaux (2014) or Hartig (2012)). Examples for these possibilities
are included in the \texttt{RNeXML} SPARQL vignette (a recursive acronym
for SPARQL Protocol and RDF Query Language, see
\url{http://www.w3.org/TR/rdf-sparql-query/}), and the package also
comes with a demonstration that can be run from R using the following
command: \texttt{demo("sparql", "RNeXML")}.
\subsection{Using metadata to extend the NeXML
standard}\label{using-metadata-to-extend-the-nexml-standard}
NeXML was designed to prevent the need for future non-interoperable
``flavors'' of the standard in response to new research directions. Its
solution to this inevitable problem is a highly flexible metadata system
without sacrificing strict validation of syntax and structure.
Here we illustrate how \texttt{RNeXML}'s interface to NeXML's metadata
system can be used to record and share a type of phylogenetic data not
taken into account when NeXML was designed, in this case stochastic
character maps (Huelsenbeck \emph{et al.} 2003). Such data assign
certain parts (corresponding to time) of each branch in a
time-calibrated phylogeny to a particular ``state'' (typically of a
morphological characteristic). The current de-facto format for sharing
stochastic character maps, created by \texttt{simmap} (Bollback 2006), a
widely used tool for creating such maps, is a non-interoperable
modification of the standard Newick tree format. This means that
computer programs designed to read Newick or NEXUS formats may fail when
trying to read in a phylogeny that includes \texttt{simmap} annotations.
In contrast, by allowing new data types to be added as --- sometimes
complex --- metadata annotations, NeXML can accommodate data extensions
without compromising its grammar and thus its syntax. To
illustrate how RNeXML facilitates extending the NeXML standard in this
way, we have implemented two functions in the package,
\texttt{nexml\_to\_simmap} and \texttt{simmap\_to\_nexml}. These
functions show how simmap data can be represented as \texttt{meta}
annotations on the branch length elements of a NeXML tree, and provide
routines to convert between this NeXML representation and the extended
\texttt{ape::phylo} representation of a \texttt{simmap} tree in R that
was introduced by Revell (2012). We encourage readers interested in this
capability to consult the example code in \texttt{simmap\_to\_nexml} to
see how this is implemented.
Extensions to NeXML must also be defined in the file's namespace in
order to be valid. This provides a way to ensure that a URI providing
documentation of the extension is always included. Our examples here use
the prefix \texttt{simmap} to group the newly introduced metadata
properties in a vocabulary, for which the \texttt{add\_namespaces()}
method can be used to give a URI as an identifier:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{nex <-}\StringTok{ }\KeywordTok{add_namespaces}\NormalTok{(}\KeywordTok{c}\NormalTok{(}\DataTypeTok{simmap =}
\StringTok{"https://github.com/ropensci/RNeXML/tree/master/inst/simmap.md"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
Here the URI does not resolve to a fully machine-readable definition of
the terms and their semantics, but it can nonetheless be used to provide
at least a human-readable informal definition of the terms.
\subsection{Publishing NeXML files from
R}\label{publishing-nexml-files-from-r}
Data archiving is increasingly required by scientific journals,
including in evolutionary biology, ecology, and biodiversity (e.g.
Rausher et al. (2010)). The effort involved with preparing and
submitting properly annotated data to archives remains a notable barrier
to the broad adoption of data archiving and sharing as a normal part of
the scholarly publication workflow (Tenopir \emph{et al.} 2011; Stodden
2014). In particular, the majority of phylogenetic trees published in
the scholarly record are inaccessible or lost to the research community
(Drew \emph{et al.} 2013).
One of \texttt{RNeXML}'s aims is to promote the archival of
well-documented phylogenetic data in scientific data repositories, in
the form of NeXML files. To this end, the method
\texttt{nexml\_publish()} provides an API directly from within R that
allows data archival to become a step programmed into data management
scripts. Initially, the method supports the data repository Figshare
(\url{http://figshare.com}):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{doi <-}\StringTok{ }\KeywordTok{nexml_publish}\NormalTok{(birds, }\DataTypeTok{repository=}\StringTok{"figshare"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
This method reserves a permanent identifier (DOI) on the figshare
repository that can later be made public through the figshare web
interface. This also acts as a secure backup of the data to a repository
and a way to share with collaborators prior to public release.
\section{Conclusions and future
directions}\label{conclusions-and-future-directions}
\texttt{RNeXML} allows R's ecosystem to read and write data in the NeXML
format through an interface that is no more involved than reading or
writing data from other phylogenetic data formats. It also carries
immediate benefits for its users compared to other formats. For example,
comparative analysis R packages and users frequently add their own
metadata annotations to the phylogenies they work with, such as
annotations of species, stochastic character maps, trait values, model
estimates and parameter values. \texttt{RNeXML} affords R the capability
to harness machine-readable semantics and an extensible metadata schema
to capture, preserve, and share these and other kinds of information,
all through an API instead of having to understand in detail the schema
underlying the NeXML standard. To assist users in meeting the rising bar
for best practices in data sharing in phylogenetic research (Cranston
\emph{et al.} 2014), \texttt{RNeXML} captures metadata information from
the R environment to the extent possible, and applies reasonable
defaults.
The goals for continued development of \texttt{RNeXML} revolve primarily
around better interoperability with other existing phylogenetic data
representations in R, such as those found in the \texttt{phylobase}
package (NESCENT R Hackathon Team 2014); and better integration of the
rich metadata semantics found in ontologies defined in the Web Ontology
Language (OWL), including programmatic access to machine reasoning with
such metadata.
\subsection{Acknowledgements}\label{acknowledgements}
This project was supported in part by the National Evolutionary
Synthesis Center (NESCent) (NSF \#EF-0905606), and grants from the
National Science Foundation (DBI-1306697) and the Alfred P Sloan
Foundation (Grant 2013-6-22). \texttt{RNeXML} started as a project idea
for the Google Summer of Code(TM), and we thank Kseniia Shumelchyk for
taking the first steps to implement it. We are grateful to F. Michonneau
for helpful comments on an earlier version of this manuscript, and
reviews by Matthew Pennell, Associate Editor Richard FitzJohn, and an
anonymous reviewer. At their behest, the reviews of FitzJohn and Pennell
can be found in this project's GitHub page at
\href{https://github.com/ropensci/RNeXML/issues/121}{github.com/ropensci/RNeXML/issues/121}
and
\href{https://github.com/ropensci/RNeXML/issues/120}{github.com/ropensci/RNeXML/issues/120},
together with our replies and a record of our revisions.
\subsection{Data Accessibility}\label{data-accessibility}
All software, scripts and data used in this paper can be found in the
permanent data archive Zenodo under the digital object identifier
\url{doi:10.5281/zenodo.13131} (Boettiger \emph{et al.} 2014). This DOI
corresponds to a snapshot of the GitHub repository at
\href{https://github.com/ropensci/RNeXML}{github.com/ropensci/RNeXML}.
\section*{References}\label{references}
\addcontentsline{toc}{section}{References}
Boettiger, C., Vos, R., Chamberlain, S. \& Lapp, H. (2014). RNeXML
v2.0.0. Retrieved from \url{http://dx.doi.org/10.5281/zenodo.13131}
Bollback, J. (2006). SIMMAP: Stochastic character mapping of discrete
traits on phylogenies. \emph{BMC Bioinformatics}, \textbf{7}, 88.
Retrieved from \url{http://dx.doi.org/10.1186/1471-2105-7-88}
Chamberlain, S.A. \& Sz{ö}cs, E. (2013). Taxize: Taxonomic search and
retrieval in R. \emph{F1000Research}. Retrieved from
\url{http://dx.doi.org/10.12688/f1000research.2-191.v2}
Cranston, K., Harmon, L.J., O'Leary, M.A. \& Lisle, C. (2014). Best
practices for data sharing in phylogenetic research. \emph{PLoS Curr}.
Retrieved from
\url{http://dx.doi.org/10.1371/currents.tol.bf01eff4a6b60ca4825c69293dc59645}
Drew, B.T., Gazis, R., Cabezas, P., Swithers, K.S., Deng, J., Rodriguez,
R., Katz, L.A., Crandall, K.A., Hibbett, D.S. \& Soltis, D.E. (2013).
Lost branches on the tree of life. \emph{PLoS Biol}, \textbf{11},
e1001636. Retrieved from
\url{http://dx.doi.org/10.1371/journal.pbio.1001636}
Hartig, O. (2012). An introduction to SPARQL and queries over linked
data. \emph{Web engineering} pp. 506--507. Springer Science + Business
Media. Retrieved from
\url{http://dx.doi.org/10.1007/978-3-642-31753-8_56}
Huelsenbeck, J.P., Nielsen, R. \& Bollback, J.P. (2003). Stochastic
mapping of morphological characters. \emph{Systematic Biology},
\textbf{52}, 131--158. Retrieved from
\url{http://dx.doi.org/10.1080/10635150390192780}
Lang, D.T. (2013). \emph{XML: Tools for parsing and generating XML
within R and S-Plus.} Retrieved from
\url{http://CRAN.R-project.org/package=XML}
Maddison, D., Swofford, D. \& Maddison, W. (1997). NEXUS: An extensible
file format for systematic information. \emph{Syst. Biol.}, \textbf{46},
590--621. Retrieved from
\url{http://www.ncbi.nlm.nih.gov/pubmed/11975335}
Midford, P., Dececchi, T., Balhoff, J., Dahdul, W., Ibrahim, N., Lapp,
H., Lundberg, J., Mabee, P., Sereno, P., Westerfield, M., Vision, T. \&
Blackburn, D. (2013). The vertebrate taxonomy ontology: A framework for
reasoning across model organism and species phenotypes. \emph{J. Biomed.
Semantics}, \textbf{4}, 34. Retrieved from
\url{http://dx.doi.org/10.1186/2041-1480-4-34}
NESCENT R Hackathon Team. (2014). \emph{Phylobase: Base package for
phylogenetic structures and comparative data}. Retrieved from
\url{http://CRAN.R-project.org/package=phylobase}
O'Meara, B. (2014). CRAN task view: Phylogenetics, especially
comparative methods. Retrieved from
\url{http://cran.r-project.org/web/views/Phylogenetics.html}
Paradis, E., Claude, J. \& Strimmer, K. (2004). APE: Analyses of
phylogenetics and evolution in R language. \emph{Bioinformatics},
\textbf{20}, 289--290.
Parr, C.S., Guralnick, R., Cellinese, N. \& Page, R.D.M. (2011).
Evolutionary informatics: unifying knowledge about the diversity of
life. \emph{Trends in ecology \& evolution}, \textbf{27}, 94--103.
Retrieved from \url{http://www.ncbi.nlm.nih.gov/pubmed/22154516}
Pennell, M.W., Eastman, J.M., Slater, G.J., Brown, J.W., Uyeda, J.C.,
Fitzjohn, R.G., Alfaro, M.E. \& Harmon, L.J. (2014). Geiger v2.0: An
expanded suite of methods for fitting macroevolutionary models to
phylogenetic trees. \emph{Bioinformatics}, \textbf{30}, 2216--2218.
Piel, W.H., Chan, L., Dominus, M.J., Ruan, J., Vos, R.A. \& Tannen, V.
(2009). TreeBASE v. 2: A database of phylogenetic knowledge. Retrieved
from \url{http://www.e-biosphere09.org}
Piel, W.H., Donoghue, M.J. \& Sanderson, M.J. (2002). TreeBASE: A
database of phylogenetic information. \emph{The interoperable `catalog
of life'} (eds J. Shimura, K.L. Wilson \& D. Gordon), pp. 41--47.
Research report. National Institute for Environmental Studies, Tsukuba,
Japan. Retrieved from
\url{http://donoghuelab.yale.edu/sites/default/files/124_piel_shimura02.pdf}
Prud'hommeaux, E. (2014). SPARQL query language for RDF. \emph{W3C}.
Retrieved from \url{http://www.w3.org/TR/rdf-sparql-query/}
R Core Team. (2014). \emph{R: A language and environment for statistical
computing}. R Foundation for Statistical Computing, Vienna, Austria.
Retrieved from \url{http://www.R-project.org/}
Rausher, M.D., McPeek, M.A., Moore, A.J., Rieseberg, L. \& Whitlock,
M.C. (2010). Data archiving. \emph{Evolution}, \textbf{64}, 603--604.
Retrieved from \url{http://dx.doi.org/10.1111/j.1558-5646.2009.00940.x}
Revell, L.J. (2012). Phytools: An R package for phylogenetic comparative
biology (and other things). \emph{Methods in Ecology and Evolution},
\textbf{3}, 217--223.
Stodden, V. (2014). The scientific method in practice: Reproducibility
in the computational sciences. \emph{SSRN Journal}. Retrieved from
\url{http://dx.doi.org/10.2139/ssrn.1550193}
Stoltzfus, A., O'Meara, B., Whitacre, J., Mounce, R., Gillespie, E.L.,
Kumar, S., Rosauer, D.F. \& Vos, R.A. (2012). Sharing and re-use of
phylogenetic trees (and associated data) to facilitate synthesis.
\emph{BMC Research Notes}, \textbf{5}, 574. Retrieved from
\url{http://dx.doi.org/10.1186/1756-0500-5-574}
Tenopir, C., Allard, S., Douglass, K., Aydinoglu, A.U., Wu, L., Read,
E., Manoff, M. \& Frame, M. (2011). Data sharing by scientists:
Practices and perceptions (C. Neylon, Ed.). \emph{PLoS ONE}, \textbf{6},
e21101. Retrieved from
\url{http://dx.doi.org/10.1371/journal.pone.0021101}
Vos, R.A., Balhoff, J.P., Caravas, J.A., Holder, M.T., Lapp, H.,
Maddison, W.P., Midford, P.E., Priyam, A., Sukumaran, J., Xia, X. \&
Stoltzfus, A. (2012). NeXML: Rich, extensible, and verifiable
representation of comparative data and metadata. \emph{Systematic
Biology}, \textbf{61}, 675--689. Retrieved from
\url{http://dx.doi.org/10.1093/sysbio/sys025}
\end{document}
|
/*
*
* Copyright (c) 2006 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "spiral/log/Statement.h"
#include "spiral/log/Logger.h"
#include <boost/bind.hpp>
#include <stdexcept>
#include <algorithm>
#include <ctype.h>
#include <string.h>
namespace spiral {
namespace log {
namespace {
using namespace std;
struct NonPrint { bool operator()(unsigned char c) { return !isprint(c) && !isspace(c); } };
const char hex[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
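// Escape non-printable, non-whitespace characters as \xNN hex sequences.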
std::string quote(const std::string& str) {
NonPrint nonPrint;
size_t n = std::count_if(str.begin(), str.end(), nonPrint);
if (n==0) return str;
std::string ret;
ret.reserve(str.size()+2*n); // Avoid extra allocations.
for (string::const_iterator i = str.begin(); i != str.end(); ++i) {
if (nonPrint(*i)) {
ret.push_back('\\');
ret.push_back('x');
ret.push_back(hex[((*i) >> 4)&0xf]);
ret.push_back(hex[(*i) & 0xf]);
}
else ret.push_back(*i);
}
return ret;
}
}
void Statement::log(const std::string& message) {
Logger::instance().log(*this, quote(message));
}
Statement::Initializer::Initializer(Statement& s) : statement(s) {
Logger::instance().add(s);
}
namespace {
const char* names[LevelTraits::COUNT] = {
"trace", "debug", "info", "notice", "warning", "error", "critical"
};
} // namespace
Level LevelTraits::level(const char* name) {
for (int i =0; i < LevelTraits::COUNT; ++i) {
if (strcmp(names[i], name)==0)
return Level(i);
}
throw std::runtime_error(std::string("Invalid log level name: ")+name);
}
const char* LevelTraits::name(Level l) {
return names[l];
}
}} // namespace spiral::log
|
```python
%matplotlib inline
```
Advanced: Making Dynamic Decisions and the Bi-LSTM CRF
======================================================
Dynamic versus Static Deep Learning Toolkits
--------------------------------------------
Pytorch is a *dynamic* neural network toolkit. Another example of a
dynamic toolkit is `Dynet <https://github.com/clab/dynet>`_ (I mention this
because working with Pytorch and Dynet is similar; if you see an example in
Dynet, it will probably help you implement it in Pytorch). The opposite are
*static* toolkits, which include Theano, Keras, TensorFlow, etc. The core
differences are the following:
* In a static toolkit, you define a computation graph once, compile it,
and then stream data instances to it.
* In a dynamic toolkit, you define a computation graph *for each instance*;
it is never compiled and is executed on the fly.
Without a lot of experience, it is difficult to appreciate the difference.
One example is a deep-learning-based constituent parser. Suppose our model
roughly involves the following steps:
* We build the tree bottom-up.
* Tag the root nodes (the words of the sentence).
* From there, use a neural network and the embeddings of the words to find
combinations that form constituents. Whenever you form a new constituent, you
need some technique to get an embedding of the constituent. For now, the
network will only consult the input sentence. In the sentence "The green cat
scratched the wall", at some point in the model we will want to combine the
span $(i,j,r) = (1, 3, \text{NP})$ (that is, an NP constituent spans
word 1 to word 3, in this case "The green cat").
However, in another sentence, "Somewhere, the big fat cat scratched the
wall", we will want to form the constituent $(2, 4, NP)$ at some point.
The constituents we want to form depend on the sentence. If we compile the
computation graph only once, as in a static toolkit, programming this logic
becomes exceptionally difficult or impossible. In a dynamic toolkit, however,
there is no single pre-defined computation graph; there can be a new
computation graph for each sentence, so this problem goes away.
Dynamic toolkits are also easier to debug, and their code more closely
resembles the host language (Pytorch and Dynet look more like actual Python
code than Keras or Theano do).
Bi-LSTM Conditional Random Field Discussion
-------------------------------------------
이 영역에서는 개체명 인식을 수행하는 완성된 Bi-LSTM Conditional Random
Field 예시를 살펴보겠습니다. 위에 나온 LSTM 태거(tagger)는 일반적으로
품사 태깅을 하기에 충분합니다. 하지만 CRF 같은 연속된 데이터를 다루는
모델은 좋은 개체명 인식 모델(NER)에 꼭 필요합니다. 여러분이 CRF를 잘 알고
있다고 가정하겠습니다. 이름이 무섭게 들릴 수도 있지만, LSTM이 특징을
제공하는 점을 제외하면 이 모델은 CRF 입니다. 하지만 더 발전된 모델이며,
이 튜토리얼의 앞부분에 나왔던 모델보다 훨씬 복잡합니다. 넘어가고 싶다면
넘어가도 괜찮습니다. 이해할 수 있다고 생각한다면, 아래를 읽어보세요:
- 태그 k에 대한 i번째 단계의 비터비(viterbi) 변수를 위해 순환 흐름을 만든다.
- 순방향 변수를 계산하기 위해 위의 순한 흐름을 조정한다.
- 순방향 변수를 로그 공간에서 계산하기 위해 다시 한 번 조정한다.
(힌트 : 로그-합-지수승)
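The identity behind that last hint, which keeps the computation numerically
stable by shifting by the maximum before exponentiating, is
\begin{align}\log \sum_i \exp(x_i) = \max_j x_j + \log \sum_i \exp\left(x_i - \max_j x_j\right)\end{align}
and it is exactly what the helper function ``log_sum_exp`` below implements.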
If you can do those three things, you should be able to understand the code
below. Recall that the CRF computes a conditional probability. Let $y$ be a
tag sequence and $x$ an input sequence of words. Then we compute
\begin{align}P(y|x) = \frac{\exp{(\text{Score}(x, y))}}{\sum_{y'} \exp{(\text{Score}(x, y'))}}\end{align}
where the score is determined by defining some log potentials
$\log \psi_i(x,y)$ such that
\begin{align}\text{Score}(x,y) = \sum_i \log \psi_i(x,y)\end{align}
To make the partition function tractable, the potentials must look only at
local features.
In the Bi-LSTM CRF, we define two kinds of potentials: emission and
transition. The emission potential for the word at index $i$ comes from the
hidden state of the Bi-LSTM at timestep $i$. The transition scores are
stored in a $|T| \times |T|$ matrix $\textbf{P}$, where $T$ is the tag set.
In this implementation, $\textbf{P}_{j,k}$ is the score of transitioning
*to* tag $j$ *from* tag $k$ (this matches the indexing used by the code
below). So:
\begin{align}\text{Score}(x,y) = \sum_i \log \psi_\text{EMIT}(y_i \rightarrow x_i) + \log \psi_\text{TRANS}(y_{i-1} \rightarrow y_i)\end{align}
\begin{align}= \sum_i h_i[y_i] + \textbf{P}_{y_i, y_{i-1}}\end{align}
where in the second expression we think of the tags as being assigned
unique non-negative indices.
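As a quick check on this algebra, the score of a fixed tag sequence can be
computed directly from an emission matrix and the transition matrix. The
tensors below are made up for illustration, and START/STOP handling is
omitted (the ``_score_sentence`` method below adds it):
```python
import torch

torch.manual_seed(0)
num_tags, length = 3, 5
h = torch.randn(length, num_tags)    # h[i, y]: emission score of tag y at word i
P = torch.randn(num_tags, num_tags)  # P[j, k]: score of transitioning to j from k
y = [0, 2, 1, 1, 0]                  # an arbitrary tag sequence

score = h[0, y[0]]  # first word: emission only (no START transition here)
for i in range(1, length):
    score = score + h[i, y[i]] + P[y[i], y[i - 1]]
```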
If the above discussion was too brief, you can check out Michael Collins's
write-up on CRFs `here <http://www.cs.columbia.edu/%7Emcollins/crf.pdf>`__.
Implementation Notes
--------------------
The example below implements the forward algorithm in log space to compute
the partition function, and the viterbi algorithm to decode. Backpropagation
will compute the gradients automatically for us; we don't have to do
anything by hand.
This implementation is not optimized. If you understand what is going on,
you will probably quickly see that iterating over the next tag in the
forward algorithm could instead be done in one big operation. The code here
was written to be as readable as possible. With the relevant changes, you
could probably use this tagger for real tasks.
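To make the remark about "one big operation" concrete, here is an untested
sketch, with the same tensor shapes as the tutorial's ``_forward_alg``, of
how the inner loop over next_tag can collapse into broadcast arithmetic and
a single ``torch.logsumexp`` call:
```python
import torch

def forward_step_vectorized(forward_var, feat, transitions):
    # forward_var: (1, T) forward variables from the previous step (log space)
    # feat:        (T,)   emission scores at the current timestep
    # transitions: (T, T) where transitions[j, k] scores the move k -> j
    # scores[j, k] = forward_var[0, k] + transitions[j, k] + feat[j]
    scores = forward_var + transitions + feat.view(-1, 1)
    return torch.logsumexp(scores, dim=1).view(1, -1)
```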
```python
# Author: Robert Guthrie
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
torch.manual_seed(1)
```
Helper functions to make the code more readable.
```python
def argmax(vec):
    # return the argmax as a python int
_, idx = torch.max(vec, 1)
return idx.item()
def prepare_sequence(seq, to_ix):
idxs = [to_ix[w] for w in seq]
return torch.tensor(idxs, dtype=torch.long)
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
```
Create the model
```python
class BiLSTM_CRF(nn.Module):
def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
super(BiLSTM_CRF, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.tag_to_ix = tag_to_ix
self.tagset_size = len(tag_to_ix)
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
num_layers=1, bidirectional=True)
        # Maps the output of the LSTM into tag space.
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
        # Matrix of transition parameters. Entry i,j is the score of
        # transitioning *to* i *from* j.
self.transitions = nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size))
        # These two statements enforce the constraint that we never transfer
        # to the start tag and we never transfer from the stop tag.
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
self.hidden = self.init_hidden()
def init_hidden(self):
return (torch.randn(2, 1, self.hidden_dim // 2),
torch.randn(2, 1, self.hidden_dim // 2))
def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function.
init_alphas = torch.full((1, self.tagset_size), -10000.)
        # START_TAG has all of the score.
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
        # Wrap in a variable so that we will get automatic backprop.
forward_var = init_alphas
        # Iterate through the sentence.
for feat in feats:
            alphas_t = []  # The forward tensors at this timestep
for next_tag in range(self.tagset_size):
                # Broadcast the emission score: it is the same regardless
                # of the previous tag.
emit_score = feat[next_tag].view(
1, -1).expand(1, self.tagset_size)
                # The ith entry of trans_score is the score of transitioning
                # to next_tag from i.
trans_score = self.transitions[next_tag].view(1, -1)
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp.
next_tag_var = forward_var + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all
                # the scores.
alphas_t.append(log_sum_exp(next_tag_var).view(1))
forward_var = torch.cat(alphas_t).view(1, -1)
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
alpha = log_sum_exp(terminal_var)
return alpha
def _get_lstm_features(self, sentence):
self.hidden = self.init_hidden()
embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
lstm_out, self.hidden = self.lstm(embeds, self.hidden)
lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
lstm_feats = self.hidden2tag(lstm_out)
return lstm_feats
def _score_sentence(self, feats, tags):
        # Gives the score of a provided tag sequence.
score = torch.zeros(1)
tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])
for i, feat in enumerate(feats):
score = score + \
self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
return score
def _viterbi_decode(self, feats):
backpointers = []
        # Initialize the viterbi variables in log space.
init_vvars = torch.full((1, self.tagset_size), -10000.)
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
        # forward_var at step i holds the viterbi variables for step i-1.
forward_var = init_vvars
for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step
for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at
                # the previous step, plus the score of transitioning from
                # tag i to next_tag. We don't include the emission scores
                # here because the max does not depend on them (we add them
                # in below).
next_tag_var = forward_var + self.transitions[next_tag]
best_tag_id = argmax(next_tag_var)
bptrs_t.append(best_tag_id)
viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
            # Now add in the emission scores, and assign forward_var to the
            # set of viterbi variables we just computed.
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
backpointers.append(bptrs_t)
        # Transition to STOP_TAG
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
        # Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
        # Pop off the start tag (we don't want to return that to the caller)
start = best_path.pop()
        assert start == self.tag_to_ix[START_TAG]  # Sanity check
best_path.reverse()
return path_score, best_path
def neg_log_likelihood(self, sentence, tags):
feats = self._get_lstm_features(sentence)
forward_score = self._forward_alg(feats)
gold_score = self._score_sentence(feats, tags)
return forward_score - gold_score
    def forward(self, sentence):  # don't confuse this with _forward_alg above.
        # Get the emission scores from the BiLSTM.
lstm_feats = self._get_lstm_features(sentence)
        # Find the best path, given the features.
score, tag_seq = self._viterbi_decode(lstm_feats)
return score, tag_seq
```
Run training
```python
START_TAG = "<START>"
STOP_TAG = "<STOP>"
EMBEDDING_DIM = 5
HIDDEN_DIM = 4
# Make up some training data.
training_data = [(
"the wall street journal reported today that apple corporation made money".split(),
"B I I I O O O B I O O".split()
), (
"georgia tech is a university in georgia".split(),
"B I O O O O B".split()
)]
word_to_ix = {}
for sentence, tags in training_data:
for word in sentence:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}
model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)
# Check predictions before training.
with torch.no_grad():
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
precheck_tags = torch.tensor([tag_to_ix[t] for t in training_data[0][1]], dtype=torch.long)
print(model(precheck_sent))
# Make sure prepare_sequence from earlier in the LSTM section is loaded.
for epoch in range(300):  # again, normally you would NOT do 300 epochs, it is toy data
for sentence, tags in training_data:
        # Step 1. Remember that Pytorch accumulates gradients.
        # We need to clear them out before each instance.
model.zero_grad()
        # Step 2. Get our inputs ready for the network, that is,
        # turn them into Tensors of word indices.
sentence_in = prepare_sequence(sentence, word_to_ix)
targets = torch.tensor([tag_to_ix[t] for t in tags], dtype=torch.long)
        # Step 3. Run our forward pass.
loss = model.neg_log_likelihood(sentence_in, targets)
        # Step 4. Compute the loss, gradients, and update the parameters by
        # calling optimizer.step().
loss.backward()
optimizer.step()
# Check predictions after training.
with torch.no_grad():
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
print(model(precheck_sent))
# We got it!
```
Exercise: A new loss function for discriminative tagging
------------------------------------------------------------
It was actually not necessary to create a computation graph when doing
decoding, since we never backpropagate from the viterbi path score. Since we
have it anyway, though, try training the tagger with a loss function that is
the difference between the viterbi path score and the score of the
gold-standard path. It should be clear that this function is non-negative,
and 0 when the predicted tag sequence is the correct one. This is
essentially a *structured perceptron*.
This modification should be short, since viterbi and score_sentence are
already implemented; a sketch follows below. Note that this is an example of
a computation graph that *depends on the training instance*. Although I have
not tried implementing this in a static toolkit, I imagine it is possible
but much less straightforward.
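One possible shape for that loss, reusing the methods defined above (an
untested sketch; the method name ``viterbi_loss`` is invented here, and the
method would be added to ``BiLSTM_CRF``):
```python
def viterbi_loss(self, sentence, tags):
    feats = self._get_lstm_features(sentence)
    viterbi_score, _ = self._viterbi_decode(feats)  # score of the decoded path
    gold_score = self._score_sentence(feats, tags)  # score of the gold path
    # Non-negative, and zero exactly when the decoded path is the gold path.
    return viterbi_score - gold_score
```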
Get some real data and do a comparison!
|
#' Read MODFLOW .lpf File
#'
#' This function reads in a lpf file and creates a list
#' composed of the following vectors:
#' \describe{
#' \item{ILPFCB}{is a flag and a unit number. If ILPFCB > 0, cell-by-cell flow terms will be written to this unit
#' number when "SAVE BUDGET" or a non-zero value for ICBCFL is specified in Output Control. The terms that are saved
#' are storage, constant-head flow, and flow between adjacent cells.
#' If ILPFCB = 0, cell-by-cell flow terms will not be written.
#' If ILPFCB < 0, cell-by-cell flow for constant-head cells will be written in the listing file when "SAVE BUDGET"
#' or a non-zero value for ICBCFL is specified in Output Control. Cell-by-cell flow to storage and between
#' adjacent cells will not be written to any file.}
#' \item{HDRY}{is the head that is assigned to cells that are converted to dry during a simulation. Although this value plays
#' no role in the model calculations, HDRY values are useful as indicators when looking at the resulting heads that are
#' output from the model. HDRY is thus similar to HNOFLO in the Basic Package, which is the value assigned to cells
#' that are no-flow cells at the start of a model simulation.}
#' \item{NPLPF}{is the number of LPF parameters}
#' \item{LAYTYP}{contains a flag for each layer that specifies the layer type.
#' 0 – confined
#' >0 – convertible
#' <0 – convertible unless the THICKSTRT option is in effect. When THICKSTRT is in effect, a negative value of
#' LAYTYP indicates that the layer is confined, and its saturated thickness will be computed as STRT-BOT.}
#' \item{LAYAVG}{contains a flag for each layer that defines the method of calculating interblock transmissivity.
#' 0—harmonic mean
#' 1—logarithmic mean
#' 2—arithmetic mean of saturated thickness and logarithmic-mean hydraulic conductivity.}
#' \item{CHANI}{contains a value for each layer that is a flag or the horizontal anisotropy. If CHANI is less than or equal to
#' 0, then variable HANI defines horizontal anisotropy. If CHANI is greater than 0, then CHANI is the horizontal
#' anisotropy for the entire layer, and HANI is not read. If any HANI parameters are used, CHANI for all layers must
#' be less than or equal to 0.}
#' \item{LAYVKA}{contains a flag for each layer that indicates whether variable VKA is vertical hydraulic conductivity or
#' the ratio of horizontal to vertical hydraulic conductivity.
#' 0—indicates VKA is vertical hydraulic conductivity
#' not 0—indicates VKA is the ratio of horizontal to vertical hydraulic conductivity, where the horizontal hydraulic
#' conductivity is specified as HK in item 10.}
#' \item{LAYWET}{contains a flag for each layer that indicates whether wetting is active.
#' 0—indicates wetting is inactive
#' not 0—indicates wetting is active}
#' \item{WETFCT}{is a factor that is included in the calculation of the head that is initially established at a cell when the cell
#' is converted from dry to wet. (See IHDWET.)
#' IWETIT—is the iteration interval for attempting to wet cells. Wetting is attempted every IWETIT iteration. If using
#' the PCG solver (Hill, 1990), this applies to outer iterations, not inner iterations. If IWETIT ≤ 0, the value is changed
#' to 1.}
#' \item{IHDWET}{is a flag that determines which equation is used to define the initial head at cells that become wet:
#' If IHDWET = 0, equation 5-32A is used: h = BOT + WETFCT (hn - BOT) .
#' If IHDWET is not 0, equation 5-32B is used: h = BOT + WETFCT(THRESH)}
#' \item{PARNAM}{is the name of a parameter to be defined. This name can consist of 1 to 10 characters and is not case
#' sensitive. That is, any combination of the same characters with different case will be equivalent.}
#' \item{PARTYP}{is the type of parameter to be defined. For the LPF Package, the allowed parameter types are:
#' HK—defines variable HK, horizontal hydraulic conductivity
#' HANI—defines variable HANI, horizontal anisotropy
#' VK—defines variable VKA for layers for which VKA represents vertical hydraulic conductivity (LAYVKA=0)
#' VANI—defines variable VKA for layers for which VKA represents vertical anisotropy (LAYVKA!=0)
#' SS—defines variable Ss, the specific storage
#' SY—defines variable Sy, the specific yield
#' VKCB—defines variable VKCB, the vertical hydraulic conductivity of a Quasi-3D confining layer.}
#' \item{Parval}{is the parameter value. This parameter value may be overridden by a value in the Parameter Value File.}
#' \item{NCLU}{is the number of clusters required to define the parameter. Each repetition of Item 9 is a cluster (variables
#' Layer, Mltarr, Zonarr, and IZ). Each layer that is associated with a parameter usually has only one cluster. For
#' example, parameters which apply to cells in a single layer generally will be defined by just one cluster. However,
#' having more than one cluster for the same layer is acceptable.}
#' \item{Layer}{is the layer number to which a cluster definition applies.}
#' \item{Mltarr}{is the name of the multiplier array to be used to define variable values that are associated with a parameter.
#' The name “NONE” means that there is no multiplier array, and the variable values will be set equal to Parval.}
#' \item{Zonarr}{is the name of the zone array to be used to define the cells that are associated with a parameter. The name
#' “ALL” means that there is no zone array, and all cells in the specified layer are part of the parameter.}
#' \item{IZ}{is up to 10 zone numbers (separated by spaces) that define the cells that are associated with a parameter. These
#' values are not used if ZONARR is specified as “ALL”. Values can be positive or negative, but 0 is not allowed. The
#' end of the line, a zero value, or a non-numeric entry terminates the list of values.}
#' \item{PROPS}{Data frame of LAY, ROW, COL, HK, HANI, VKA, Ss, Sy, VKCB, WETDRY}
#' }
#' @param rootname This is the root name of the lpf file
#' @export
#' @examples
#' readlpf("F95")
#' $ILPFCB
#' [1] 50
#'
#' $HDRY
#' [1] -1e+30
#'
#' $NPLPF
#' [1] 0
#'
#' $LAYTYP
#' [1] 1 3 3 3 0 0 0 0
#'
#' $LAYAVG
#' [1] 0 0 0 0 0 0 0 0
#'
#' $CHANI
#' [1] -1 -1 -1 -1 -1 -1 -1 -1
#'
#' etc.
#'
#' Use this to develop summary statistics of the hydraulic properties
#' p <- readlpf("F95")
#' p$PROPS %>% group_by(LAY) %>% summarise(MIN_K = min(HK), MEDIAN_K = median(HK), MAX_K = max(HK))
#'
#' # A tibble: 8 x 4
#' #     LAY       MIN_K  MEDIAN_K      MAX_K
#' #   <int>       <dbl>     <dbl>      <dbl>
#' # 1     1   0.1000004  3.229593   17.91849
#' # 2     2   0.0080000  0.008000    0.00800
#' # 3     3   0.0080000  0.008000    0.00800
#' # 4     4   0.0080000  0.008000    0.00800
#' # 5     5   0.0080000  0.008000    0.00800
#' # 6     6   0.0080000  0.008000    0.00800
#' # 7     7   0.1000000  6.449597   17.46494
#' # 8     8  20.0000000 20.000000   20.00000
readlpf <- function(rootname = NA){
if(is.na(rootname)){
rootname <- MFtools::getroot()
}
infl <- paste0(rootname, ".lpf")
d <- MFtools::readdis(rootname)
linin <- readr::read_lines(infl)
indx <- max(grep("#", linin)) + 1
ILPFCB <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[1]] %>%
as.integer()
HDRY <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[2]] %>%
as.numeric()
NPLPF <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[3]] %>%
as.integer()
indx <- indx + 1
BLOCKEND <- ceiling(d$NLAY / 50)
BLOCKEND_NUM <- ceiling(d$NLAY / 20)
LAYTYP <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.integer()
indx <- indx + BLOCKEND
LAYAVG <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.integer()
indx <- indx + BLOCKEND
CHANI <- linin[indx + seq(1:BLOCKEND_NUM) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.numeric()
indx <- indx + BLOCKEND_NUM
LAYVKA <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.integer()
indx <- indx + BLOCKEND
LAYWET <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.integer()
indx <- indx + BLOCKEND
WETFCT <- c(NA)
IWETIT <- c(NA)
IHDWET <- c(NA)
if(sum(LAYWET) > 0){
WETFCT <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[1]] %>%
as.numeric()
IWETIT <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[2]] %>%
as.integer()
IHDWET <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[3]] %>%
as.integer()
indx <- indx + 1
}
PARNAM <- vector(mode = "character", length = NPLPF)
PARTYP <- vector(mode = "character", length = NPLPF)
Parval <- vector(mode = "numeric", length = NPLPF)
NCLU <- vector(mode = "integer", length = NPLPF)
Layer <- c(NULL)
Mltarr <- c(NULL)
Zonarr <- c(NULL)
IZ <- list()
if(NPLPF > 0){
for(Q in 1:NPLPF){
PARNAM[Q] <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[1]]
PARTYP[Q] <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[2]]
Parval[Q] <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[3]] %>%
as.numeric()
NCLU[Q] <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[4]] %>%
as.numeric()
indx <- indx + 1
for(ii in 1:NCLU[Q]){
indx <- indx + 1
Layer[ii] <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[1]] %>%
as.integer()
Mltarr[ii] <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[2]]
Zonarr[ii] <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[[3]]
IZ[[ii]] <- linin[indx] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
.[-(1:3)] %>% # keep every zone number after Layer, Mltarr and Zonarr
as.integer()
}
}
}
HKin <- vector(mode = "numeric", length = d$NCOL * d$NROW * d$NLAY)
if(min(CHANI) <= 0){
HANIin <- vector(mode = "numeric", length = d$NCOL * d$NROW * d$NLAY)
}else{
HANIin <- rep(NA, d$NCOL * d$NROW * d$NLAY)
}
VKAin <- vector(mode = "numeric", length = d$NCOL * d$NROW * d$NLAY)
if("TR" %in% d$SS){
Ssin <- vector(mode = "numeric", length = d$NCOL * d$NROW * d$NLAY)
}else{
Ssin <- rep(NA, d$NCOL * d$NROW * d$NLAY)
}
if(("TR" %in% d$SS) & any(LAYTYP != 0)){
Syin <- vector(mode = "numeric", length = d$NCOL * d$NROW * d$NLAY)
}else{
Syin <- rep(NA, d$NCOL * d$NROW * d$NLAY)
}
if(any(d$LAYCBD != 0)){
VKCBDin <- vector(mode = "numeric", length = d$NCOL * d$NROW * d$NLAY)
}else{
VKCBDin <- rep(NA, d$NCOL * d$NROW * d$NLAY)
}
if(any(LAYWET != 0) & any(LAYTYP != 0)){
WETDRYin <- vector(mode = "numeric", length = d$NCOL * d$NROW * d$NLAY)
}else{
WETDRYin <- rep(NA, d$NCOL * d$NROW * d$NLAY) %>% as.numeric()
}
for(K in 1:d$NLAY){
# READ HORIZONTAL HYDRAULIC CONDUCTIVITY
#########################################################
# print(paste("READING HK: LAYER", K))
FROM <- (K - 1) * d$NROW * d$NCOL + 1
TO <- K * d$NROW * d$NCOL
UNI <- substr(linin[indx], start = 1, stop = 10) %>% as.integer()
MULT <- substr(linin[indx], start = 11, stop = 20) %>% as.numeric()
FRMT <- substr(linin[indx], start = 21, stop = 30)
FRMTREP <- as.integer(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[1]]
# print(paste("FRMTREP[",K,"] = ", FRMTREP, sep = ""))
FRMTWIDTH <- as.numeric(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[2]]
BLOCKEND <- ceiling(d$NCOL / FRMTREP) * d$NROW
indx <- indx + 1
if(UNI == 0){
HKin[FROM:TO] <- rep(MULT, d$NROW * d$NCOL)
}else{
HKin[FROM:TO] <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.numeric()
indx <- indx + BLOCKEND
}
# READ HORIZONTAL ANISOTROPY IF CHANI < 0
########################################################
# print(paste("READING HANI: LAYER", K))
if(CHANI[K] <= 0){
UNI <- substr(linin[indx], start = 1, stop = 10) %>% as.integer()
MULT <- substr(linin[indx], start = 11, stop = 20) %>% as.numeric()
FRMT <- substr(linin[indx], start = 21, stop = 30)
FRMTREP <- as.integer(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[1]]
FRMTWIDTH <- as.numeric(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[2]]
BLOCKEND <- (ceiling(d$NCOL / FRMTREP)) * d$NROW
indx <- indx + 1
if(UNI == 0){
HANIin[FROM:TO] <- rep(MULT, d$NROW * d$NCOL)
}else{
HANIin[FROM:TO] <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.numeric()
indx <- indx + BLOCKEND
}
}
# READ VKA
##########################################################
# print(paste("READING VKA: LAYER", K))
UNI <- substr(linin[indx], start = 1, stop = 10) %>% as.integer()
MULT <- substr(linin[indx], start = 11, stop = 20) %>% as.numeric()
FRMT <- substr(linin[indx], start = 21, stop = 30)
FRMTREP <- as.integer(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[1]]
FRMTWIDTH <- as.numeric(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[2]]
BLOCKEND <- ceiling(d$NCOL / FRMTREP) * d$NROW
indx <- indx + 1
if(UNI == 0){
VKAin[FROM:TO] <- rep(MULT, d$NROW * d$NCOL)
}else{
VKAin[FROM:TO] <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.numeric()
indx <- indx + BLOCKEND
}
# READ Ss IF NUMBER OF TRANSIENT > 0
##########################################################
if("TR" %in% d$SS){
# print(paste("READING Ss: LAYER", K))
UNI <- substr(linin[indx], start = 1, stop = 10) %>% as.integer()
MULT <- substr(linin[indx], start = 11, stop = 20) %>% as.numeric()
FRMT <- substr(linin[indx], start = 21, stop = 30)
FRMTREP <- as.integer(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[1]]
FRMTWIDTH <- as.numeric(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[2]]
BLOCKEND <- ceiling(d$NCOL / FRMTREP) * d$NROW
indx <- indx + 1
if(UNI == 0){
Ssin[FROM:TO] <- rep(MULT, d$NROW * d$NCOL)
}else{
Ssin[FROM:TO] <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.numeric()
indx <- indx + BLOCKEND
}
}
# READ Sy IF NUMBER OF TRANSIENT > 0 & LAYTYP == UNCONFINED (0)
##########################################################
if(("TR" %in% d$SS)&(LAYTYP[K] != 0)){
# print(paste("READING Sy: LAYER", K))
UNI <- substr(linin[indx], start = 1, stop = 10) %>% as.integer()
MULT <- substr(linin[indx], start = 11, stop = 20) %>% as.numeric()
FRMT <- substr(linin[indx], start = 21, stop = 30)
FRMTREP <- as.integer(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[1]]
FRMTWIDTH <- as.numeric(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[2]]
BLOCKEND <- ceiling(d$NCOL / FRMTREP) * d$NROW
indx <- indx + 1
if(UNI == 0){
Syin[FROM:TO] <- rep(MULT, d$NROW * d$NCOL)
}else{
Syin[FROM:TO] <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.numeric()
indx <- indx + BLOCKEND
}
}
# READ VKCBD
##########################################################
if(d$LAYCBD[K] != 0){
# print(paste("READING VKBD: LAYER", K))
UNI <- substr(linin[indx], start = 1, stop = 10) %>% as.integer()
MULT <- substr(linin[indx], start = 11, stop = 20) %>% as.numeric()
FRMT <- substr(linin[indx], start = 21, stop = 30)
FRMTREP <- as.integer(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[1]]
FRMTWIDTH <- as.numeric(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[2]]
BLOCKEND <- ceiling(d$NCOL / FRMTREP) * d$NROW
indx <- indx + 1
if(UNI == 0){
VKCBDin[FROM:TO] <- rep(MULT, d$NROW * d$NCOL)
}else{
VKCBDin[FROM:TO] <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.numeric()
indx <- indx + BLOCKEND
}
}
# READ WETDRY
##########################################################
if((LAYWET[K] != 0) & (LAYTYP[K] != 0)){
# print(paste("READING WETDRY: LAYER", K))
UNI <- substr(linin[indx], start = 1, stop = 10) %>% as.integer()
MULT <- substr(linin[indx], start = 11, stop = 20) %>% as.numeric()
FRMT <- substr(linin[indx], start = 21, stop = 30)
FRMTREP <- as.integer(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[1]]
FRMTWIDTH <- as.numeric(regmatches(FRMT, gregexpr("[[:digit:]]+", FRMT))[[1]])[[2]]
BLOCKEND <- ceiling(d$NCOL / FRMTREP) * d$NROW
indx <- indx + 1
if(UNI == 0){
WETDRYin[FROM:TO] <- rep(MULT, d$NROW * d$NCOL)
}else{
WETDRYin[FROM:TO] <- linin[indx + seq(1:BLOCKEND) - 1] %>%
strsplit("\\s+") %>%
unlist() %>%
subset(. != "") %>%
as.numeric()
indx <- indx + BLOCKEND
}
}
}
PROPS <- tibble::tibble(
LAY = rep(1:d$NLAY, each = d$NCOL * d$NROW) %>% as.integer(),
ROW = rep(rep(1:d$NROW, each = d$NCOL), d$NLAY) %>% as.integer(),
COL = rep(rep(seq(1, d$NCOL, 1), d$NROW), d$NLAY) %>% as.integer(),
HK = HKin %>% as.numeric(),
HANI = HANIin %>% as.numeric(),
VKA = VKAin %>% as.numeric(),
Ss = Ssin %>% as.numeric(),
Sy = Syin %>% as.numeric(),
VKCBD = VKCBDin %>% as.numeric(),
WETDRY = WETDRYin %>% as.numeric()
)
rm(HKin)
rm(HANIin)
rm(VKAin)
rm(Ssin)
rm(Syin)
rm(VKCBDin)
rm(WETDRYin)
rm(d)
gc()
PROPLIST <- list(ILPFCB = ILPFCB,
HDRY = HDRY,
NPLPF = NPLPF,
LAYTYP = LAYTYP,
LAYAVG = LAYAVG,
CHANI = CHANI,
LAYVKA = LAYVKA,
LAYWET = LAYWET,
WETFCT = WETFCT,
IWETIT = IWETIT,
IHDWET = IHDWET,
PARNAM = PARNAM,
PARTYP = PARTYP,
Parval = Parval,
NCLU = NCLU,
Layer = Layer,
Mltarr = Mltarr,
Zonarr = Zonarr,
IZ = IZ,
PROPS = PROPS)
return(PROPLIST)
}
|
Just delivered a full-sized arcade-style Air Hockey Table for a weekend party to a private address in Central London.
Air Hockey is great fun. We hire for one night, one day, or long term, for parties, weddings, corporate events, pubs, clubs, and more. |
-----------------------------------------------------------------------------
-- |
-- Module : Finance.Hqfl.Pricer.Black
-- Copyright : (C) 2016 Mika'il Khan
-- License : (see the file LICENSE)
-- Maintainer : Mika'il Khan <[email protected]>
-- Stability : stable
-- Portability : portable
--
----------------------------------------------------------------------------
{-# LANGUAGE FlexibleInstances #-}
module Finance.Hqfl.Pricer.Black where
import Finance.Hqfl.Instrument
import Statistics.Distribution.Normal
import Data.Random
class Black a where
price :: a -> Double -> Double -> Double
instance Black (Option Future) where
price (Option (Future f) m European k t) r v =
case m of
Call -> exp (-r * t) * (f * cdf normal d1 - k * cdf normal d2)
Put -> exp (-r * t) * (k * cdf normal (-d2) - f * cdf normal (-d1))
where d1 = (log (f / k) + ((v * v) / 2) * t) / (v * sqrt t)
d2 = d1 - v * sqrt t
normal = Normal (0 :: Double) 1
|
(* Title: HOL/ex/CTL.thy
Author: Gertrud Bauer
*)
section \<open>CTL formulae\<close>
theory CTL
imports MainRLT
begin
text \<open>
We formalize basic concepts of Computational Tree Logic (CTL) @{cite
"McMillan-PhDThesis" and "McMillan-LectureNotes"} within the simply-typed
set theory of HOL.
By using the common technique of ``shallow embedding'', a CTL formula is
identified with the corresponding set of states where it holds.
Consequently, CTL operations such as negation, conjunction, disjunction
simply become complement, intersection, union of sets. We only require a
separate operation for implication, as point-wise inclusion is usually not
encountered in plain set-theory.
\<close>
lemmas [intro!] = Int_greatest Un_upper2 Un_upper1 Int_lower1 Int_lower2
type_synonym 'a ctl = "'a set"
definition imp :: "'a ctl \<Rightarrow> 'a ctl \<Rightarrow> 'a ctl" (infixr "\<rightarrow>" 75)
where "p \<rightarrow> q = - p \<union> q"
lemma [intro!]: "p \<inter> p \<rightarrow> q \<subseteq> q" unfolding imp_def by auto
lemma [intro!]: "p \<subseteq> (q \<rightarrow> p)" unfolding imp_def by rule
text \<open>
\<^smallskip>
The CTL path operators are more interesting; they are based on an arbitrary,
but fixed model \<open>\<M>\<close>, which is simply a transition relation over states
\<^typ>\<open>'a\<close>.
\<close>
axiomatization \<M> :: "('a \<times> 'a) set"
text \<open>
The operators \<open>\<^bold>E\<^bold>X\<close>, \<open>\<^bold>E\<^bold>F\<close>, \<open>\<^bold>E\<^bold>G\<close> are taken as primitives, while \<open>\<^bold>A\<^bold>X\<close>,
\<open>\<^bold>A\<^bold>F\<close>, \<open>\<^bold>A\<^bold>G\<close> are defined as derived ones. The formula \<open>\<^bold>E\<^bold>X p\<close> holds in a
state \<open>s\<close>, iff there is a successor state \<open>s'\<close> (with respect to the model
\<open>\<M>\<close>), such that \<open>p\<close> holds in \<open>s'\<close>. The formula \<open>\<^bold>E\<^bold>F p\<close> holds in a state
\<open>s\<close>, iff there is a path in \<open>\<M>\<close>, starting from \<open>s\<close>, such that there exists a
state \<open>s'\<close> on the path, such that \<open>p\<close> holds in \<open>s'\<close>. The formula \<open>\<^bold>E\<^bold>G p\<close>
holds in a state \<open>s\<close>, iff there is a path, starting from \<open>s\<close>, such that for
all states \<open>s'\<close> on the path, \<open>p\<close> holds in \<open>s'\<close>. It is easy to see that \<open>\<^bold>E\<^bold>F
p\<close> and \<open>\<^bold>E\<^bold>G p\<close> may be expressed using least and greatest fixed points
@{cite "McMillan-PhDThesis"}.
\<close>
definition EX ("\<^bold>E\<^bold>X _" [80] 90)
where [simp]: "\<^bold>E\<^bold>X p = {s. \<exists>s'. (s, s') \<in> \<M> \<and> s' \<in> p}"
definition EF ("\<^bold>E\<^bold>F _" [80] 90)
where [simp]: "\<^bold>E\<^bold>F p = lfp (\<lambda>s. p \<union> \<^bold>E\<^bold>X s)"
definition EG ("\<^bold>E\<^bold>G _" [80] 90)
where [simp]: "\<^bold>E\<^bold>G p = gfp (\<lambda>s. p \<inter> \<^bold>E\<^bold>X s)"
text \<open>
\<open>\<^bold>A\<^bold>X\<close>, \<open>\<^bold>A\<^bold>F\<close> and \<open>\<^bold>A\<^bold>G\<close> are now defined dually in terms of \<open>\<^bold>E\<^bold>X\<close>,
\<open>\<^bold>E\<^bold>F\<close> and \<open>\<^bold>E\<^bold>G\<close>.
\<close>
definition AX ("\<^bold>A\<^bold>X _" [80] 90)
where [simp]: "\<^bold>A\<^bold>X p = - \<^bold>E\<^bold>X - p"
definition AF ("\<^bold>A\<^bold>F _" [80] 90)
where [simp]: "\<^bold>A\<^bold>F p = - \<^bold>E\<^bold>G - p"
definition AG ("\<^bold>A\<^bold>G _" [80] 90)
where [simp]: "\<^bold>A\<^bold>G p = - \<^bold>E\<^bold>F - p"
subsection \<open>Basic fixed point properties\<close>
text \<open>
First of all, we use the de-Morgan property of fixed points.
\<close>
lemma lfp_gfp: "lfp f = - gfp (\<lambda>s::'a set. - (f (- s)))"
proof
show "lfp f \<subseteq> - gfp (\<lambda>s. - f (- s))"
proof
show "x \<in> - gfp (\<lambda>s. - f (- s))" if l: "x \<in> lfp f" for x
proof
assume "x \<in> gfp (\<lambda>s. - f (- s))"
then obtain u where "x \<in> u" and "u \<subseteq> - f (- u)"
by (auto simp add: gfp_def)
then have "f (- u) \<subseteq> - u" by auto
then have "lfp f \<subseteq> - u" by (rule lfp_lowerbound)
from l and this have "x \<notin> u" by auto
with \<open>x \<in> u\<close> show False by contradiction
qed
qed
show "- gfp (\<lambda>s. - f (- s)) \<subseteq> lfp f"
proof (rule lfp_greatest)
fix u
assume "f u \<subseteq> u"
then have "- u \<subseteq> - f u" by auto
then have "- u \<subseteq> - f (- (- u))" by simp
then have "- u \<subseteq> gfp (\<lambda>s. - f (- s))" by (rule gfp_upperbound)
then show "- gfp (\<lambda>s. - f (- s)) \<subseteq> u" by auto
qed
qed
lemma lfp_gfp': "- lfp f = gfp (\<lambda>s::'a set. - (f (- s)))"
by (simp add: lfp_gfp)
lemma gfp_lfp': "- gfp f = lfp (\<lambda>s::'a set. - (f (- s)))"
by (simp add: lfp_gfp)
text \<open>
In order to give dual fixed point representations of \<^term>\<open>\<^bold>A\<^bold>F p\<close> and
\<^term>\<open>\<^bold>A\<^bold>G p\<close>:
\<close>
lemma AF_lfp: "\<^bold>A\<^bold>F p = lfp (\<lambda>s. p \<union> \<^bold>A\<^bold>X s)"
by (simp add: lfp_gfp)
lemma AG_gfp: "\<^bold>A\<^bold>G p = gfp (\<lambda>s. p \<inter> \<^bold>A\<^bold>X s)"
by (simp add: lfp_gfp)
lemma EF_fp: "\<^bold>E\<^bold>F p = p \<union> \<^bold>E\<^bold>X \<^bold>E\<^bold>F p"
proof -
have "mono (\<lambda>s. p \<union> \<^bold>E\<^bold>X s)" by rule auto
then show ?thesis by (simp only: EF_def) (rule lfp_unfold)
qed
lemma AF_fp: "\<^bold>A\<^bold>F p = p \<union> \<^bold>A\<^bold>X \<^bold>A\<^bold>F p"
proof -
have "mono (\<lambda>s. p \<union> \<^bold>A\<^bold>X s)" by rule auto
then show ?thesis by (simp only: AF_lfp) (rule lfp_unfold)
qed
lemma EG_fp: "\<^bold>E\<^bold>G p = p \<inter> \<^bold>E\<^bold>X \<^bold>E\<^bold>G p"
proof -
have "mono (\<lambda>s. p \<inter> \<^bold>E\<^bold>X s)" by rule auto
then show ?thesis by (simp only: EG_def) (rule gfp_unfold)
qed
text \<open>
From the greatest fixed point definition of \<^term>\<open>\<^bold>A\<^bold>G p\<close>, we derive as
a consequence of the Knaster-Tarski theorem on the one hand that \<^term>\<open>\<^bold>A\<^bold>G p\<close> is a fixed point of the monotonic function
\<^term>\<open>\<lambda>s. p \<inter> \<^bold>A\<^bold>X s\<close>.
\<close>
lemma AG_fp: "\<^bold>A\<^bold>G p = p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p"
proof -
have "mono (\<lambda>s. p \<inter> \<^bold>A\<^bold>X s)" by rule auto
then show ?thesis by (simp only: AG_gfp) (rule gfp_unfold)
qed
text \<open>
This fact may be split up into two inequalities (merely using transitivity
of \<open>\<subseteq>\<close>, which is an instance of the overloaded \<open>\<le>\<close> in Isabelle/HOL).
\<close>
lemma AG_fp_1: "\<^bold>A\<^bold>G p \<subseteq> p"
proof -
note AG_fp also have "p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p \<subseteq> p" by auto
finally show ?thesis .
qed
lemma AG_fp_2: "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p"
proof -
note AG_fp also have "p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" by auto
finally show ?thesis .
qed
text \<open>
On the other hand, we have from the Knaster-Tarski fixed point theorem that
any other post-fixed point of \<^term>\<open>\<lambda>s. p \<inter> \<^bold>A\<^bold>X s\<close> is smaller than
\<^term>\<open>\<^bold>A\<^bold>G p\<close>. A post-fixed point is a set of states \<open>q\<close> such that \<^term>\<open>q \<subseteq> p \<inter> \<^bold>A\<^bold>X q\<close>. This leads to the following co-induction principle for
\<^term>\<open>\<^bold>A\<^bold>G p\<close>.
\<close>
lemma AG_I: "q \<subseteq> p \<inter> \<^bold>A\<^bold>X q \<Longrightarrow> q \<subseteq> \<^bold>A\<^bold>G p"
by (simp only: AG_gfp) (rule gfp_upperbound)
subsection \<open>The tree induction principle \label{sec:calc-ctl-tree-induct}\<close>
text \<open>
With the most basic facts available, we are now able to establish a few more
interesting results, leading to the \<^emph>\<open>tree induction\<close> principle for \<open>\<^bold>A\<^bold>G\<close>
(see below). We will use some elementary monotonicity and distributivity
rules.
\<close>
lemma AX_int: "\<^bold>A\<^bold>X (p \<inter> q) = \<^bold>A\<^bold>X p \<inter> \<^bold>A\<^bold>X q" by auto
lemma AX_mono: "p \<subseteq> q \<Longrightarrow> \<^bold>A\<^bold>X p \<subseteq> \<^bold>A\<^bold>X q" by auto
lemma AG_mono: "p \<subseteq> q \<Longrightarrow> \<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G q"
by (simp only: AG_gfp, rule gfp_mono) auto
text \<open>
The formula \<^term>\<open>AG p\<close> implies \<^term>\<open>AX p\<close> (we use substitution of
\<open>\<subseteq>\<close> with monotonicity).
\<close>
lemma AG_AX: "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X p"
proof -
have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" by (rule AG_fp_2)
also have "\<^bold>A\<^bold>G p \<subseteq> p" by (rule AG_fp_1)
moreover note AX_mono
finally show ?thesis .
qed
text \<open>
Furthermore we show idempotency of the \<open>\<^bold>A\<^bold>G\<close> operator. The proof is a good
example of how accumulated facts may get used to feed a single rule step.
\<close>
lemma AG_AG: "\<^bold>A\<^bold>G \<^bold>A\<^bold>G p = \<^bold>A\<^bold>G p"
proof
show "\<^bold>A\<^bold>G \<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G p" by (rule AG_fp_1)
next
show "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G \<^bold>A\<^bold>G p"
proof (rule AG_I)
have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G p" ..
moreover have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" by (rule AG_fp_2)
ultimately show "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p" ..
qed
qed
text \<open>
\<^smallskip>
We now give an alternative characterization of the \<open>\<^bold>A\<^bold>G\<close> operator, which
describes the \<open>\<^bold>A\<^bold>G\<close> operator in an ``operational'' way by tree induction:
A state satisfies \<^term>\<open>AG p\<close> iff it satisfies \<open>p\<close>, and whenever \<open>p\<close>
holds in a reachable state \<open>s\<close>, \<open>p\<close> also holds in all successor states
of \<open>s\<close>. We use the co-induction principle
@{thm [source] AG_I} to establish this in a purely algebraic manner.
\<close>
theorem AG_induct: "p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) = \<^bold>A\<^bold>G p"
proof
show "p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) \<subseteq> \<^bold>A\<^bold>G p" (is "?lhs \<subseteq> _")
proof (rule AG_I)
show "?lhs \<subseteq> p \<inter> \<^bold>A\<^bold>X ?lhs"
proof
show "?lhs \<subseteq> p" ..
show "?lhs \<subseteq> \<^bold>A\<^bold>X ?lhs"
proof -
{
have "\<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) \<subseteq> p \<rightarrow> \<^bold>A\<^bold>X p" by (rule AG_fp_1)
also have "p \<inter> p \<rightarrow> \<^bold>A\<^bold>X p \<subseteq> \<^bold>A\<^bold>X p" ..
finally have "?lhs \<subseteq> \<^bold>A\<^bold>X p" by auto
}
moreover
{
have "p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) \<subseteq> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)" ..
also have "\<dots> \<subseteq> \<^bold>A\<^bold>X \<dots>" by (rule AG_fp_2)
finally have "?lhs \<subseteq> \<^bold>A\<^bold>X \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)" .
}
ultimately have "?lhs \<subseteq> \<^bold>A\<^bold>X p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)" ..
also have "\<dots> = \<^bold>A\<^bold>X ?lhs" by (simp only: AX_int)
finally show ?thesis .
qed
qed
qed
next
show "\<^bold>A\<^bold>G p \<subseteq> p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)"
proof
show "\<^bold>A\<^bold>G p \<subseteq> p" by (rule AG_fp_1)
show "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p)"
proof -
have "\<^bold>A\<^bold>G p = \<^bold>A\<^bold>G \<^bold>A\<^bold>G p" by (simp only: AG_AG)
also have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X p" by (rule AG_AX) moreover note AG_mono
also have "\<^bold>A\<^bold>X p \<subseteq> (p \<rightarrow> \<^bold>A\<^bold>X p)" .. moreover note AG_mono
finally show ?thesis .
qed
qed
qed
subsection \<open>An application of tree induction \label{sec:calc-ctl-commute}\<close>
text \<open>
Further interesting properties of CTL expressions may be demonstrated with
the help of tree induction; here we show that \<open>\<^bold>A\<^bold>X\<close> and \<open>\<^bold>A\<^bold>G\<close> commute.
\<close>
theorem AG_AX_commute: "\<^bold>A\<^bold>G \<^bold>A\<^bold>X p = \<^bold>A\<^bold>X \<^bold>A\<^bold>G p"
proof -
have "\<^bold>A\<^bold>G \<^bold>A\<^bold>X p = \<^bold>A\<^bold>X p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G \<^bold>A\<^bold>X p" by (rule AG_fp)
also have "\<dots> = \<^bold>A\<^bold>X (p \<inter> \<^bold>A\<^bold>G \<^bold>A\<^bold>X p)" by (simp only: AX_int)
also have "p \<inter> \<^bold>A\<^bold>G \<^bold>A\<^bold>X p = \<^bold>A\<^bold>G p" (is "?lhs = _")
proof
have "\<^bold>A\<^bold>X p \<subseteq> p \<rightarrow> \<^bold>A\<^bold>X p" ..
also have "p \<inter> \<^bold>A\<^bold>G (p \<rightarrow> \<^bold>A\<^bold>X p) = \<^bold>A\<^bold>G p" by (rule AG_induct)
also note Int_mono AG_mono
ultimately show "?lhs \<subseteq> \<^bold>A\<^bold>G p" by fast
next
have "\<^bold>A\<^bold>G p \<subseteq> p" by (rule AG_fp_1)
moreover
{
have "\<^bold>A\<^bold>G p = \<^bold>A\<^bold>G \<^bold>A\<^bold>G p" by (simp only: AG_AG)
also have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>X p" by (rule AG_AX)
also note AG_mono
ultimately have "\<^bold>A\<^bold>G p \<subseteq> \<^bold>A\<^bold>G \<^bold>A\<^bold>X p" .
}
ultimately show "\<^bold>A\<^bold>G p \<subseteq> ?lhs" ..
qed
finally show ?thesis .
qed
end
|
module Extra.C.Types
-- TODO: C's int and unsigned int are not guaranteed to be 32-bit
export
data Float : Type where [external]
public export
CUInt8 : Type
CUInt8 = Bits8
public export
CUInt16 : Type
CUInt16 = Bits16
public export
CUInt32 : Type
CUInt32 = Bits32
public export
CUInt64 : Type
CUInt64 = Bits64
||| `size_t` in C
public export
SizeT : Type
SizeT = Bits32
||| `ssize_t` in C
public export
SSizeT : Type
SSizeT = Int
public export
CInt32 : Type
CInt32 = Int
||| `int` in C
public export
CInt : Type
CInt = Int
||| `unsigned int` in C
public export
CUInt : Type
CUInt = CUInt32
public export
CChar : Type
CChar = Bits8
public export
CUChar : Type
CUChar = Bits8
public export
CString : Type
CString = Ptr CChar
public export
CDouble : Type
CDouble = Double
--- TODO: this is unholy
||| `SizeT` but uses `Int` for `Data.Int.Order` reasons
public export
SizeInt : Type
SizeInt = Int
|
(* https://coq-math-problems.github.io/Problem1/ *)
Require Import PeanoNat.
Require Import Hack.CMP.Decr.
Require Import Hack.CMP.Bounded.
Require Hack.CMP.Arith.
(* The solution was first prototyped in OCaml in p1.ml, and
the Coq proof work the same way, by noticing that either:
- the valley continues,
- the function from there on has a lower bound, and if not,
- we've eventually reached a bound of 0 *)
Definition eventually_bounded_by_at (f: nat -> nat) (n: nat) (x: nat)
:= forall y, x <= y -> f y <= n.
Definition eventually_bounded_by (f: nat -> nat) (n: nat)
:= exists x, eventually_bounded_by_at f n x.
Definition eventually_bounded (f: nat -> nat)
:= exists n, eventually_bounded_by f n.
Theorem bounded_is_eventually_bounded: forall f, bounded f -> eventually_bounded f.
Proof.
intros.
destruct H as [b pb].
refine (ex_intro _ b _).
refine (ex_intro _ 0 _).
unfold eventually_bounded_by_at.
intros.
exact (pb y).
Qed.
Definition valley (f: nat -> nat)(n x : nat) :=
forall y, (x <= y) -> (y <= x+n) -> f y = f x.
Lemma eventually_bounded_by_0:
forall f,
eventually_bounded_by f 0 ->
forall n, exists x, valley f n x.
Proof.
intros.
destruct H as [x px].
refine (ex_intro _ x _).
unfold valley.
intros.
transitivity 0.
- exact (proj1 (Nat.le_0_r _) (px y H)).
- symmetry.
exact (proj1 (Nat.le_0_r _) (px x (le_n x))).
Qed.
Lemma valley_continues_or_bound_decreases:
forall f n x m,
decr f ->
f x = m -> valley f n x ->
valley f (S n) x \/ (eventually_bounded_by f (pred m)).
Proof.
intros.
case (Nat.eq_dec (f (x + S n)) m).
+ intro.
left.
unfold valley.
intros y G0 G1.
assert (x + S n = S (x + n)). { apply eq_sym; trivial. }
rewrite H2 in G1.
case (Arith.leq_and_not _ _ G1).
- intro.
rewrite H3.
rewrite <- H2.
transitivity m.
* assumption.
* symmetry.
assumption.
- intro.
exact (H1 _ G0 H3).
+ intro.
right.
refine (ex_intro _ (x + S n) _).
intros y H2.
pose (p := H1 (x + n) (Nat.le_add_r _ _) (le_n _)).
rewrite H0 in p.
pose (q := decr_estimate _ H _ _ (proj1 (Nat.add_le_mono_l _ _ x) (le_S n n (le_n _)))).
rewrite p in q.
exact (Nat.le_trans _ _ _ (decr_estimate _ H _ _ H2) (Arith.leq_and_not'' _ _ q n0)).
Qed.
Lemma decr_and_eventually_bounded_by:
forall f n m x,
decr f ->
eventually_bounded_by_at f (S m) x ->
valley f n x \/ (eventually_bounded_by f m).
Proof.
intro f.
induction n.
+ intros.
left.
unfold valley.
intros y H1 H2.
assert (x = y).
- rewrite <- plus_n_O in H2.
exact (Nat.le_antisymm _ _ H1 H2).
- rewrite H3.
reflexivity.
+ intros m x D pb.
case (IHn m x D pb).
- intro.
case (valley_continues_or_bound_decreases f n x (f x) D eq_refl H).
* apply or_introl.
* intros H1.
right.
destruct H1 as [x0 px0].
refine (ex_intro _ x0 _).
intros y H1.
exact (Nat.le_trans _ _ _ (px0 y H1) (Nat.pred_le_mono _ _ (pb x (le_n _)))).
- apply or_intror.
Qed.
Lemma decr_valleys_lemma:
forall m f,
eventually_bounded_by f m -> decr f ->
forall n, exists x, valley f n x.
Proof.
induction m.
+ intros f eB0 D.
exact (eventually_bounded_by_0 f eB0).
+ intros f eBSM D n.
destruct eBSM as [x pb].
case (decr_and_eventually_bounded_by f n m x D pb).
- intro.
refine (ex_intro _ x _).
assumption.
- intro eBM.
exact (IHm f eBM D n).
Qed.
Theorem decr_valleys: forall n f, decr f -> exists x, valley f n x.
Proof.
intros.
destruct (bounded_is_eventually_bounded f (decr_is_bounded f H)) as [b pb].
exact (decr_valleys_lemma b f pb H n).
Qed.
|
State Before: R : Type u_1
inst✝¹ : Rack R
G : Type u_2
inst✝ : Group G
f : R →◃ Quandle.Conj G
a✝ b✝ a'✝ b'✝ : PreEnvelGroup R
ha : PreEnvelGroupRel' R a✝ a'✝
hb : PreEnvelGroupRel' R b✝ b'✝
⊢ mapAux f (PreEnvelGroup.mul a✝ b✝) = mapAux f (PreEnvelGroup.mul a'✝ b'✝)
State After: no goals
Tactic: simp [toEnvelGroup.mapAux, well_def f ha, well_def f hb]
State Before: R : Type u_1
inst✝¹ : Rack R
G : Type u_2
inst✝ : Group G
f : R →◃ Quandle.Conj G
a✝ a'✝ : PreEnvelGroup R
ha : PreEnvelGroupRel' R a✝ a'✝
⊢ mapAux f (PreEnvelGroup.inv a✝) = mapAux f (PreEnvelGroup.inv a'✝)
State After: no goals
Tactic: simp [toEnvelGroup.mapAux, well_def f ha]
State Before: R : Type u_1
inst✝¹ : Rack R
G : Type u_2
inst✝ : Group G
f : R →◃ Quandle.Conj G
a b c : PreEnvelGroup R
⊢ mapAux f (PreEnvelGroup.mul (PreEnvelGroup.mul a b) c) = mapAux f (PreEnvelGroup.mul a (PreEnvelGroup.mul b c))
State After: no goals
Tactic: apply mul_assoc
State Before: R : Type u_1
inst✝¹ : Rack R
G : Type u_2
inst✝ : Group G
f : R →◃ Quandle.Conj G
a : PreEnvelGroup R
⊢ mapAux f (PreEnvelGroup.mul unit a) = mapAux f a
State After: no goals
Tactic: simp [toEnvelGroup.mapAux]
State Before: R : Type u_1
inst✝¹ : Rack R
G : Type u_2
inst✝ : Group G
f : R →◃ Quandle.Conj G
a : PreEnvelGroup R
⊢ mapAux f (PreEnvelGroup.mul a unit) = mapAux f a
State After: no goals
Tactic: simp [toEnvelGroup.mapAux]
State Before: R : Type u_1
inst✝¹ : Rack R
G : Type u_2
inst✝ : Group G
f : R →◃ Quandle.Conj G
a : PreEnvelGroup R
⊢ mapAux f (PreEnvelGroup.mul (PreEnvelGroup.inv a) a) = mapAux f unit
State After: no goals
Tactic: simp [toEnvelGroup.mapAux]
State Before: R : Type u_1
inst✝¹ : Rack R
G : Type u_2
inst✝ : Group G
f : R →◃ Quandle.Conj G
x y : R
⊢ mapAux f (PreEnvelGroup.mul (PreEnvelGroup.mul (incl x) (incl y)) (PreEnvelGroup.inv (incl x))) =
mapAux f (incl (x ◃ y))
State After: no goals
Tactic: simp [toEnvelGroup.mapAux] |
"""
Primary driver for network pairwise.
"""
function network_pairwise(T, V, cfg)::Matrix{T}
# Get input
networkdata = get_network_data(T, V, cfg)
# Get compute flags
flags = get_network_flags(cfg)
# Compute graph data based on compute flags
graphdata = compute_graph_data(networkdata, cfg)
# Send to main kernel
single_ground_all_pairs(graphdata, flags, cfg)
end
function compute_graph_data(data::NetworkData{T,V}, cfg)::GraphData{T,V} where {T,V}
i,j,v = data.coords
idx = findfirst(x -> x < 1, i)
idx != nothing && throw("Indices no good")
idx = findfirst(x -> x < 1, j)
idx != nothing && throw("Indices no good")
m = max(i[end], j[end])
A = sparse(i,j,v,m,m)
A = A + A'
cc = connected_components(SimpleWeightedGraph(A))
t = @elapsed G = laplacian(A)
csinfo("Time taken to construct graph laplacian = $t", cfg["suppress_messages"] in TRUELIST)
# T = eltype(i)
exclude_pairs = Tuple{V,V}[]
nodemap = Matrix{V}(undef,0,0)
polymap = Matrix{V}(undef,0,0)
hbmeta = RasterMeta()
cellmap = Matrix{T}(undef,0,0)
cum = initialize_cum_vectors(v)
GraphData(G, cc, data.fp, data.fp,
exclude_pairs, nodemap, polymap, hbmeta, cellmap, cum)
end
function get_network_flags(cfg)
# Computation flags
is_raster = false
is_advanced = cfg["scenario"] in ADVANCED
is_alltoone = false
is_onetoall = false
grnd_file_is_res = cfg["ground_file_is_resistances"] in TRUELIST
policy = Symbol(cfg["remove_src_or_gnd"])
solver = cfg["solver"]
# Output flags
write_volt_maps = cfg["write_volt_maps"] in TRUELIST
write_cur_maps = cfg["write_cur_maps"] in TRUELIST
write_cum_cur_maps_only = cfg["write_cum_cur_map_only"] in TRUELIST
write_max_cur_maps = cfg["write_max_cur_maps"] in TRUELIST
set_null_currents_to_nodata = cfg["set_null_currents_to_nodata"] in TRUELIST
set_null_voltages_to_nodata = cfg["set_null_voltages_to_nodata"] in TRUELIST
compress_grids = cfg["compress_grids"] in TRUELIST
log_transform_maps = cfg["log_transform_maps"] in TRUELIST
o = OutputFlags(write_volt_maps, write_cur_maps,
write_cum_cur_maps_only, write_max_cur_maps,
set_null_currents_to_nodata, set_null_voltages_to_nodata,
compress_grids, log_transform_maps)
NetworkFlags(is_raster, is_advanced, is_alltoone, is_onetoall,
grnd_file_is_res, policy, solver, o)
end
struct NetworkFlags
is_raster::Bool
is_advanced::Bool
is_alltoone::Bool
is_onetoall::Bool
grnd_file_is_res::Bool
policy::Symbol
solver::String
outputflags::OutputFlags
end
|
function z = plus( x, y )
%PLUS Addition of two TT/MPS tensors.
% Z = PLUS(X,Y) adds two TT/MPS tensors. The rank of the resulting
% tensor is 2*R.
%
% See also MINUS, UMINUS.
% TTeMPS Toolbox.
% Michael Steinlechner, 2013-2016
% Questions and contact: [email protected]
% BSD 2-clause license, see LICENSE.txt
% add sanity check...
rx = x.rank;
ry = y.rank;
nx = x.size;
z = TTeMPS( cell(1, x.order) );
% first core:
p = size(x.U{1},4);
tmp = zeros( 1, nx(1), rx(2)+ry(2), p );
tmp( 1, :, 1:rx(2), : ) = x.U{1};
tmp( 1, :, rx(2)+1:end, : ) = y.U{1};
z.U{1} = tmp;
% central cores:
for i = 2:x.order-1
% possibility of block format:
p = size(x.U{i},4);
tmp = zeros( rx(i)+ry(i), nx(i), rx(i+1)+ry(i+1), p);
tmp( 1:rx(i), :, 1:rx(i+1), :) = x.U{i};
tmp( rx(i)+1:end, :, rx(i+1)+1:end, :) = y.U{i};
z.U{i} = tmp;
end
% last core:
p = size(x.U{end},4);
tmp = zeros( rx(end-1)+ry(end-1), nx(end), 1, p );
tmp( 1:rx(end-1), :, 1, : ) = x.U{end};
tmp( rx(end-1)+1:end, :, 1, : ) = y.U{end};
z.U{end} = tmp;
end
|
The $n$th coefficient of a polynomial $p$ is equal to the $n$th element of the list of coefficients of $p$. |
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝⁴ : LinearOrder α
inst✝³ : TopologicalSpace γ
a b c : α
h : a ≤ b
inst✝² : TopologicalSpace α
inst✝¹ : OrderTopology α
inst✝ : TopologicalSpace β
s : Set ↑(Icc a b)
hs : IsOpen (projIcc a b h ⁻¹' s)
⊢ Subtype.val ⁻¹' (projIcc a b h ⁻¹' s) = s
[PROOFSTEP]
ext
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝⁴ : LinearOrder α
inst✝³ : TopologicalSpace γ
a b c : α
h : a ≤ b
inst✝² : TopologicalSpace α
inst✝¹ : OrderTopology α
inst✝ : TopologicalSpace β
s : Set ↑(Icc a b)
hs : IsOpen (projIcc a b h ⁻¹' s)
x✝ : { x // x ∈ Icc a b }
⊢ x✝ ∈ Subtype.val ⁻¹' (projIcc a b h ⁻¹' s) ↔ x✝ ∈ s
[PROOFSTEP]
simp
|
The Lebesgue integral of any function over the empty set is zero. |
[STATEMENT]
lemma timpls_transformable_to_inv:
assumes "timpls_transformable_to TI (Fun f T) (Fun g S)"
shows "length T = length S"
and "\<And>i. i < length T \<Longrightarrow> timpls_transformable_to TI (T ! i) (S ! i)"
and "f \<noteq> g \<Longrightarrow> (is_Abs f \<and> is_Abs g \<and> (the_Abs f, the_Abs g) \<in> set TI)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length T = length S &&& (\<And>i. i < length T \<Longrightarrow> timpls_transformable_to TI (T ! i) (S ! i)) &&& (f \<noteq> g \<Longrightarrow> is_Abs f \<and> is_Abs g \<and> (the_Abs f, the_Abs g) \<in> set TI)
[PROOF STEP]
using assms list_all2_conv_all_nth[of "timpls_transformable_to TI" T S]
[PROOF STATE]
proof (prove)
using this:
timpls_transformable_to TI (Fun f T) (Fun g S)
list_all2 (timpls_transformable_to TI) T S = (length T = length S \<and> (\<forall>i<length T. timpls_transformable_to TI (T ! i) (S ! i)))
goal (1 subgoal):
1. length T = length S &&& (\<And>i. i < length T \<Longrightarrow> timpls_transformable_to TI (T ! i) (S ! i)) &&& (f \<noteq> g \<Longrightarrow> is_Abs f \<and> is_Abs g \<and> (the_Abs f, the_Abs g) \<in> set TI)
[PROOF STEP]
by auto |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _MSC_VER
#define BOOST_TEST_DYN_LINK
#endif
#include <boost/test/unit_test.hpp>
#include <ignite/ignition.h>
#include <ignite/test_utils.h>
using namespace ignite;
using namespace ignite::common::concurrent;
using namespace boost::unit_test;
/*
* Test setup fixture.
*/
struct ClusterTestSuiteFixture
{
Ignite node;
/*
* Constructor.
*/
ClusterTestSuiteFixture() :
#ifdef IGNITE_TESTS_32
node(ignite_test::StartNode("cache-test-32.xml", "ClusterTest"))
#else
node(ignite_test::StartNode("cache-test.xml", "ClusterTest"))
#endif
{
// No-op.
}
/*
* Destructor.
*/
~ClusterTestSuiteFixture()
{
Ignition::StopAll(true);
}
};
/*
* Test setup fixture.
*/
struct ClusterTestSuiteFixtureIsolated
{
Ignite node;
/*
* Constructor.
*/
ClusterTestSuiteFixtureIsolated() :
#ifdef IGNITE_TESTS_32
node(ignite_test::StartNode("isolated-32.xml", "ClusterTestIsolated"))
#else
node(ignite_test::StartNode("isolated.xml", "ClusterTestIsolated"))
#endif
{
// No-op.
}
/*
* Destructor.
*/
~ClusterTestSuiteFixtureIsolated()
{
Ignition::StopAll(true);
}
};
BOOST_FIXTURE_TEST_SUITE(ClusterTestSuite, ClusterTestSuiteFixture)
BOOST_AUTO_TEST_CASE(IgniteImplProjection)
{
impl::IgniteImpl* impl = impl::IgniteImpl::GetFromProxy(node);
BOOST_REQUIRE(impl != 0);
BOOST_REQUIRE(impl->GetProjection().IsValid());
}
BOOST_AUTO_TEST_CASE(IgniteImplForServers)
{
impl::IgniteImpl* impl = impl::IgniteImpl::GetFromProxy(node);
BOOST_REQUIRE(impl != 0);
SharedPointer<impl::cluster::ClusterGroupImpl> clusterGroup = impl->GetProjection();
BOOST_REQUIRE(clusterGroup.IsValid());
IgniteError err;
BOOST_REQUIRE(clusterGroup.Get()->ForServers().IsValid());
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_FIXTURE_TEST_SUITE(ClusterTestSuiteIsolated, ClusterTestSuiteFixtureIsolated)
BOOST_AUTO_TEST_CASE(IgniteSetActive)
{
BOOST_REQUIRE(node.IsActive());
node.SetActive(false);
BOOST_REQUIRE(!node.IsActive());
node.SetActive(true);
BOOST_REQUIRE(node.IsActive());
}
BOOST_AUTO_TEST_SUITE_END() |
The Jewish History Museum, formerly known as the Jewish Heritage Center of the Southwest, is a museum housed in a historic synagogue building in Tucson, Arizona. The museum’s building, which housed the first synagogue in the Arizona Territory, is the oldest synagogue building in the state. The building operated as a synagogue until 2008, when it was designated a museum. The Holocaust History Center at the Jewish History Museum is an educational institute dedicated to an ongoing examination of the Holocaust through the experiences of individuals who survived the war and later lived in southern Arizona. Two of our residents are recognized in the exhibit: Mike Bokor, a survivor, and Toetie Oberman, whose family helped other families survive the atrocities of the Holocaust. We had the privilege of a private docent tour last week. |
postulate
F : Set → Set
_ : {@0 A : Set} → F λ { → A }
|
On the other hand, opponents of the Bedell plan had a very different view of this legislation. Representatives such as Pat Roberts claimed that the referendum was redundant because the farmers already voted the politicians into office, and this bill was an example of the politicians not doing their jobs. The Reagan Administration opposed the bill because of their opposition to production controls, and the President threatened to veto the farm bill if Bedell's plan was left in place. When the bill got to the floor, an amendment was proposed to strike this provision, and it was passed 251–174.
|
Require Export MicroBFTprops2.
Section MicroBFTass_diss_if_kn.
Local Open Scope eo.
Local Open Scope proc.
Context { dtc : DTimeContext }.
Context { microbft_context : MicroBFT_context }.
Context { m_initial_keys : MicroBFT_initial_keys }.
Context { u_initial_keys : USIG_initial_keys }.
Context { usig_hash : USIG_hash }.
Context { microbft_auth : MicroBFT_auth }.
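(* Roughly: the "disseminate-if-knows" assumption states that whenever a
   replica disseminates a piece of data at an event, that data is known
   (present in the relevant state) after the event. *)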
Lemma ASSUMPTION_disseminate_if_knows_true :
forall (eo : EventOrdering) d, assume_eo eo (ASSUMPTION_disseminate_if_knows d).
Proof.
introv diss.
simpl in *.
exrepnd.
unfold disseminate_data in *.
unfold knows_after. simpl in *.
(* unfold MicroBFT_data_knows.
unfold MicroBFT_data_in_log. *)
unfold state_after. simpl.
(* exists c. *)
unfold M_byz_output_sys_on_event in *; simpl.
rewrite M_byz_output_ls_on_event_as_run in diss0; simpl.
unfold M_byz_output_ls_on_this_one_event in *.
allrw; simpl.
unfold MicroBFTheader.node2name in *. simpl in *; subst.
unfold MicroBFTsys in *. simpl in *.
pose proof (ex_M_byz_run_ls_before_event_MicroBFTlocalSys e (loc e)) as run.
repndors.
{
exrepnd.
rewrite run0 in *. clear run0. simpl in *.
remember (trigger e) as trig. symmetry in Heqtrig.
destruct trig; simpl in *; ginv; tcsp;[].
unfold state_of_trusted in *. simpl in *.
unfold USIG_update in *. destruct i; simpl in *; ginv; subst; tcsp;[|].
{
destruct diss0; simpl in *; ginv; tcsp.
eexists; dands; eauto;[|].
{
eexists; dands; eauto.
erewrite M_state_sys_on_event_unfold.
erewrite map_option_Some.
|
///////////////////////////////////////////////////////////////////////////////
// random::gen_to_random.hpp //
// //
// Copyright 2009 Erwann Rogard. Distributed under the Boost //
// Software License, Version 1.0. (See accompanying file //
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) //
///////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_RANDOM_GEN_TO_RANDOM_HPP_ER_2009
#define BOOST_RANDOM_GEN_TO_RANDOM_HPP_ER_2009
#include <ostream>
#include <boost/call_traits.hpp>
namespace boost{
namespace random{
// Adds the interface of RandomDistribution to a (non-random) generator
template<typename G,typename I = typename G::result_type>
struct gen_to_random{
public:
typedef I input_type;
typedef typename G::result_type result_type;
//Construction
gen_to_random();
gen_to_random(typename call_traits<G>::param_type );
gen_to_random(const gen_to_random&);
gen_to_random& operator=(const gen_to_random&);
// Draws: the URNG argument u is accepted for interface compatibility
// but ignored; values come from the wrapped generator
template<typename U> result_type operator()(U& u);
template<typename U> result_type operator()(U& u)const;
// Access
typename call_traits<G>::const_reference generator()const;
private:
// const so that the const operator() overload can call it; g_ is
// mutable because invoking the generator typically mutates its state
result_type impl()const;
mutable typename call_traits<G>::value_type g_;
};
template<typename G,typename I>
std::ostream& operator<<(std::ostream& os, const gen_to_random<G,I>& g);
// Implementation //
template<typename G,typename I>
std::ostream& operator<<(std::ostream& os, const gen_to_random<G,I>& g)
{
// Stream the wrapped generator (G must itself be streamable); streaming
// g directly here would recurse into this operator without terminating.
return (os << "gen_to_random(" << g.generator() << ')' );
}
template<typename G,typename I>
gen_to_random<G,I>::gen_to_random(){}
template<typename G,typename I>
gen_to_random<G,I>::gen_to_random(typename call_traits<G>::param_type g)
:g_(g){}
template<typename G,typename I>
gen_to_random<G,I>::gen_to_random(const gen_to_random& that)
:g_(that.g_){}
template<typename G,typename I>
gen_to_random<G,I>&
gen_to_random<G,I>::operator=(const gen_to_random& that){
if(&that!=this){
g_ = that.g_;
}
return *this;
}
template<typename G,typename I>
template<typename U>
typename gen_to_random<G,I>::result_type
gen_to_random<G,I>::operator()(U& u){
return (this->impl)();
}
template<typename G,typename I>
template<typename U>
typename gen_to_random<G,I>::result_type
gen_to_random<G,I>::operator()(U& u)const{
return (this->impl)();
}
template<typename G,typename I>
typename call_traits<G>::const_reference
gen_to_random<G,I>::generator()const{
return (this->g_);
}
template<typename G,typename I>
typename gen_to_random<G,I>::result_type
gen_to_random<G,I>::impl()const{
return (this->g_)();
}
}// random
}// boost
#endif
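// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; `counter` is a hypothetical functor, not
// part of this header). Any copyable functor exposing `result_type` and a
// nullary operator() fits the G parameter; the URNG passed at the call site
// is ignored, which is the point of the adapter:
//
//   struct counter{
//       typedef int result_type;
//       int n;
//       int operator()(){ return n++; }
//   };
//
//   boost::random::gen_to_random<counter> dist = counter();
//   boost::mt19937 urng;  // any URNG satisfies the call site; it is unused
//   int a = dist(urng);   // 0
//   int b = dist(urng);   // 1
// ---------------------------------------------------------------------------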
|
library(ggplot2)
library(dplyr)
library(tidyr)
library(patchwork)
library(igraph)
library(calibrate)
library(umap)
library(viridis)
#########################################################################
### REACTOME & viridis color
a = read.table('data/Hippocampus_community.txt', header=T, sep="\t")
ggplot(data.frame(a), aes(x=reorder(gsub("Homo.*","",Term), -P.value), y=-log10(P.value))) +
theme(panel.background = element_rect(fill="grey", color="black"), panel.grid.major = element_line(color="grey90"), panel.grid.minor = element_line(color="grey90")) +
theme(axis.text=element_text(size=22), axis.title=element_text(size=30), text=element_text(size=22)) + xlab(expression("Reactome")) +
geom_bar(stat="identity", fill="darkgreen") + ylab(expression("-log(P-value)")) +
geom_col(aes(fill = Adjusted_P.value)) +
scale_fill_viridis() +
coord_flip() + geom_hline(yintercept=-log10(0.05), color="red") +guides(fill = guide_legend(reverse = FALSE)) + labs(fill="Adjusted P-value")
#########################################################################
### Correlation plot
b = read.table('data/15_member_hippocampal_community.v1.txt', header=T)
rownames(b) = b[,1]
b[is.na(b)] <- 0
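# Symmetrize the one-sided pairwise matrix; entries at or near the maximum
# of 2 (presumably self pairs / always-co-clustered pairs) are set to Inf
# so they do not dominate the colour scale.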
c = ( b[,-1] + t(b[,-1]) )
c[c >1.98] = Inf
c$y = rownames(c)
c <- c %>% gather(x, value, colnames(c)[1:15])
# centered vjust = 0.5
ggplot(c, aes(x, y, fill=value)) + theme(axis.text=element_text(size=20), axis.title=element_text(size=30), text=element_text(size=22)) +
xlab(expression(bold(""))) + ylab(expression(bold(""))) +
geom_tile() + # scale_fill_gradient(low = "white", high = "orange") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=0.5)) + # labs(tag = "B")
scale_fill_viridis()
#########################################################################
### Network
c = ( b[,-1] + t(b[,-1]) )
c[c >1.98] = 1
mat = data.matrix(c)
mat[mat < 0.8] <- 0
network = graph_from_adjacency_matrix( mat, mode="undirected", diag=F, weighted=T)
plot(network, vertex.size=30, vertex.label.font=1, vertex.color="yellow" ,layout=layout_with_kk,
vertex.label.color = "darkgreen", vertex.label.cex=1.3, vertex.label.pos=5, vertex.shape="circle", edge.width=6)
#########################################################################
### Stats on communities
a = read.table('data/CommunitySize.txt', header=T, sep="\t")
wilcox.test(a[c(7:19),]$std, a[-c(7:19),]$std)
par(mar=c(5,6,4,1)+.1)
par(mfrow=c(1,2))
hist(a$n_comm, main="", cex=2.0, cex.lab=2.0, cex.axis=2.0, xlab="Number of Communities", col="darkgreen", xlim=c(0, 260), breaks=20)
#plot(a$mean, a$std, main="", cex=2.0, cex.lab=2.0, cex.axis=2.0, xlab="Mean", ylab="Standard Deviation", col="red")
plot(a$n_comm, a$mean, main="", cex=2.0, cex.lab=2.0, cex.axis=2.0, xlab="Number of Communities", ylab="Mean Community Size", col="red", ylim=c(0, 9), xlim=c(25,300))
textxy(a$n_comm, a$mean, labs=ifelse((a$n_comm > 150) | (a$mean > 7.25), as.character(a$Tissue), ""), col="blue", cex=1.2, pos=3)
###########################################################################
### Summary data on communities
d = read.table('data/CommunitySize.txt', header=T, sep="\t")
p1 = ggplot(data.frame(d), aes(n_comm)) +
geom_histogram(data = data.frame(d), binwidth=10, fill="darkgreen", col="black") +
xlab(expression("Number of Communities")) + ylab(expression("Frequency")) +
theme(plot.title = element_text(hjust = 0.5), axis.title=element_text(size=30), text=element_text(size=30), panel.background = element_rect(fill="white"), panel.grid.major = element_line(color="white"), panel.grid.minor = element_line(color="white")) + labs(tag = expression("a")) + theme( axis.line = element_line(colour = "grey90", size = 2, linetype = "solid")) +
xlim(c(25, 300))
p2 = ggplot(data.frame(d), aes(x=n_comm, y=mean)) + geom_point(size=6, col="darkgreen") + geom_text(aes(label=ifelse((n_comm > 150) , as.character(Tissue), "")),hjust=1, vjust=1, size=8) + xlab(expression("Number of Communities")) + ylab(expression("Mean Size")) +
theme(legend.position="none", plot.title = element_text(hjust = 0.5), axis.title=element_text(size=30), text=element_text(size=30), panel.background = element_rect(fill="white"), panel.grid.major = element_line(color="white"), panel.grid.minor = element_line(color="white")) +
xlim(c(25, 300)) + ylim(c(0, 8)) + labs(tag=expression("b")) + theme( axis.line = element_line(colour = "grey90", size = 2, linetype = "solid"))
# p1+p2
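# 18365 is presumably the total number of genes considered, so
# 18365 - ngenes_no_comm counts genes assigned to at least one community.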
p3 = ggplot(d, aes(x = Tissue, y = 18365 - ngenes_no_comm)) + geom_bar(stat="identity", aes(fill= 18365 - ngenes_no_comm)) + scale_fill_viridis() + theme_minimal() + labs(tag=expression("c")) +
theme(panel.border = element_blank(), legend.position="none", plot.title = element_text(hjust = 0.5), axis.title=element_text(size=30), text=element_text(size=26), panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
theme( axis.line = element_line(colour = "grey90", size = 2, linetype = "solid")) +
xlab(expression("Tissue")) + ylab(expression("Frequency")) + theme(axis.text.x = element_text(angle = 60, hjust = 1))
(p1 + p2) / p3
###########################################################################
#### Variational Autoencoder results
clin.tcga = read.table('data/clinical_data.TCGA.tsv', header=T, fill=T, sep="\t")
enc = read.table('data/encoded_rnaseq_onehidden_warmup_batchnorm.tsv', fill=T, sep="\t")
comb=merge(clin.tcga, enc, by.x=1, by.y=2)
#### Metastasis analysis
comb1 = subset(comb, comb[,9] %in% c("Metastatic", "Additional Metastatic", "Primary Tumor", "Solid Tissue Normal"))
vaeplot<-ggplot(data=comb1, aes(x=sample_type, y=V6)) + geom_boxplot(aes(fill="darkgreen")) + geom_point() + theme_minimal() + labs(tag=expression(bold("C"))) +
theme(panel.border = element_blank(), legend.position="none", plot.title = element_text(hjust = 0.5), axis.title=element_text(size=22), text=element_text(size=18), panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
theme( axis.line = element_line(colour = "grey90", size = 2, linetype = "solid")) +
theme(axis.text.x = element_text(angle = 30, hjust = 1)) + xlab("Sample Type") + ylab("Latent Feature 4 Activation") +
scale_fill_brewer(palette="Dark2") # + geom_jitter(shape=16, position=position_jitter(0.2))
vaeplot
### Compare
wilcox.test(comb1[comb1$sample_type=="Primary Tumor",]$V6, comb1[comb1$sample_type=="Metastatic",]$V6)
# data: comb1[comb1$sample_type == "Primary Tumor", ]$V6 and comb1[comb1$sample_type == "Metastatic", ]$V6
# W = 632560, p-value < 2.2e-16
# alternative hypothesis: true location shift is not equal to 0
wilcox.test(comb1[comb1$sample_type=="Solid Tissue Normal",]$V6, comb1[comb1$sample_type=="Metastatic",]$V6)
# data: comb1[comb1$sample_type == "Solid Tissue Normal", ]$V6 and comb1[comb1$sample_type == "Metastatic", ]$V6
# W = 64866, p-value < 2.2e-16
# alternative hypothesis: true location shift is not equal to 0
###########################################################################
### Stemness
stem = read.table('data/VAE_with_StemnessScore.v1.txt', header=T, sep="\t")
stemness = ggplot(stem, aes(x=log2(X4), y=log2(DNAss) )) + geom_point(size=6, col="yellow") + xlab(expression("Latent Feature 4 Activation")) + ylab(expression("Stemness Index")) +
theme(legend.position="none", plot.title = element_text(hjust = 0.5), axis.title=element_text(size=22), text=element_text(size=18), panel.background = element_rect(fill="white"), panel.grid.major = element_line(color="white"), panel.grid.minor = element_line(color="white")) +
labs(tag=expression(bold("D"))) + theme( axis.line = element_line(colour = "grey90", size = 2, linetype = "solid")) +
# stat_summary(fun.data=mean_cl_normal) +
geom_smooth(method='lm', formula= y~x, col="darkgreen")
|
The coefficients of a polynomial with a leading coefficient $a$ are the same as the coefficients of the original polynomial with $a$ prepended. |
State Before: ⊢ bitwise and = land State After: case h.h
m n : ℤ
⊢ bitwise and m n = land m n Tactic: funext m n State Before: case h.h
m n : ℤ
⊢ bitwise and m n = land m n State After: case h.h.negSucc.ofNat
m n : ℕ
⊢ ↑(Nat.bitwise' (fun x y => !x && y) m n) = land -[m+1] (ofNat n)
case h.h.negSucc.negSucc
m n : ℕ
⊢ -[Nat.bitwise' (fun x y => !(!x && !y)) m n+1] = land -[m+1] -[n+1] Tactic: cases' m with m m <;> cases' n with n n <;> try {rfl}
<;> simp only [bitwise, natBitwise, Bool.not_false, Bool.or_true,
cond_false, cond_true, lor, Nat.ldiff', Bool.and_true, negSucc.injEq,
Bool.and_false, Nat.land'] State Before: case h.h.negSucc.ofNat
m n : ℕ
⊢ ↑(Nat.bitwise' (fun x y => !x && y) m n) = land -[m+1] (ofNat n)
case h.h.negSucc.negSucc
m n : ℕ
⊢ -[Nat.bitwise' (fun x y => !(!x && !y)) m n+1] = land -[m+1] -[n+1] State After: case h.h.negSucc.negSucc
m n : ℕ
⊢ -[Nat.bitwise' (fun x y => !(!x && !y)) m n+1] = land -[m+1] -[n+1] Tactic: . rw [Nat.bitwise'_swap, Function.swap]
congr
funext x y
cases x <;> cases y <;> rfl
rfl State Before: case h.h.negSucc.negSucc
m n : ℕ
⊢ -[Nat.bitwise' (fun x y => !(!x && !y)) m n+1] = land -[m+1] -[n+1] State After: no goals Tactic: . congr
funext x y
cases x <;> cases y <;> rfl State Before: case h.h.negSucc.ofNat
m n : ℕ
⊢ ↑(Nat.bitwise' (fun x y => !x && y) m n) = land -[m+1] (ofNat n) State After: case h.h.negSucc.ofNat
m n : ℕ
⊢ ↑(Nat.bitwise' (fun y x => !x && y) n m) = land -[m+1] (ofNat n)
case h.h.negSucc.ofNat
m n : ℕ
⊢ (!false && false) = false Tactic: rw [Nat.bitwise'_swap, Function.swap] State Before: case h.h.negSucc.ofNat
m n : ℕ
⊢ ↑(Nat.bitwise' (fun y x => !x && y) n m) = land -[m+1] (ofNat n)
case h.h.negSucc.ofNat
m n : ℕ
⊢ (!false && false) = false State After: case h.h.negSucc.ofNat.e_a.e_f
m n : ℕ
⊢ (fun y x => !x && y) = fun a b => a && !b
case h.h.negSucc.ofNat
m n : ℕ
⊢ (!false && false) = false Tactic: congr State Before: case h.h.negSucc.ofNat.e_a.e_f
m n : ℕ
⊢ (fun y x => !x && y) = fun a b => a && !b
case h.h.negSucc.ofNat
m n : ℕ
⊢ (!false && false) = false State After: case h.h.negSucc.ofNat.e_a.e_f.h.h
m n : ℕ
x y : Bool
⊢ (!y && x) = (x && !y)
case h.h.negSucc.ofNat
m n : ℕ
⊢ (!false && false) = false Tactic: funext x y State Before: case h.h.negSucc.ofNat.e_a.e_f.h.h
m n : ℕ
x y : Bool
⊢ (!y && x) = (x && !y)
case h.h.negSucc.ofNat
m n : ℕ
⊢ (!false && false) = false State After: case h.h.negSucc.ofNat
m n : ℕ
⊢ (!false && false) = false Tactic: cases x <;> cases y <;> rfl State Before: case h.h.negSucc.ofNat
m n : ℕ
⊢ (!false && false) = false State After: no goals Tactic: rfl State Before: case h.h.negSucc.negSucc
m n : ℕ
⊢ -[Nat.bitwise' (fun x y => !(!x && !y)) m n+1] = land -[m+1] -[n+1] State After: case h.h.negSucc.negSucc.e_a.e_f
m n : ℕ
⊢ (fun x y => !(!x && !y)) = or Tactic: congr State Before: case h.h.negSucc.negSucc.e_a.e_f
m n : ℕ
⊢ (fun x y => !(!x && !y)) = or State After: case h.h.negSucc.negSucc.e_a.e_f.h.h
m n : ℕ
x y : Bool
⊢ (!(!x && !y)) = (x || y) Tactic: funext x y State Before: case h.h.negSucc.negSucc.e_a.e_f.h.h
m n : ℕ
x y : Bool
⊢ (!(!x && !y)) = (x || y) State After: no goals Tactic: cases x <;> cases y <;> rfl |
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : NonUnitalNonAssocSemiring R
a : R
l : List R
h : ∀ (b : R), b ∈ l → Commute a b
⊢ Commute a (sum l)
[PROOFSTEP]
induction' l with x xs ih
[GOAL]
case nil
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : NonUnitalNonAssocSemiring R
a : R
l : List R
h✝ : ∀ (b : R), b ∈ l → Commute a b
h : ∀ (b : R), b ∈ [] → Commute a b
⊢ Commute a (sum [])
[PROOFSTEP]
exact Commute.zero_right _
[GOAL]
case cons
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : NonUnitalNonAssocSemiring R
a : R
l : List R
h✝ : ∀ (b : R), b ∈ l → Commute a b
x : R
xs : List R
ih : (∀ (b : R), b ∈ xs → Commute a b) → Commute a (sum xs)
h : ∀ (b : R), b ∈ x :: xs → Commute a b
⊢ Commute a (sum (x :: xs))
[PROOFSTEP]
rw [List.sum_cons]
[GOAL]
case cons
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : NonUnitalNonAssocSemiring R
a : R
l : List R
h✝ : ∀ (b : R), b ∈ l → Commute a b
x : R
xs : List R
ih : (∀ (b : R), b ∈ xs → Commute a b) → Commute a (sum xs)
h : ∀ (b : R), b ∈ x :: xs → Commute a b
⊢ Commute a (x + sum xs)
[PROOFSTEP]
exact (h _ <| mem_cons_self _ _).add_right (ih fun j hj => h _ <| mem_cons_of_mem _ hj)
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CanonicallyOrderedMonoid M
l : List M
h : ∀ (x : M), x ∈ l → x = 1
⊢ prod l = 1
[PROOFSTEP]
rw [List.eq_replicate.2 ⟨_, h⟩, prod_replicate, one_pow]
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CanonicallyOrderedMonoid M
l : List M
h : ∀ (x : M), x ∈ l → x = 1
⊢ ℕ
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CanonicallyOrderedMonoid M
l : List M
h : ∀ (x : M), x ∈ l → x = 1
⊢ length l = ?m.7726
[PROOFSTEP]
exact (length l)
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CanonicallyOrderedMonoid M
l : List M
h : ∀ (x : M), x ∈ l → x = 1
⊢ length l = length l
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
l : List ℤ
h : prod l = -1
⊢ -1 ∈ l
[PROOFSTEP]
obtain ⟨x, h₁, h₂⟩ := exists_mem_ne_one_of_prod_ne_one (ne_of_eq_of_ne h (by decide))
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
l : List ℤ
h : prod l = -1
⊢ -1 ≠ 1
[PROOFSTEP]
decide
[GOAL]
case intro.intro
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
l : List ℤ
h : prod l = -1
x : ℤ
h₁ : x ∈ l
h₂ : x ≠ 1
⊢ -1 ∈ l
[PROOFSTEP]
exact
Or.resolve_left (Int.isUnit_iff.mp (prod_isUnit_iff.mp (h.symm ▸ IsUnit.neg isUnit_one : IsUnit l.prod) x h₁)) h₂ ▸ h₁
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
L : List ℕ
h : ∀ (i : ℕ), i ∈ L → 1 ≤ i
⊢ length L ≤ sum L
[PROOFSTEP]
induction' L with j L IH h
[GOAL]
case nil
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
L : List ℕ
h✝ : ∀ (i : ℕ), i ∈ L → 1 ≤ i
h : ∀ (i : ℕ), i ∈ [] → 1 ≤ i
⊢ length [] ≤ sum []
[PROOFSTEP]
simp
[GOAL]
case cons
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
L✝ : List ℕ
h✝ : ∀ (i : ℕ), i ∈ L✝ → 1 ≤ i
j : ℕ
L : List ℕ
IH : (∀ (i : ℕ), i ∈ L → 1 ≤ i) → length L ≤ sum L
h : ∀ (i : ℕ), i ∈ j :: L → 1 ≤ i
⊢ length (j :: L) ≤ sum (j :: L)
[PROOFSTEP]
rw [sum_cons, length, add_comm]
[GOAL]
case cons
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
L✝ : List ℕ
h✝ : ∀ (i : ℕ), i ∈ L✝ → 1 ≤ i
j : ℕ
L : List ℕ
IH : (∀ (i : ℕ), i ∈ L → 1 ≤ i) → length L ≤ sum L
h : ∀ (i : ℕ), i ∈ j :: L → 1 ≤ i
⊢ 1 + length L ≤ j + sum L
[PROOFSTEP]
exact add_le_add (h _ (mem_cons_self _ _)) (IH fun i hi => h i (mem_cons.2 (Or.inr hi)))
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CommMonoid M
a : M
l : List M
ha : a ∈ l
⊢ a ∣ prod l
[PROOFSTEP]
let ⟨s, t, h⟩ := mem_split ha
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CommMonoid M
a : M
l : List M
ha : a ∈ l
s t : List M
h : l = s ++ a :: t
⊢ a ∣ prod l
[PROOFSTEP]
rw [h, prod_append, prod_cons, mul_left_comm]
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CommMonoid M
a : M
l : List M
ha : a ∈ l
s t : List M
h : l = s ++ a :: t
⊢ a ∣ a * (prod s * prod t)
[PROOFSTEP]
exact dvd_mul_right _ _
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Semiring R
a : R
l : List R
h : ∀ (x : R), x ∈ l → a ∣ x
⊢ a ∣ sum l
[PROOFSTEP]
induction' l with x l ih
[GOAL]
case nil
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Semiring R
a : R
l : List R
h✝ : ∀ (x : R), x ∈ l → a ∣ x
h : ∀ (x : R), x ∈ [] → a ∣ x
⊢ a ∣ sum []
[PROOFSTEP]
exact dvd_zero _
[GOAL]
case cons
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Semiring R
a : R
l✝ : List R
h✝ : ∀ (x : R), x ∈ l✝ → a ∣ x
x : R
l : List R
ih : (∀ (x : R), x ∈ l → a ∣ x) → a ∣ sum l
h : ∀ (x_1 : R), x_1 ∈ x :: l → a ∣ x_1
⊢ a ∣ sum (x :: l)
[PROOFSTEP]
rw [List.sum_cons]
[GOAL]
case cons
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Semiring R
a : R
l✝ : List R
h✝ : ∀ (x : R), x ∈ l✝ → a ∣ x
x : R
l : List R
ih : (∀ (x : R), x ∈ l → a ∣ x) → a ∣ sum l
h : ∀ (x_1 : R), x_1 ∈ x :: l → a ∣ x_1
⊢ a ∣ x + sum l
[PROOFSTEP]
exact dvd_add (h _ (mem_cons_self _ _)) (ih fun x hx => h x (mem_cons_of_mem _ hx))
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CommGroup α
l₂ : List α
⊢ alternatingProd ([] ++ l₂) = alternatingProd [] * alternatingProd l₂ ^ (-1) ^ length []
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CommGroup α
a : α
l₁ l₂ : List α
⊢ alternatingProd (a :: l₁ ++ l₂) = alternatingProd (a :: l₁) * alternatingProd l₂ ^ (-1) ^ length (a :: l₁)
[PROOFSTEP]
simp_rw [cons_append, alternatingProd_cons, alternatingProd_append, length_cons, pow_succ, neg_mul, one_mul, zpow_neg, ←
div_eq_mul_inv, div_div]
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CommGroup α
⊢ alternatingProd (reverse []) = alternatingProd [] ^ (-1) ^ (length [] + 1)
[PROOFSTEP]
simp only [alternatingProd_nil, one_zpow, reverse_nil]
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CommGroup α
a : α
l : List α
⊢ alternatingProd (reverse (a :: l)) = alternatingProd (a :: l) ^ (-1) ^ (length (a :: l) + 1)
[PROOFSTEP]
simp_rw [reverse_cons, alternatingProd_append, alternatingProd_reverse, alternatingProd_singleton, alternatingProd_cons,
length_reverse, length, pow_succ, neg_mul, one_mul, zpow_neg, inv_inv]
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : CommGroup α
a : α
l : List α
⊢ (alternatingProd l ^ (-1) ^ length l)⁻¹ * a ^ (-1) ^ length l = (a / alternatingProd l) ^ (-1) ^ length l
[PROOFSTEP]
rw [mul_comm, ← div_eq_mul_inv, div_zpow]
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Monoid M
⊢ ∀ (l : List M), op (prod l) = prod (reverse (map op l))
[PROOFSTEP]
intro l
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Monoid M
l : List M
⊢ op (prod l) = prod (reverse (map op l))
[PROOFSTEP]
induction l with
| nil => rfl
| cons x xs ih => rw [List.prod_cons, List.map_cons, List.reverse_cons', List.prod_concat, op_mul, ih]
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Monoid M
l : List M
⊢ op (prod l) = prod (reverse (map op l))
[PROOFSTEP]
induction l with
| nil => rfl
| cons x xs ih => rw [List.prod_cons, List.map_cons, List.reverse_cons', List.prod_concat, op_mul, ih]
[GOAL]
case nil
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Monoid M
⊢ op (prod []) = prod (reverse (map op []))
[PROOFSTEP]
| nil => rfl
[GOAL]
case nil
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Monoid M
⊢ op (prod []) = prod (reverse (map op []))
[PROOFSTEP]
rfl
[GOAL]
case cons
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Monoid M
x : M
xs : List M
ih : op (prod xs) = prod (reverse (map op xs))
⊢ op (prod (x :: xs)) = prod (reverse (map op (x :: xs)))
[PROOFSTEP]
| cons x xs ih => rw [List.prod_cons, List.map_cons, List.reverse_cons', List.prod_concat, op_mul, ih]
[GOAL]
case cons
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Monoid M
x : M
xs : List M
ih : op (prod xs) = prod (reverse (map op xs))
⊢ op (prod (x :: xs)) = prod (reverse (map op (x :: xs)))
[PROOFSTEP]
rw [List.prod_cons, List.map_cons, List.reverse_cons', List.prod_concat, op_mul, ih]
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝ : Monoid M
l : List Mᵐᵒᵖ
⊢ unop (prod l) = prod (reverse (map unop l))
[PROOFSTEP]
rw [← op_inj, op_unop, MulOpposite.op_list_prod, map_reverse, map_map, reverse_reverse, op_comp_unop, map_id]
[GOAL]
ι : Type u_1
α : Type u_2
M : Type u_3
N : Type u_4
P : Type u_5
M₀ : Type u_6
G : Type u_7
R : Type u_8
inst✝² : Monoid M
inst✝¹ : Monoid N
F : Type u_9
inst✝ : MonoidHomClass F M Nᵐᵒᵖ
f : F
l : List M
⊢ unop (↑f (prod l)) = prod (reverse (map (unop ∘ ↑f) l))
[PROOFSTEP]
rw [map_list_prod f l, MulOpposite.unop_list_prod, List.map_map]
|
From discprob.basic Require Import base sval order monad bigop_ext nify.
From mathcomp Require Import ssrfun ssreflect eqtype ssrbool seq fintype choice.
Require Import Reals Psatz.
From discprob.monad.idxval Require Import fin_ival fin_ival_dist.
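(* An [ival_couplingP Is1 Is2 P] packages a coupling of the indexed
   valuations [Is1] and [Is2]: a witness valuation over P-related pairs
   whose first and second marginals are equivalent to [Is1] and [Is2]. *)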
Record ival_couplingP {A1 A2} (Is1: ival A1) (Is2: ival A2) (P: A1 → A2 → Prop) : Type :=
mkICoupling { ic_witness :> ival {xy: A1 * A2 | P (fst xy) (snd xy)};
ic_proj1: eq_ival Is1 (x ← ic_witness; mret (fst (sval x)));
ic_proj2: eq_ival Is2 (x ← ic_witness; mret (snd (sval x)));
}.
Record idist_couplingP {A1 A2} (Is1: ivdist A1) (Is2: ivdist A2) (P: A1 → A2 → Prop) : Type :=
mkIdCoupling { idc_witness :> ivdist {xy: A1 * A2 | P (fst xy) (snd xy)};
idc_proj1: eq_ivd Is1 (x ← idc_witness; mret (fst (sval x)));
idc_proj2: eq_ivd Is2 (x ← idc_witness; mret (snd (sval x)));
}.
From mathcomp Require Import bigop.
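(* A coupling of the underlying indexed valuations lifts to a coupling of
   distributions: the witness inherits total mass 1 from [I1] through the
   first-marginal equation. *)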
Lemma ic_coupling_to_id {A1 A2} (I1: ivdist A1) (I2: ivdist A2) P:
ival_couplingP I1 I2 P →
idist_couplingP I1 I2 P.
Proof.
intros [Ic Hproj1 Hproj2].
assert (Hsum1: \big[Rplus/0%R]_i fin_ival.val Ic i = 1%R).
{
replace 1%R with R1 by auto.
rewrite -(val_sum1 I1).
eapply (eq_ival_sum _ _ (λ x, true)) in Hproj1. rewrite Hproj1.
unshelve (eapply sum_reidx_surj1).
- intros ic. exact (existT ic tt).
- rewrite //= => *. by rewrite Rmult_1_r.
- rewrite //= => *; split; auto.
rewrite /index_enum -enumT. apply mem_enum.
- intros (b&[]) _ _.
exists b. repeat split; auto.
- by rewrite /index_enum -enumT enum_uniq.
- by rewrite /index_enum -enumT enum_uniq.
- intros ??. inversion 1; auto.
}
exists {| ivd_ival := Ic; val_sum1 := Hsum1 |}; auto.
Qed.
Local Open Scope R_scope.
Record ival_coupling_nondepP {A1 A2} (Is1: ival A1) (Is2: ival A2) (P: A1 → A2 → Prop) : Type :=
mkNonDepICoupling { ic_nondep_witness :> ival (A1 * A2);
ic_nondep_support :
∀ xy, (∃ i, ind ic_nondep_witness i = xy
∧ val ic_nondep_witness i > 0) → P (fst xy) (snd xy);
ic_nondep_proj1: eq_ival Is1 (x ← ic_nondep_witness; mret (fst x));
ic_nondep_proj2: eq_ival Is2 (x ← ic_nondep_witness; mret (snd x));
}.
Lemma ival_coupling_nondep_suffice {A1 A2} Is1 Is2 (P: A1 → A2 → Prop):
ival_coupling_nondepP Is1 Is2 P →
ival_couplingP Is1 Is2 P.
Proof.
intros [Ic Isupp Hp1 Hp2].
exists (ival_equip Ic _ Isupp).
- setoid_rewrite Hp1.
setoid_rewrite (ival_bind_P Ic).
eapply ival_bind_congr; first reflexivity.
intros (a1&a2) => //=; reflexivity.
- setoid_rewrite Hp2.
setoid_rewrite (ival_bind_P Ic).
eapply ival_bind_congr; first reflexivity.
intros (a1&a2) => //=; reflexivity.
Qed.
Lemma ival_coupling_refl {A} (I: ival A) : ival_couplingP I I (λ x y, x = y).
Proof.
unshelve (eexists).
{ refine (x ← I; mret (exist _ (x, x) _)).
done. }
- setoid_rewrite ival_bind_mret_mret. setoid_rewrite ival_right_id. reflexivity.
- setoid_rewrite ival_bind_mret_mret. setoid_rewrite ival_right_id. reflexivity.
Qed.
Lemma ival_coupling_sym {A1 A2} (Is1: ival A1) (Is2: ival A2) P
(Ic: ival_couplingP Is1 Is2 P): ival_couplingP Is2 Is1 (λ x y, P y x).
Proof.
destruct Ic as [Ic Hp1 Hp2].
unshelve (eexists).
{ refine (mbind _ Ic). intros ((x&y)&HP).
refine (mret (exist _ (y, x) _)); auto. }
* setoid_rewrite Hp2. setoid_rewrite ival_assoc. apply ival_bind_congr; first by reflexivity.
intros ((x&y)&Hpf). setoid_rewrite ival_left_id => //=. reflexivity.
* setoid_rewrite Hp1. setoid_rewrite ival_assoc; apply ival_bind_congr; first by reflexivity.
intros ((x&y)&Hpf). setoid_rewrite ival_left_id => //=. reflexivity.
Qed.
Lemma ival_coupling_eq {A} (Is1 Is2: ival A)
(Ic: ival_couplingP Is1 Is2 (λ x y, x = y)): eq_ival Is1 Is2.
Proof.
destruct Ic as [Ic Hp1 Hp2].
setoid_rewrite Hp1. setoid_rewrite Hp2.
apply ival_bind_congr; first by reflexivity.
intros ((x&y)&Heq) => //=. rewrite //= in Heq; subst; reflexivity.
Qed.
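(* Couplings compose along monadic bind: from a coupling of the inputs and,
   for every P-related pair, a coupling of the continuations, we obtain a
   coupling of the bound computations. *)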
Lemma ival_coupling_bind {A1 A2 B1 B2} (f1: A1 → ival B1) (f2: A2 → ival B2)
Is1 Is2 P Q (Ic: ival_couplingP Is1 Is2 P):
(∀ x y, P x y → ival_couplingP (f1 x) (f2 y) Q) →
ival_couplingP (mbind f1 Is1) (mbind f2 Is2) Q.
Proof.
intros Hfc.
destruct Ic as [Ic Hp1 Hp2].
unshelve (eexists).
{ refine (xy ← Ic; _). destruct xy as ((x&y)&HP).
exact (Hfc _ _ HP).
}
- setoid_rewrite Hp1; setoid_rewrite ival_assoc;
apply ival_bind_congr; first by reflexivity.
intros ((x&y)&HP). setoid_rewrite ival_left_id => //=.
destruct (Hfc x y); auto.
- setoid_rewrite Hp2; setoid_rewrite ival_assoc;
apply ival_bind_congr; first by reflexivity.
intros ((x&y)&HP). setoid_rewrite ival_left_id => //=.
destruct (Hfc x y); auto.
Qed.
Lemma ival_coupling_mret {A1 A2} x y (P: A1 → A2 → Prop):
P x y →
ival_couplingP (mret x) (mret y) P.
Proof.
intros HP. exists (mret (exist _ (x, y) HP)); setoid_rewrite ival_left_id => //=; reflexivity.
Qed.
Lemma ival_coupling_conseq {A1 A2} (P1 P2: A1 → A2 → Prop) (I1: ival A1) (I2: ival A2):
(∀ x y, P1 x y → P2 x y) →
ival_couplingP I1 I2 P1 →
ival_couplingP I1 I2 P2.
Proof.
intros HP [Ic Hp1 Hp2].
unshelve (eexists).
{ refine (x ← Ic; mret _).
destruct x as (x&P).
exists x. auto. }
- setoid_rewrite Hp1.
setoid_rewrite ival_assoc.
eapply ival_bind_congr; first reflexivity; intros (?&?).
setoid_rewrite ival_left_id; reflexivity.
- setoid_rewrite Hp2.
setoid_rewrite ival_assoc.
eapply ival_bind_congr; first reflexivity; intros (?&?).
setoid_rewrite ival_left_id; reflexivity.
Qed.
Lemma ival_coupling_plus' {A1 A2} p
(P : A1 → A2 → Prop) (I1 I1': ival A1) (I2 I2': ival A2) :
ival_couplingP I1 I2 P →
ival_couplingP I1' I2' P →
ival_couplingP (iplus (iscale p I1) (iscale (1 - p) I1'))
(iplus (iscale p I2) (iscale (1 - p) I2')) P.
Proof.
intros [Ic Hp1 Hp2] [Ic' Hp1' Hp2'].
exists (fin_ival.iplus (fin_ival.iscale p Ic) (fin_ival.iscale (1 - p) Ic')).
- setoid_rewrite fin_ival.ival_plus_bind.
setoid_rewrite fin_ival.ival_scale_bind.
setoid_rewrite Hp1.
setoid_rewrite Hp1'.
reflexivity.
- setoid_rewrite fin_ival.ival_plus_bind.
setoid_rewrite fin_ival.ival_scale_bind.
setoid_rewrite Hp2.
setoid_rewrite Hp2'.
reflexivity.
Qed.
Lemma ival_coupling_plus {A1 A2} p Hpf Hpf'
(P : A1 → A2 → Prop) (I1 I1': ivdist A1) (I2 I2': ivdist A2) :
ival_couplingP I1 I2 P →
ival_couplingP I1' I2' P →
ival_couplingP (ivdplus p Hpf I1 I1') (ivdplus p Hpf' I2 I2') P.
Proof.
rewrite //=. apply ival_coupling_plus'.
Qed.
Lemma ival_coupling_idxOf {A1 A2} I1 I2 (P: A1 → A2 → Prop):
ival_couplingP I1 I2 P →
{ Q : {Q: idx I1 → idx I2 → Prop | (∀ i1 i1' i2, Q i1 i2 → Q i1' i2 → i1 = i1')} &
ival_couplingP (idxOf I1) (idxOf I2) (λ i1 i2, P (ind I1 i1) (ind I2 i2) ∧
val I1 i1 > 0 ∧ val I2 i2 > 0 ∧
(sval Q) i1 i2 )}.
Proof.
intros [Ic Hp1 Hp2].
symmetry in Hp1. symmetry in Hp2.
apply ClassicalEpsilon.constructive_indefinite_description in Hp1.
destruct Hp1 as (h1&Hp1).
apply ClassicalEpsilon.constructive_indefinite_description in Hp1.
destruct Hp1 as (h1'&Hp1).
apply ClassicalEpsilon.constructive_indefinite_description in Hp2.
destruct Hp2 as (h2&Hp2).
apply ClassicalEpsilon.constructive_indefinite_description in Hp2.
destruct Hp2 as (h2'&Hp2).
assert (Hgt_coerce: ∀ xy : countable.support (val Ic),
Rgt_dec (val Ic (projT1 (@existT _ (λ _, unit) (sval xy) tt)) * 1) 0).
{ abstract (intros (?&?); rewrite Rmult_1_r => //=; destruct Rgt_dec; auto). }
unshelve (eexists).
exists (λ i1 i2, ∃ ic1 ic2, sval (h1 ic1) = i1 ∧ sval (h2 ic2) = i2 ∧ sval ic1 = sval ic2).
{
abstract (intros i1 i1' i2 (ic1&ic2&Heq1&Heq2&Hcoh) (ic1'&ic2'&Heq1'&Heq2'&Hcoh');
assert (ic2' = ic2);
first ( abstract (destruct Hp2 as (Hbij2&?);
rewrite -(Hbij2 ic2');
rewrite -(Hbij2 ic2);
f_equal; apply sval_inj_pred; congruence));
abstract (subst; do 2 f_equal; apply sval_inj_pred; auto; congruence)). }
unshelve (eexists).
- refine (xy ← supp_idxOf Ic; mret _).
unshelve (refine (exist _ ((sval (h1 _)), sval (h2 _)) _)).
* exact (exist _ (existT (sval xy) tt) (Hgt_coerce xy)).
* exact (exist _ (existT (sval xy) tt) (Hgt_coerce xy)).
*
abstract(
split; [ by
(abstract (rewrite //=;
destruct Hp1 as (?&?&->&?) => //=;
destruct Hp2 as (?&?&->&?) => //=;
destruct (ind Ic (sval xy)) => //=)) |];
split; [ by
(abstract (rewrite //=; try destruct (h1 _); try destruct (h2 _); rewrite //=;
destruct Rgt_dec => //=)) |];
split; [ by
(abstract (rewrite //=; try destruct (h1 _); try destruct (h2 _); rewrite //=;
destruct Rgt_dec => //=)) |];
abstract (
rewrite //=;
exists ((exist _ (existT (sval xy) tt) (Hgt_coerce xy)));
exists ((exist _ (existT (sval xy) tt) (Hgt_coerce xy))); done)
).
- setoid_rewrite ival_bind_mret_mret.
symmetry.
unshelve (eexists).
{
simpl. intros ((ic&?)&Hgt).
simpl in h1.
rewrite Rmult_1_r in Hgt.
destruct ic as (ic&Hgtc).
apply h1.
exists (existT ic tt).
simpl in *. rewrite Rmult_1_r. done.
}
unshelve (eexists).
{ simpl. intros ix.
destruct (h1' ix) as ((i1&?)&Hgt).
simpl in Hgt.
unshelve (eexists).
{ unshelve (refine (existT _ tt)).
exists i1. rewrite Rmult_1_r in Hgt. done.
}
rewrite //=.
}
rewrite //=.
repeat split.
* intros (((a&Hgt)&[])&?). destruct Hp1 as (Hinv1&Hinv1'&?).
rewrite //=. destruct (Rmult_1_r _). rewrite /eq_rect_r //=.
rewrite Hinv1 => //=.
apply sval_inj_pred => //=.
f_equal.
apply sval_inj_pred => //=.
* rewrite //= => a. destruct Hp1 as (Hinv1&Hinv1'&?).
specialize (Hinv1' a).
destruct (h1' a) as ((?&[])&?).
rewrite //=.
rewrite -Hinv1'. destruct (Rmult_1_r (val Ic x)) => //=.
* rewrite //=. intros (((a&Hgt)&[])&?). destruct Hp1 as (Hinv1&Hinv1'&?).
rewrite //=. destruct (Rmult_1_r _). rewrite /eq_rect_r //=.
f_equal. f_equal.
apply sval_inj_pred => //=.
* rewrite //=. intros (((a&Hgt)&[])&?). destruct Hp1 as (Hinv1&Hinv1'&?&Hval).
rewrite //=. destruct (Rmult_1_r _). rewrite /eq_rect_r //=.
rewrite Hval //= !Rmult_1_r //.
- setoid_rewrite ival_bind_mret_mret.
symmetry.
unshelve (eexists).
{
simpl. intros ((ic&?)&Hgt).
simpl in h1.
rewrite Rmult_1_r in Hgt.
destruct ic as (ic&Hgtc).
apply h2.
exists (existT ic tt).
simpl in *. rewrite Rmult_1_r. done.
}
unshelve (eexists).
{ simpl. intros ix.
destruct (h2' ix) as ((i1&?)&Hgt).
simpl in Hgt.
unshelve (eexists).
{ unshelve (refine (existT _ tt)).
exists i1. rewrite Rmult_1_r in Hgt. done.
}
rewrite //=.
}
rewrite //=.
repeat split.
* rewrite //=. intros (((a&Hgt)&[])&?). destruct Hp2 as (Hinv1&Hinv1'&?).
rewrite //=. destruct (Rmult_1_r _). rewrite /eq_rect_r //=.
rewrite Hinv1 => //=.
apply sval_inj_pred => //=.
f_equal.
apply sval_inj_pred => //=.
* rewrite //= => a. destruct Hp2 as (Hinv1&Hinv1'&?).
specialize (Hinv1' a).
destruct (h2' a) as ((?&[])&?).
rewrite //=.
rewrite -Hinv1'. destruct (Rmult_1_r (val Ic x)) => //=.
* rewrite //=. intros (((a&Hgt)&[])&?). destruct Hp1 as (Hinv1&Hinv1'&?).
rewrite //=. destruct (Rmult_1_r _). rewrite /eq_rect_r //=.
f_equal. f_equal.
apply sval_inj_pred => //=.
* rewrite //=. intros (((a&Hgt)&[])&?). destruct Hp2 as (Hinv1&Hinv1'&?&Hval).
rewrite //=. destruct (Rmult_1_r _). rewrite /eq_rect_r //=.
rewrite Hval //= !Rmult_1_r //.
Qed.
Lemma ival_coupling_equip {X} I1 (P: X → Prop) Hpf:
ival_couplingP I1 (ival_equip I1 P Hpf) (λ x y, x = (sval y)).
Proof.
unshelve eexists.
refine ((x ← ival_equip I1 P Hpf; mret (exist _ (sval x, x) _))); auto.
- setoid_rewrite ival_bind_mret_mret.
rewrite /sval.
etransitivity; first (symmetry; apply ival_right_id).
apply ival_bind_P.
- setoid_rewrite ival_bind_mret_mret.
rewrite /sval.
etransitivity; first (symmetry; apply ival_right_id).
eapply ival_bind_congr; first reflexivity.
intros (?&?) => //=. reflexivity.
Qed.
Lemma ival_coupling_support {X Y} I1 I2 (P: X → Y → Prop)
(Ic: ival_couplingP I1 I2 P) :
ival_couplingP I1 I2 (λ x y, ∃ Hpf: P x y, In_isupport x I1 ∧ In_isupport y I2 ∧
In_isupport (exist _ (x, y) Hpf) Ic).
Proof.
destruct Ic as [Ic Hp1 Hp2].
cut (ival_couplingP I1 I2 (λ x y, ∃ (Hpf: P x y), In_isupport (exist _ (x, y) Hpf) Ic)).
{ intros. eapply ival_coupling_conseq; last eauto.
intros x y (Hpf&Hin).
destruct Hin as (ic&Hind&Hval).
exists Hpf.
repeat split; auto.
- rewrite //=. symmetry in Hp1.
destruct Hp1 as (h1&?&?&?&Hind1&Hval1).
unshelve (eexists).
{ refine (sval (h1 _ )).
exists (existT ic tt). rewrite //= Rmult_1_r. destruct Rgt_dec => //=. }
split; rewrite //=.
* rewrite Hind1 //= Hind //=.
* rewrite Hval1 //= Rmult_1_r //.
- rewrite //=. symmetry in Hp2.
destruct Hp2 as (h1&?&?&?&Hind1&Hval1).
unshelve eexists.
{ refine (sval (h1 _)).
exists (existT ic tt). rewrite //= Rmult_1_r. destruct Rgt_dec => //=. }
split; rewrite //=.
* rewrite Hind1 //= Hind //=.
* rewrite Hval1 //= Rmult_1_r //.
- rewrite //=. exists ic; split => //=.
}
unshelve (eexists).
refine (z ← ival_equip Ic (λ xy, In_isupport xy Ic) _; mret _).
{ destruct z as ((xy&HP)&Hin). exists xy. abstract (exists HP; destruct xy; eauto). }
{ auto. }
- setoid_rewrite Hp1.
setoid_rewrite ival_bind_mret_mret.
eapply ival_coupling_eq.
eapply ival_coupling_bind; first eapply ival_coupling_equip.
intros (xy&HP) ((xy'&HP')&Hin).
rewrite //=.
inversion 1; subst; auto. apply ival_coupling_refl.
- setoid_rewrite Hp2.
setoid_rewrite ival_bind_mret_mret.
eapply ival_coupling_eq.
eapply ival_coupling_bind; first eapply ival_coupling_equip.
intros (xy&HP) ((xy'&HP')&Hin).
rewrite //=.
inversion 1; subst; auto. apply ival_coupling_refl.
Qed.
Lemma ival_coupling_proper {X Y} I1 I1' I2 I2' (P: X → Y → Prop) :
eq_ival I1 I1' →
eq_ival I2 I2' →
ival_couplingP I1 I2 P →
ival_couplingP I1' I2' P.
Proof.
intros Heq1 Heq2 [Ic Hp1 Hp2].
exists Ic; etransitivity; eauto; by symmetry.
Qed.
|
theory Extra2
imports ExtraInv VCTheoryLemmas
begin
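(* Verification condition VC2 preserves the auxiliary invariant extraInv:
   assuming extraInv in s0, the controller in state minimalRed with the
   request button pressed and the minimal-red time limit reached, the
   invariant still holds after the transition to redAfterMinimalRed. *)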
theorem extra2: "VC2 extraInv s0 requestButton_value"
apply(simp only: VC2_def extraInv_def)
proof
assume VC: " (toEnvP s0 \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = minimalRed \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and> ltimeEnv s1 Ctrl \<le> MINIMAL_RED_TIME_LIMIT) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = minimalRed \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s1 Ctrl \<and> getPstate s2 Ctrl = green \<and> ltimeEnv s2 Ctrl = GREEN_TIME_LIMIT) \<or>
(\<forall>s2. toEnvP s2 \<and> substate s2 s1 \<longrightarrow> getPstate s2 Ctrl = minimalRed)) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and> substate s2 s0 \<and> getPstate s2 Ctrl = minimalRed \<and> toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<longrightarrow>
getPstate s1 Ctrl = minimalRed) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and> (\<forall>s3. toEnvP s3 \<and> substate s1 s3 \<and> substate s3 s2 \<longrightarrow> getPstate s3 Ctrl = minimalRed) \<longrightarrow>
toEnvNum s1 s2 = ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and> getPstate s1 Ctrl = redAfterMinimalRed \<and> getVarBool s1 redAfterMinimalRed = NOT_PRESSED \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
getVarBool s2 redAfterMinimalRed = NOT_PRESSED \<and>
(\<forall>s3. toEnvP s3 \<and> substate s2 s3 \<and> substate s3 s1 \<and> s2 \<noteq> s3 \<longrightarrow>
getPstate s3 Ctrl = redAfterMinimalRed \<and>
getVarBool s3 redAfterMinimalRed = NOT_PRESSED \<and> getVarBool s3 Ctrl = NOT_PRESSED))) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = redToGreen \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and> ltimeEnv s1 Ctrl \<le> RED_TO_GREEN_TIME_LIMIT) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = redToGreen \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s1 Ctrl \<and>
getPstate s2 Ctrl = redAfterMinimalRed \<and>
(getVarBool s2 redAfterMinimalRed = PRESSED \<or> getVarBool s2 Ctrl = PRESSED))) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and> substate s2 s0 \<and> toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<and> getPstate s2 Ctrl = redToGreen \<longrightarrow>
getPstate s1 Ctrl = redToGreen) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and> (\<forall>s3. toEnvP s3 \<and> substate s1 s3 \<and> substate s3 s2 \<longrightarrow> getPstate s3 Ctrl = redToGreen) \<longrightarrow>
toEnvNum s1 s2 = ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = green \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and> ltimeEnv s1 Ctrl \<le> GREEN_TIME_LIMIT) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = green \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s2 Ctrl \<and>
getPstate s2 Ctrl = redToGreen \<and> ltimeEnv s2 Ctrl = RED_TO_GREEN_TIME_LIMIT)) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and> substate s2 s0 \<and> getPstate s2 Ctrl = green \<and> toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<longrightarrow>
getPstate s1 Ctrl = green) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and> (\<forall>s3. toEnvP s3 \<and> substate s1 s3 \<and> substate s3 s2 \<longrightarrow> getPstate s3 Ctrl = green) \<longrightarrow>
toEnvNum s1 s2 = ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = green \<longrightarrow> getVarBool s1 minimalRed = PRESSED) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl \<noteq> green \<longrightarrow> getVarBool s1 minimalRed = NOT_PRESSED) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<longrightarrow>
getPstate s1 Ctrl = minimalRed \<or>
getPstate s1 Ctrl = redAfterMinimalRed \<or> getPstate s1 Ctrl = redToGreen \<or> getPstate s1 Ctrl = green) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl - Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and> getVarBool s2 redAfterMinimalRed = NOT_PRESSED \<longrightarrow>
getVarBool s1 Ctrl = NOT_PRESSED) \<and>
(\<forall>s2. toEnvP s2 \<and> substate s2 s0 \<and> getPstate s2 Ctrl = minimalRed \<and> getVarBool s2 redAfterMinimalRed = PRESSED \<longrightarrow>
(\<exists>s1. toEnvP s1 \<and>
substate s1 s2 \<and> toEnvNum s1 s2 < ltimeEnv s2 Ctrl - Ctrl \<and> getVarBool s1 Ctrl = PRESSED)) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = redAfterMinimalRed \<and> getVarBool s1 redAfterMinimalRed \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
(getVarBool s2 redAfterMinimalRed \<or> getVarBool s1 Ctrl = PRESSED))) \<and>
(\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = green \<longrightarrow>
getVarBool s1 redAfterMinimalRed = NOT_PRESSED)) \<and>
env (setVarAny s0 requestButton_value) requestButton_value \<and>
getPstate (setVarAny s0 requestButton_value) Ctrl = minimalRed \<and>
getVarBool (setVarAny s0 requestButton_value) Ctrl \<and>
MINIMAL_RED_TIME_LIMIT
\<le> ltimeEnv (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl "
then obtain ei0: "toEnvP s0" and
ei1: " (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and>
getPstate s1 Ctrl = minimalRed \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and>
ltimeEnv s1 Ctrl
\<le> MINIMAL_RED_TIME_LIMIT)"
and ei2: " (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and>
getPstate s1 Ctrl = minimalRed \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s1 Ctrl \<and>
getPstate s2 Ctrl = green \<and>
ltimeEnv s2 Ctrl =
GREEN_TIME_LIMIT) \<or>
(\<forall>s2. toEnvP s2 \<and> substate s2 s1 \<longrightarrow>
getPstate s2 Ctrl = minimalRed))"
and ei3:" (\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and>
getPstate s2 Ctrl = minimalRed \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<longrightarrow>
getPstate s1 Ctrl = minimalRed)"
and ei4: " (\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s1 s3 \<and> substate s3 s2 \<longrightarrow>
getPstate s3 Ctrl = minimalRed) \<longrightarrow>
toEnvNum s1 s2 =
ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl)"
and ei5: " (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and> getPstate s1 Ctrl = redAfterMinimalRed \<and> getVarBool s1 redAfterMinimalRed = NOT_PRESSED \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
getVarBool s2 redAfterMinimalRed = NOT_PRESSED \<and>
(\<forall>s3. toEnvP s3 \<and> substate s2 s3 \<and> substate s3 s1 \<and> s2 \<noteq> s3 \<longrightarrow>
getPstate s3 Ctrl = redAfterMinimalRed \<and>
getVarBool s3 redAfterMinimalRed = NOT_PRESSED \<and> getVarBool s3 Ctrl = NOT_PRESSED)))" and ei6:
" (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and>
getPstate s1 Ctrl = redToGreen \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and>
ltimeEnv s1 Ctrl
\<le> RED_TO_GREEN_TIME_LIMIT)"
and ei7: " (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and>
getPstate s1 Ctrl = redToGreen \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s1 Ctrl \<and>
getPstate s2 Ctrl =
redAfterMinimalRed \<and>
(getVarBool s2 redAfterMinimalRed =
PRESSED \<or>
getVarBool s2 Ctrl = PRESSED)))"
and ei8: " (\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<and>
getPstate s2 Ctrl = redToGreen \<longrightarrow>
getPstate s1 Ctrl = redToGreen)"
and ei9: " (\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s1 s3 \<and> substate s3 s2 \<longrightarrow>
getPstate s3 Ctrl = redToGreen) \<longrightarrow>
toEnvNum s1 s2 =
ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl)"
and ei10:
" (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and>
getPstate s1 Ctrl = green \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and>
ltimeEnv s1 Ctrl \<le> GREEN_TIME_LIMIT)"
and ei11: " (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and>
getPstate s1 Ctrl = green \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s2 Ctrl \<and>
getPstate s2 Ctrl = redToGreen \<and>
ltimeEnv s2 Ctrl =
RED_TO_GREEN_TIME_LIMIT))"
and ei12: " (\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and>
getPstate s2 Ctrl = green \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<longrightarrow>
getPstate s1 Ctrl = green)" and ei13:
" (\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s1 s3 \<and> substate s3 s2 \<longrightarrow>
getPstate s3 Ctrl = green) \<longrightarrow>
toEnvNum s1 s2 =
ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl)"
and ei14: " (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and>
getPstate s1 Ctrl = green \<longrightarrow>
getVarBool s1 minimalRed = PRESSED)" and
ei15: " (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and>
getPstate s1 Ctrl \<noteq> green \<longrightarrow>
getVarBool s1 minimalRed = NOT_PRESSED)"
and ei16: " (\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<longrightarrow>
getPstate s1 Ctrl = minimalRed \<or>
getPstate s1 Ctrl = redAfterMinimalRed \<or>
getPstate s1 Ctrl = redToGreen \<or>
getPstate s1 Ctrl = green)" and
ei17: " (\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 s0 \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl - Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and> getVarBool s2 redAfterMinimalRed = NOT_PRESSED \<longrightarrow>
getVarBool s1 Ctrl = NOT_PRESSED)" and
ei18: " (\<forall>s2. toEnvP s2 \<and> substate s2 s0 \<and> getPstate s2 Ctrl = minimalRed \<and> getVarBool s2 redAfterMinimalRed = PRESSED \<longrightarrow>
(\<exists>s1. toEnvP s1 \<and>
substate s1 s2 \<and> toEnvNum s1 s2 < ltimeEnv s2 Ctrl - Ctrl \<and> getVarBool s1 Ctrl = PRESSED))" and
ei19: " (\<forall>s1. toEnvP s1 \<and> substate s1 s0 \<and> getPstate s1 Ctrl = redAfterMinimalRed \<and> getVarBool s1 redAfterMinimalRed \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
(getVarBool s2 redAfterMinimalRed \<or> getVarBool s1 Ctrl = PRESSED)))"
and ei20:
" (\<forall>s1. toEnvP s1 \<and>
substate s1 s0 \<and>
getPstate s1 Ctrl = green \<longrightarrow>
getVarBool s1 redAfterMinimalRed =
NOT_PRESSED)"
and vc: " env (setVarAny s0 requestButton_value)
requestButton_value \<and>
getPstate (setVarAny s0 requestButton_value)
Ctrl =
minimalRed \<and>
getVarBool (setVarAny s0 requestButton_value)
Ctrl \<and>
MINIMAL_RED_TIME_LIMIT
\<le> ltimeEnv
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl "
by fastforce
have "toEnvP
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed))" by auto
moreover from ei1 have " (\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = minimalRed \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and>
ltimeEnv s1 Ctrl
\<le> MINIMAL_RED_TIME_LIMIT)"
by auto
moreover from ei2 have " (\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = minimalRed \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s1 Ctrl \<and>
getPstate s2 Ctrl = green \<and>
ltimeEnv s2 Ctrl =
GREEN_TIME_LIMIT) \<or>
(\<forall>s2. toEnvP s2 \<and> substate s2 s1 \<longrightarrow>
getPstate s2 Ctrl = minimalRed))"
by auto
moreover from ei3 have " (\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s2 Ctrl = minimalRed \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<longrightarrow>
getPstate s1 Ctrl = minimalRed)"
by auto
moreover from ei4 have " (\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s1 s3 \<and> substate s3 s2 \<longrightarrow>
getPstate s3 Ctrl = minimalRed) \<longrightarrow>
toEnvNum s1 s2 =
ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl)"
by auto
moreover from ei5 have " (\<forall>s1. toEnvP s1 \<and>
substate s1 (toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed))
\<and> getPstate s1 Ctrl = redAfterMinimalRed \<and> getVarBool s1 redAfterMinimalRed = NOT_PRESSED \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
getVarBool s2 redAfterMinimalRed = NOT_PRESSED \<and>
(\<forall>s3. toEnvP s3 \<and> substate s2 s3 \<and> substate s3 s1 \<and> s2 \<noteq> s3 \<longrightarrow>
getPstate s3 Ctrl = redAfterMinimalRed \<and>
getVarBool s3 redAfterMinimalRed = NOT_PRESSED \<and> getVarBool s3 Ctrl = NOT_PRESSED)))"
by auto
moreover from ei6 have "(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = redToGreen \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and>
ltimeEnv s1 Ctrl
\<le> RED_TO_GREEN_TIME_LIMIT)"
by auto
moreover from ei7 have "(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = redToGreen \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s1 Ctrl \<and>
getPstate s2 Ctrl =
redAfterMinimalRed \<and>
(getVarBool s2 redAfterMinimalRed =
PRESSED \<or>
getVarBool s2 Ctrl = PRESSED)))"
by auto
moreover from ei8 have "(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<and>
getPstate s2 Ctrl = redToGreen \<longrightarrow>
getPstate s1 Ctrl = redToGreen)"
by auto
moreover from ei9 have "(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s1 s3 \<and> substate s3 s2 \<longrightarrow>
getPstate s3 Ctrl = redToGreen) \<longrightarrow>
toEnvNum s1 s2 =
ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl)"
by auto
moreover from ei10 have " (\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = green \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and>
ltimeEnv s1 Ctrl \<le> GREEN_TIME_LIMIT)"
by auto
moreover from ei11 have " \<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = green \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s2 Ctrl \<and>
getPstate s2 Ctrl = redToGreen \<and>
ltimeEnv s2 Ctrl =
RED_TO_GREEN_TIME_LIMIT)"
by auto
moreover from ei12 have "(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s2 Ctrl = green \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<longrightarrow>
getPstate s1 Ctrl = green)"
by auto
moreover from ei13 have "(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s1 s3 \<and> substate s3 s2 \<longrightarrow>
getPstate s3 Ctrl = green) \<longrightarrow>
toEnvNum s1 s2 =
ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl)"
by auto
moreover from ei14 have " (\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = green \<longrightarrow>
getVarBool s1 minimalRed = PRESSED)"
by auto
moreover from ei0 ei15 vc substate_refl have "(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
getPstate s1 Ctrl \<noteq> green \<longrightarrow>
getVarBool s1 minimalRed = NOT_PRESSED)" by auto
moreover from ei16 have "(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<longrightarrow>
getPstate s1 Ctrl = minimalRed \<or>
getPstate s1 Ctrl = redAfterMinimalRed \<or>
getPstate s1 Ctrl = redToGreen \<or>
getPstate s1 Ctrl = green)"
by auto
moreover from ei17 have "(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2 (toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl - Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and> getVarBool s2 redAfterMinimalRed = NOT_PRESSED \<longrightarrow>
getVarBool s1 Ctrl = NOT_PRESSED)"
by auto
moreover from ei18 have " (\<forall>s2. toEnvP s2 \<and> substate s2
(toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed))
\<and> getPstate s2 Ctrl = minimalRed \<and> getVarBool s2 redAfterMinimalRed = PRESSED \<longrightarrow>
(\<exists>s1. toEnvP s1 \<and>
substate s1 s2 \<and> toEnvNum s1 s2 < ltimeEnv s2 Ctrl - Ctrl \<and> getVarBool s1 Ctrl = PRESSED))"
by auto
moreover have " (\<forall>s1. toEnvP s1 \<and> substate s1 (toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed))
\<and> getPstate s1 Ctrl = redAfterMinimalRed \<and> getVarBool s1 redAfterMinimalRed \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
(getVarBool s2 redAfterMinimalRed \<or> getVarBool s1 Ctrl = PRESSED)))"
proof -
from ei0 ei1 vc substate_refl toEnvNum_id have 1: "( toEnvP s0 \<and>
substate s0 (toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed)) \<and>
toEnvNum s0 (toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed))
= Ctrl \<and>
getPstate s0 Ctrl = minimalRed \<and>
ltimeEnv s0 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
(getVarBool s0 redAfterMinimalRed \<or> getVarBool (toEnv
(setPstate
(setVarBool
(setVarAny s0 requestButton_value)
redAfterMinimalRed PRESSED)
Ctrl redAfterMinimalRed))
Ctrl = PRESSED))"
by auto
show ?thesis
proof
fix s1
show " toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = redAfterMinimalRed \<and> getVarBool s1 redAfterMinimalRed \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
(getVarBool s2 redAfterMinimalRed \<or> getVarBool s1 Ctrl = PRESSED))"
proof cases
assume 2: "s1 = (toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed))"
show ?thesis
proof
from 1 2 show "\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and> (getVarBool s2 redAfterMinimalRed \<or> getVarBool s1 Ctrl = PRESSED)"
by blast
qed
next
assume 2: "s1 \<noteq> (toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed))"
with ei19 show ?thesis by auto
qed
qed
qed
moreover from ei20 have "(\<forall>s1. toEnvP s1 \<and>
substate s1 (toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = green \<longrightarrow>
getVarBool s1 redAfterMinimalRed =
NOT_PRESSED)"
by auto
ultimately show "toEnvP
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = minimalRed \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and> ltimeEnv s1 Ctrl \<le> MINIMAL_RED_TIME_LIMIT) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = minimalRed \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s1 Ctrl \<and> getPstate s2 Ctrl = green \<and> ltimeEnv s2 Ctrl = GREEN_TIME_LIMIT) \<or>
(\<forall>s2. toEnvP s2 \<and> substate s2 s1 \<longrightarrow> getPstate s2 Ctrl = minimalRed)) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s2 Ctrl = minimalRed \<and> toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<longrightarrow>
getPstate s1 Ctrl = minimalRed) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
(\<forall>s3. toEnvP s3 \<and> substate s1 s3 \<and> substate s3 s2 \<longrightarrow> getPstate s3 Ctrl = minimalRed) \<longrightarrow>
toEnvNum s1 s2 = ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = redAfterMinimalRed \<and> getVarBool s1 redAfterMinimalRed = NOT_PRESSED \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
getVarBool s2 redAfterMinimalRed = NOT_PRESSED \<and>
(\<forall>s3. toEnvP s3 \<and> substate s2 s3 \<and> substate s3 s1 \<and> s2 \<noteq> s3 \<longrightarrow>
getPstate s3 Ctrl = redAfterMinimalRed \<and>
getVarBool s3 redAfterMinimalRed = NOT_PRESSED \<and> getVarBool s3 Ctrl = NOT_PRESSED))) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = redToGreen \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and> ltimeEnv s1 Ctrl \<le> RED_TO_GREEN_TIME_LIMIT) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = redToGreen \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s1 Ctrl \<and>
getPstate s2 Ctrl = redAfterMinimalRed \<and>
(getVarBool s2 redAfterMinimalRed = PRESSED \<or> getVarBool s2 Ctrl = PRESSED))) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<and> getPstate s2 Ctrl = redToGreen \<longrightarrow>
getPstate s1 Ctrl = redToGreen) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
(\<forall>s3. toEnvP s3 \<and> substate s1 s3 \<and> substate s3 s2 \<longrightarrow> getPstate s3 Ctrl = redToGreen) \<longrightarrow>
toEnvNum s1 s2 = ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = green \<longrightarrow>
0 < ltimeEnv s1 Ctrl \<and> ltimeEnv s1 Ctrl \<le> GREEN_TIME_LIMIT) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = green \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = ltimeEnv s2 Ctrl \<and>
getPstate s2 Ctrl = redToGreen \<and> ltimeEnv s2 Ctrl = RED_TO_GREEN_TIME_LIMIT)) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s2 Ctrl = green \<and> toEnvNum s1 s2 < ltimeEnv s2 Ctrl \<longrightarrow>
getPstate s1 Ctrl = green) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
(\<forall>s3. toEnvP s3 \<and> substate s1 s3 \<and> substate s3 s2 \<longrightarrow> getPstate s3 Ctrl = green) \<longrightarrow>
toEnvNum s1 s2 = ltimeEnv s2 Ctrl - ltimeEnv s1 Ctrl) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = green \<longrightarrow>
getVarBool s1 minimalRed = PRESSED) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl \<noteq> green \<longrightarrow>
getVarBool s1 minimalRed = NOT_PRESSED) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<longrightarrow>
getPstate s1 Ctrl = minimalRed \<or>
getPstate s1 Ctrl = redAfterMinimalRed \<or> getPstate s1 Ctrl = redToGreen \<or> getPstate s1 Ctrl = green) \<and>
(\<forall>s1 s2.
toEnvP s1 \<and>
toEnvP s2 \<and>
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
toEnvNum s1 s2 < ltimeEnv s2 Ctrl - Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and> getVarBool s2 redAfterMinimalRed = NOT_PRESSED \<longrightarrow>
getVarBool s1 Ctrl = NOT_PRESSED) \<and>
(\<forall>s2. toEnvP s2 \<and>
substate s2
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s2 Ctrl = minimalRed \<and> getVarBool s2 redAfterMinimalRed = PRESSED \<longrightarrow>
(\<exists>s1. toEnvP s1 \<and>
substate s1 s2 \<and> toEnvNum s1 s2 < ltimeEnv s2 Ctrl - Ctrl \<and> getVarBool s1 Ctrl = PRESSED)) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = redAfterMinimalRed \<and> getVarBool s1 redAfterMinimalRed \<longrightarrow>
(\<exists>s2. toEnvP s2 \<and>
substate s2 s1 \<and>
toEnvNum s2 s1 = Ctrl \<and>
getPstate s2 Ctrl = minimalRed \<and>
ltimeEnv s2 Ctrl = MINIMAL_RED_TIME_LIMIT \<and>
(getVarBool s2 redAfterMinimalRed \<or> getVarBool s1 Ctrl = PRESSED))) \<and>
(\<forall>s1. toEnvP s1 \<and>
substate s1
(toEnv
(setPstate (setVarBool (setVarAny s0 requestButton_value) redAfterMinimalRed PRESSED) Ctrl
redAfterMinimalRed)) \<and>
getPstate s1 Ctrl = green \<longrightarrow>
getVarBool s1 redAfterMinimalRed = NOT_PRESSED)" by blast
qed |
State Before: α β : Sort u_1
h : α = β
⊢ Bijective (Eq.mpr h)
State After: case refl
α : Sort u_1
⊢ Bijective (Eq.mpr (_ : α = α))
Tactic: cases h

State Before: case refl
α : Sort u_1
⊢ Bijective (Eq.mpr (_ : α = α))
State After: no goals
Tactic: refine ⟨fun _ _ ↦ id, fun x ↦ ⟨x, rfl⟩⟩ |
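For reference, the two tactic steps in this trace assemble into a standalone Lean 4 lemma. The following is only a sketch: the lemma name eqMpr_bijective and the import path are assumptions made here, with Function.Bijective taken from Mathlib.

import Mathlib.Logic.Function.Basic

universe u

-- Transporting along a type equality `h : α = β` via `Eq.mpr` is a bijection.
theorem eqMpr_bijective {α β : Sort u} (h : α = β) :
    Function.Bijective (Eq.mpr h) := by
  cases h  -- reduce to the reflexive case, where `Eq.mpr rfl` is definitionally `id`
  refine ⟨fun _ _ ↦ id, fun x ↦ ⟨x, rfl⟩⟩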
lemma contractible_UNIV [simp]: "contractible (UNIV :: 'a::real_normed_vector set)" |
[STATEMENT]
lemma map_hd_lem[rule_format] : "n > 0 \<longrightarrow> (f 0 # map (\<lambda>i. f i) [1..<n]) = map (\<lambda>i. f i) [0..<n]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < n \<longrightarrow> f 0 # map f [1..<n] = map f [0..<n]
[PROOF STEP]
by (simp add : hd_map upt_rec) |
[STATEMENT]
lemma divisor_count_asymptotics_aux:
"(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
define R where "R = (\<lambda>x. \<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. frac (x / real i))"
[PROOF STATE]
proof (state)
this:
R = (\<lambda>x. \<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. frac (x / real i))
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
define S where "S = (\<lambda>x. ln (real (nat \<lfloor>sqrt x\<rfloor>)) - ln x / 2)"
[PROOF STATE]
proof (state)
this:
S = (\<lambda>x. ln (real (nat \<lfloor>sqrt x\<rfloor>)) - ln x / 2)
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
have R_bound: "R x \<in> {0..sqrt x}" if x: "x \<ge> 0" for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
have "R x \<le> (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. R x \<le> (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1)
[PROOF STEP]
unfolding R_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. frac (x / real i)) \<le> (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1)
[PROOF STEP]
by (intro sum_mono frac_le_1)
[PROOF STATE]
proof (state)
this:
R x \<le> (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1)
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
R x \<le> (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1)
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
from x
[PROOF STATE]
proof (chain)
picking this:
0 \<le> x
[PROOF STEP]
have "\<dots> = of_int \<lfloor>sqrt x\<rfloor>"
[PROOF STATE]
proof (prove)
using this:
0 \<le> x
goal (1 subgoal):
1. (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1) = real_of_int \<lfloor>sqrt x\<rfloor>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1) = real_of_int \<lfloor>sqrt x\<rfloor>
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1) = real_of_int \<lfloor>sqrt x\<rfloor>
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
have "\<dots> \<le> sqrt x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_of_int \<lfloor>sqrt x\<rfloor> \<le> sqrt x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
real_of_int \<lfloor>sqrt x\<rfloor> \<le> sqrt x
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
R x \<le> sqrt x
[PROOF STEP]
have "R x \<le> sqrt x"
[PROOF STATE]
proof (prove)
using this:
R x \<le> sqrt x
goal (1 subgoal):
1. R x \<le> sqrt x
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
R x \<le> sqrt x
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
R x \<le> sqrt x
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
have "R x \<ge> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> R x
[PROOF STEP]
unfolding R_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. frac (x / real i))
[PROOF STEP]
by (intro sum_nonneg) simp_all
[PROOF STATE]
proof (state)
this:
0 \<le> R x
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
R x \<le> sqrt x
0 \<le> R x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
R x \<le> sqrt x
0 \<le> R x
goal (1 subgoal):
1. R x \<in> {0..sqrt x}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
R x \<in> {0..sqrt x}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
0 \<le> ?x \<Longrightarrow> R ?x \<in> {0..sqrt ?x}
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
have R_bound': "norm (R x) \<le> 1 * norm (sqrt x)" if "x \<ge> 0" for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. norm (R x) \<le> 1 * norm (sqrt x)
[PROOF STEP]
using R_bound[OF that] that
[PROOF STATE]
proof (prove)
using this:
R x \<in> {0..sqrt x}
0 \<le> x
goal (1 subgoal):
1. norm (R x) \<le> 1 * norm (sqrt x)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 \<le> ?x \<Longrightarrow> norm (R ?x) \<le> 1 * norm (sqrt ?x)
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
have R_bigo: "R \<in> O(sqrt)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. R \<in> O(sqrt)
[PROOF STEP]
using eventually_ge_at_top[of "0::real"]
[PROOF STATE]
proof (prove)
using this:
eventually ((\<le>) 0) at_top
goal (1 subgoal):
1. R \<in> O(sqrt)
[PROOF STEP]
by (intro bigoI[of _ 1], elim eventually_mono) (rule R_bound')
[PROOF STATE]
proof (state)
this:
R \<in> O(sqrt)
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
have "eventually (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1 :: real) (x / real n)) (sqrt x) =
x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x) at_top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_top. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
using eventually_ge_at_top[of "0 :: real"]
[PROOF STATE]
proof (prove)
using this:
eventually ((\<le>) 0) at_top
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_top. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
proof eventually_elim
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. 0 \<le> x \<Longrightarrow> sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
case (elim x)
[PROOF STATE]
proof (state)
this:
0 \<le> x
goal (1 subgoal):
1. \<And>x. 0 \<le> x \<Longrightarrow> sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
have "sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1 :: real) (x / real n)) (sqrt x) =
(\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. of_int \<lfloor>x / real i\<rfloor>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. real_of_int \<lfloor>x / real i\<rfloor>)
[PROOF STEP]
using elim
[PROOF STATE]
proof (prove)
using this:
0 \<le> x
goal (1 subgoal):
1. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. real_of_int \<lfloor>x / real i\<rfloor>)
[PROOF STEP]
by (simp add: sum_upto_altdef)
[PROOF STATE]
proof (state)
this:
sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. real_of_int \<lfloor>x / real i\<rfloor>)
goal (1 subgoal):
1. \<And>x. 0 \<le> x \<Longrightarrow> sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. real_of_int \<lfloor>x / real i\<rfloor>)
goal (1 subgoal):
1. \<And>x. 0 \<le> x \<Longrightarrow> sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
have "\<dots> = x * (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1 / real i) - R x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. real_of_int \<lfloor>x / real i\<rfloor>) = x * (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1 / real i) - R x
[PROOF STEP]
by (simp add: sum_subtractf frac_def R_def sum_distrib_left)
[PROOF STATE]
proof (state)
this:
(\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. real_of_int \<lfloor>x / real i\<rfloor>) = x * (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1 / real i) - R x
goal (1 subgoal):
1. \<And>x. 0 \<le> x \<Longrightarrow> sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. real_of_int \<lfloor>x / real i\<rfloor>) = x * (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1 / real i) - R x
goal (1 subgoal):
1. \<And>x. 0 \<le> x \<Longrightarrow> sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
have "{0<..nat \<lfloor>sqrt x\<rfloor>} = {1..nat \<lfloor>sqrt x\<rfloor>}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {0<..nat \<lfloor>sqrt x\<rfloor>} = {1..nat \<lfloor>sqrt x\<rfloor>}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
{0<..nat \<lfloor>sqrt x\<rfloor>} = {1..nat \<lfloor>sqrt x\<rfloor>}
goal (1 subgoal):
1. \<And>x. 0 \<le> x \<Longrightarrow> sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
{0<..nat \<lfloor>sqrt x\<rfloor>} = {1..nat \<lfloor>sqrt x\<rfloor>}
goal (1 subgoal):
1. \<And>x. 0 \<le> x \<Longrightarrow> sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
have "(\<Sum>i\<in>\<dots>. 1 / real i) = harm (nat \<lfloor>sqrt x\<rfloor>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i = 1..nat \<lfloor>sqrt x\<rfloor>. 1 / real i) = harm (nat \<lfloor>sqrt x\<rfloor>)
[PROOF STEP]
by (simp add: harm_def divide_simps)
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 1..nat \<lfloor>sqrt x\<rfloor>. 1 / real i) = harm (nat \<lfloor>sqrt x\<rfloor>)
goal (1 subgoal):
1. \<And>x. 0 \<le> x \<Longrightarrow> sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
goal (1 subgoal):
1. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F x in at_top. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
hence "(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1 :: real) (x / real n)) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in>
\<Theta>(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (nat \<lfloor>sqrt x\<rfloor>) + euler_mascheroni)) - R x + x * S x)"
(is "_ \<in> \<Theta>(?A)")
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F x in at_top. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) = x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> \<Theta>(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) - R x + x * S x)
[PROOF STEP]
by (intro bigthetaI_cong) (elim eventually_mono, simp_all add: algebra_simps S_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> \<Theta>(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) - R x + x * S x)
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> \<Theta>(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) - R x + x * S x)
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
have "?A \<in> O(sqrt)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) - R x + x * S x) \<in> O(sqrt)
[PROOF STEP]
proof (intro sum_in_bigo)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
3. (\<lambda>x. x * S x) \<in> O(sqrt)
[PROOF STEP]
have "(\<lambda>x. - S x) \<in> \<Theta>(\<lambda>x. ln (sqrt x) - ln (of_int \<lfloor>sqrt x\<rfloor>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. - S x) \<in> \<Theta>(\<lambda>x. ln (sqrt x) - ln (real_of_int \<lfloor>sqrt x\<rfloor>))
[PROOF STEP]
by (intro bigthetaI_cong eventually_mono [OF eventually_ge_at_top[of "1::real"]])
(auto simp: S_def ln_sqrt)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. - S x) \<in> \<Theta>(\<lambda>x. ln (sqrt x) - ln (real_of_int \<lfloor>sqrt x\<rfloor>))
goal (3 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
3. (\<lambda>x. x * S x) \<in> O(sqrt)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<lambda>x. - S x) \<in> \<Theta>(\<lambda>x. ln (sqrt x) - ln (real_of_int \<lfloor>sqrt x\<rfloor>))
goal (3 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
3. (\<lambda>x. x * S x) \<in> O(sqrt)
[PROOF STEP]
have "(\<lambda>x. ln (sqrt x) - ln (of_int \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / sqrt x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. ln (sqrt x) - ln (real_of_int \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / sqrt x)
[PROOF STEP]
by (rule landau_o.big.compose[OF ln_minus_ln_floor_bigo sqrt_at_top])
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (sqrt x) - ln (real_of_int \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / sqrt x)
goal (3 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
3. (\<lambda>x. x * S x) \<in> O(sqrt)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>x. - S x) \<in> O(\<lambda>x. 1 / sqrt x)
[PROOF STEP]
have "(\<lambda>x. x * S x) \<in> O(\<lambda>x. x * (1 / sqrt x))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. - S x) \<in> O(\<lambda>x. 1 / sqrt x)
goal (1 subgoal):
1. (\<lambda>x. x * S x) \<in> O(\<lambda>x. x * (1 / sqrt x))
[PROOF STEP]
by (intro landau_o.big.mult) simp_all
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x * S x) \<in> O(\<lambda>x. x * (1 / sqrt x))
goal (3 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
3. (\<lambda>x. x * S x) \<in> O(sqrt)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x * S x) \<in> O(\<lambda>x. x * (1 / sqrt x))
goal (3 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
3. (\<lambda>x. x * S x) \<in> O(sqrt)
[PROOF STEP]
have "(\<lambda>x::real. x * (1 / sqrt x)) \<in> \<Theta>(\<lambda>x. sqrt x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. x * (1 / sqrt x)) \<in> \<Theta>(sqrt)
[PROOF STEP]
by (intro bigthetaI_cong eventually_mono [OF eventually_gt_at_top[of "0::real"]])
(auto simp: field_simps)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x * (1 / sqrt x)) \<in> \<Theta>(sqrt)
goal (3 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
3. (\<lambda>x. x * S x) \<in> O(sqrt)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>x. x * S x) \<in> O(sqrt)
[PROOF STEP]
show "(\<lambda>x. x * S x) \<in> O(sqrt)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. x * S x) \<in> O(sqrt)
goal (1 subgoal):
1. (\<lambda>x. x * S x) \<in> O(sqrt)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x * S x) \<in> O(sqrt)
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
let ?f = "\<lambda>x::real. harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
have "?f \<in> O(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) \<in> O(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>))
[PROOF STEP]
proof (rule landau_o.big.compose[of _ _ _ "\<lambda>x. nat \<lfloor>sqrt x\<rfloor>"])
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (\<lambda>a. harm a - (ln (real a) + euler_mascheroni)) \<in> O[?F](\<lambda>a. 1 / real a)
2. LIM x at_top. nat \<lfloor>sqrt x\<rfloor> :> ?F
[PROOF STEP]
show "filterlim (\<lambda>x::real. nat \<lfloor>sqrt x\<rfloor>) at_top at_top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. LIM x at_top. nat \<lfloor>sqrt x\<rfloor> :> sequentially
[PROOF STEP]
by (intro filterlim_compose[OF filterlim_nat_sequentially]
filterlim_compose[OF filterlim_floor_sequentially] sqrt_at_top)
[PROOF STATE]
proof (state)
this:
LIM x at_top. nat \<lfloor>sqrt x\<rfloor> :> sequentially
goal (1 subgoal):
1. (\<lambda>a. harm a - (ln (real a) + euler_mascheroni)) \<in> O(\<lambda>a. 1 / real a)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>a. harm a - (ln (real a) + euler_mascheroni)) \<in> O(\<lambda>a. 1 / real a)
[PROOF STEP]
show "(\<lambda>a. harm a - (ln (real a) + euler_mascheroni)) \<in> O(\<lambda>a. 1 / real a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>a. harm a - (ln (real a) + euler_mascheroni)) \<in> O(\<lambda>a. 1 / real a)
[PROOF STEP]
by (rule harm_expansion_bigo_simple2)
[PROOF STATE]
proof (state)
this:
(\<lambda>a. harm a - (ln (real a) + euler_mascheroni)) \<in> O(\<lambda>a. 1 / real a)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<lambda>x. harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) \<in> O(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>))
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<lambda>x. harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) \<in> O(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>))
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
have "(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / (sqrt x - 1))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / (sqrt x - 1))
[PROOF STEP]
proof (rule bigoI[of _ 1], use eventually_ge_at_top[of 2] in eventually_elim)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. 2 \<le> x \<Longrightarrow> norm (1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<le> 1 * norm (1 / (sqrt x - 1))
[PROOF STEP]
case (elim x)
[PROOF STATE]
proof (state)
this:
2 \<le> x
goal (1 subgoal):
1. \<And>x. 2 \<le> x \<Longrightarrow> norm (1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<le> 1 * norm (1 / (sqrt x - 1))
[PROOF STEP]
have "sqrt x \<le> 1 + real_of_int \<lfloor>sqrt x\<rfloor>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sqrt x \<le> 1 + real_of_int \<lfloor>sqrt x\<rfloor>
[PROOF STEP]
by linarith
[PROOF STATE]
proof (state)
this:
sqrt x \<le> 1 + real_of_int \<lfloor>sqrt x\<rfloor>
goal (1 subgoal):
1. \<And>x. 2 \<le> x \<Longrightarrow> norm (1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<le> 1 * norm (1 / (sqrt x - 1))
[PROOF STEP]
with elim
[PROOF STATE]
proof (chain)
picking this:
2 \<le> x
sqrt x \<le> 1 + real_of_int \<lfloor>sqrt x\<rfloor>
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
2 \<le> x
sqrt x \<le> 1 + real_of_int \<lfloor>sqrt x\<rfloor>
goal (1 subgoal):
1. norm (1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<le> 1 * norm (1 / (sqrt x - 1))
[PROOF STEP]
by (simp add: field_simps)
[PROOF STATE]
proof (state)
this:
norm (1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<le> 1 * norm (1 / (sqrt x - 1))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / (sqrt x - 1))
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / (sqrt x - 1))
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
have "(\<lambda>x::real. 1 / (sqrt x - 1)) \<in> O(\<lambda>x. 1 / sqrt x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 1 / (sqrt x - 1)) \<in> O(\<lambda>x. 1 / sqrt x)
[PROOF STEP]
by (rule landau_o.big.compose[OF _ sqrt_at_top]) simp_all
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 1 / (sqrt x - 1)) \<in> O(\<lambda>x. 1 / sqrt x)
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>x. harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) \<in> O(\<lambda>x. 1 / sqrt x)
[PROOF STEP]
have "(\<lambda>x. x * ?f x) \<in> O(\<lambda>x. x * (1 / sqrt x))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) \<in> O(\<lambda>x. 1 / sqrt x)
goal (1 subgoal):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(\<lambda>x. x * (1 / sqrt x))
[PROOF STEP]
by (intro landau_o.big.mult landau_o.big_refl)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(\<lambda>x. x * (1 / sqrt x))
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(\<lambda>x. x * (1 / sqrt x))
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
have "(\<lambda>x::real. x * (1 / sqrt x)) \<in> \<Theta>(\<lambda>x. sqrt x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. x * (1 / sqrt x)) \<in> \<Theta>(sqrt)
[PROOF STEP]
by (intro bigthetaI_cong eventually_mono[OF eventually_gt_at_top[of "0::real"]])
(auto elim!: eventually_mono simp: field_simps)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x * (1 / sqrt x)) \<in> \<Theta>(sqrt)
goal (2 subgoals):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
2. R \<in> O(sqrt)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
[PROOF STEP]
show "(\<lambda>x. x * ?f x) \<in> O(sqrt)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
goal (1 subgoal):
1. (\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni))) \<in> O(sqrt)
goal (1 subgoal):
1. R \<in> O(sqrt)
[PROOF STEP]
qed fact+
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)) - R x + x * S x) \<in> O(sqrt)
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
goal (1 subgoal):
1. (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) - (x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)
goal:
No subgoals!
[PROOF STEP]
qed |
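In conventional notation, the opening decomposition of this proof reads as follows (a summary of the R_def and harm steps above, not additional formal content):

\[
  \sum_{0 < n \le \lfloor\sqrt{x}\rfloor} \Big\lfloor \frac{x}{n} \Big\rfloor
  \;=\; x \, H_{\lfloor\sqrt{x}\rfloor} \;-\; R(x),
  \qquad
  R(x) \;=\; \sum_{0 < n \le \lfloor\sqrt{x}\rfloor} \Big\{ \frac{x}{n} \Big\},
  \quad 0 \le R(x) \le \sqrt{x},
\]

after which the error terms $x\,(H_{\lfloor\sqrt{x}\rfloor} - \ln\lfloor\sqrt{x}\rfloor - \gamma)$, $R(x)$, and $x\,S(x)$ are each shown to be $O(\sqrt{x})$.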
(*
Copyright 2018
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
theory readyqueues_remove_mem
imports tasks
begin
text \<open>Up to two locales per function in the binary.\<close>
locale readyqueues_remove_function = tasks_context +
fixes rsp\<^sub>0 rbp\<^sub>0 a task readyqueues_remove_ret :: \<open>64 word\<close>
and core_id :: \<open>32 word\<close>
and v\<^sub>0 :: \<open>8 word\<close>
and blocks :: \<open>(nat \<times> 64 word \<times> nat) set\<close>
assumes seps: \<open>seps blocks\<close>
and masters\<^sub>2:
\<open>master blocks (a, 1) 0\<close>
\<open>master blocks (rsp\<^sub>0, 8) 1\<close>
\<open>master blocks (rsp\<^sub>0-8, 8) 2\<close>
\<open>master blocks (rsp\<^sub>0-16, 8) 3\<close>
\<open>master blocks (rsp\<^sub>0-28, 4) 4\<close>
\<open>master blocks (rsp\<^sub>0-40, 8) 5\<close>
\<open>master blocks (task + 41, 1) 6\<close> \<comment> \<open>\texttt{task->prio}\<close>
\<open>master blocks (the (label_to_address assembly ''readyqueues'') + 24 + (ucast core_id + (ucast core_id << 3) << 6), 4) 7\<close>
and ret_address: \<open>outside readyqueues_remove_ret 649 848\<close> \<comment> \<open>Only works for non-recursive functions.\<close>
begin
text \<open>We need the exact format used in the proofs below, hence this icky explicit simplification.\<close>
lemma boffset_add_commute[simp]: \<open>boffset + x + y = boffset + (x + y)\<close>
by simp
lemmas masters = masters\<^sub>2[simplified]
declare boffset_add_commute[simp del]
text \<open>
The Floyd invariant expresses, for selected locations, properties that are invariably true at those locations.
Here it simply expresses that a byte in memory remains untouched.
\<close>
definition pp_\<Theta> :: floyd_invar where
\<open>pp_\<Theta> \<equiv> [
\<comment> \<open>precondition\<close>
boffset+649 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0
\<and> regs \<sigma> rbp = rbp\<^sub>0
\<and> \<langle>31,0\<rangle>regs \<sigma> rdi = core_id
\<and> regs \<sigma> rsi = task
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+readyqueues_remove_ret
\<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0,
boffset+740 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-40
\<and> regs \<sigma> rbp = rsp\<^sub>0-8
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-28,4] = core_id
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-40,8] = task
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+readyqueues_remove_ret
\<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0,
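                   \<comment> \<open>The states at 740 and 745 appear to bracket the task_list_remove_task call discharged in the _calls locale below.\<close>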
boffset+745 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-40
\<and> regs \<sigma> rbp = rsp\<^sub>0-8
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-28,4] = core_id
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-40,8] = task
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+readyqueues_remove_ret
\<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0,
boffset+845 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-40
\<and> regs \<sigma> rbp = rsp\<^sub>0-8
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-28,4] = core_id
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-40,8] = task
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+readyqueues_remove_ret
\<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0,
\<comment> \<open>postcondition\<close>
boffset+readyqueues_remove_ret \<mapsto> \<lambda>\<sigma>. \<sigma> \<turnstile> *[a,1] = v\<^sub>0
\<and> regs \<sigma> rsp = rsp\<^sub>0+8
\<and> regs \<sigma> rbp = rbp\<^sub>0
]\<close>
text \<open>Adding some rules to the simplifier to simplify proofs.\<close>
schematic_goal pp_\<Theta>_zero[simp]:
shows \<open>pp_\<Theta> boffset = ?x\<close>
unfolding pp_\<Theta>_def
by simp
schematic_goal pp_\<Theta>_numeral_l[simp]:
shows \<open>pp_\<Theta> (n + boffset) = ?x\<close>
unfolding pp_\<Theta>_def
by simp
schematic_goal pp_\<Theta>_numeral_r[simp]:
shows \<open>pp_\<Theta> (boffset + n) = ?x\<close>
unfolding pp_\<Theta>_def
by simp
end
locale readyqueues_remove_function_calls = readyqueues_remove_function +
fixes task_list_remove_task :: \<open>state \<Rightarrow> state\<close>
assumes task_list_remove_task_def: \<open>exec_instr assembly semantics tasks_flag_annotation (Unary (IS_8088 Call) (Immediate (ImmLabel ''task_list_remove_task''))) 5 = task_list_remove_task\<close>
and task_list_remove_task_incr: \<open>regs (task_list_remove_task \<sigma>) rip = regs \<sigma> rip + 5\<close>
and task_list_remove_task740: \<open>the (pp_\<Theta> (boffset+740)) \<sigma> \<Longrightarrow> the (pp_\<Theta> (boffset+745)) (task_list_remove_task \<sigma>)\<close>
begin
lemma rewrite_readyqueues_remove_mem:
\<open>is_std_invar readyqueues_remove_ret (floyd.invar readyqueues_remove_ret pp_\<Theta>)\<close>
text \<open>Boilerplate code to start the VCG\<close>
apply (rule floyd_invarI)
apply (rewrite at \<open>floyd_vcs readyqueues_remove_ret \<hole> _\<close> pp_\<Theta>_def)
apply (intro floyd_vcsI)
text \<open>Subgoal for rip = boffset+649\<close>
subgoal premises prems for \<sigma>
text \<open>Insert relevant knowledge\<close>
apply (insert prems seps ret_address)
text \<open>Apply VCG/symb.\ execution\<close>
apply (restart_symbolic_execution?, (symbolic_execution masters: masters)+, (finish_symbolic_execution masters: masters)?)+
done
text \<open>Subgoal for rip = boffset+740\<close>
subgoal premises prems for \<sigma>
text \<open>Insert relevant knowledge\<close>
apply (insert prems seps ret_address)
text \<open>Discharge function call\<close>
apply (rule wps_rls)
apply (simp (no_asm_simp) add: lookup_table_def instr_index_def entry_size_def)
apply (rewrite_one_let' add: assembly_def)
apply (rewrite_one_let' add: task_list_remove_task_def)
apply (rule floyd_invarI'[of _ _ \<open>the (pp_\<Theta> (boffset+745))\<close>])
apply (simp add: task_list_remove_task_incr)
using task_list_remove_task740
apply simp
done
text \<open>Subgoal for rip = boffset+745\<close>
subgoal premises prems for \<sigma>
text \<open>Insert relevant knowledge\<close>
apply (insert prems seps ret_address)
text \<open>Apply VCG/symb.\ execution\<close>
apply (symbolic_execution masters: masters)+
apply (finish_symbolic_execution masters: masters)
apply restart_symbolic_execution
apply (symbolic_execution masters: masters)+
subgoal premises prems\<^sub>2
proof -
      have x: \<open>\<sigma> \<turnstile> *[regs \<sigma> rbp - 20,4] = core_id\<close> \<comment> \<open>Not sure why this isn't done automatically; it is for readyqueues_push_back.\<close>
using prems\<^sub>2
by auto
show ?thesis
apply (insert prems\<^sub>2)
apply (finish_symbolic_execution add: x masters: masters)
done
qed
done
text \<open>Subgoal for rip = boffset+845\<close>
subgoal premises prems for \<sigma>
text \<open>Insert relevant knowledge\<close>
apply (insert prems seps ret_address)
text \<open>Apply VCG/symb.\ execution\<close>
apply (restart_symbolic_execution?, (symbolic_execution masters: masters)+, (finish_symbolic_execution masters: masters)?)+
done
text \<open>Trivial ending subgoal.\<close>
subgoal
by simp
done
end
end
|
(*************************************************************************
* Copyright (C)
* 2019 The University of Exeter
* 2018-2019 The University of Paris-Saclay
* 2018 The University of Sheffield
*
* License:
* This program can be redistributed and/or modified under the terms
* of the 2-clause BSD-style license.
*
* SPDX-License-Identifier: BSD-2-Clause
*************************************************************************)
theory
Attributes
imports
"Isabelle_DOF-Unit-Tests_document"
"Isabelle_DOF-Ontologies.Conceptual"
Concept_MonitorTest1
begin
ML\<open>@{assert} (1 = 1)\<close>
section\<open>Elementary Creation of Doc-items and Access to their Attributes\<close>
text\<open>Current status:\<close>
print_doc_classes
print_doc_items
(* this corresponds to low-level accesses : *)
ML\<open>
val docitem_tab = DOF_core.get_instances \<^context>
val isa_transformer_tab = DOF_core.get_isa_transformers \<^context>
val docclass_tab = DOF_core.get_onto_classes @{context};
\<close>
ML\<open>
map fst (Name_Space.dest_table docitem_tab);
Name_Space.dest_table docclass_tab;
\<close>
find_theorems (60) name:"Conceptual.M."
value [simp]"M.trace(M.make undefined [] ())"
value "M.ok(M.make undefined_AAA [] ())"
value "M.trace(M.make undefined_AAA [] ())"
value "M.tag_attribute(M.make undefined_AAA [] ())"
value "M.ok(M.make 0 [] ())"
(*
value "ok(M.make undefined [] ())"
value "ok(M.make 0 [] undefined)"
*)
value [simp] \<open> M.ok
(Conceptual.M.trace_update (\<lambda>x. [])
(Conceptual.M.tag_attribute_update (\<lambda>x. 0)
(Conceptual.M.ok_update (\<lambda>x. ())
(undefined::M))
))\<close>
value [simp] \<open> M.ok
(Conceptual.M.trace_update (\<lambda>x. [])
(Conceptual.M.tag_attribute_update (\<lambda>x. 0)
(Conceptual.M.ok_update (\<lambda>x. ())
(undefined::M))
))\<close>
value \<open> M.ok
(Conceptual.M.trace_update (\<lambda>x. [])
(Conceptual.M.tag_attribute_update (\<lambda>x. 0)
(Conceptual.M.ok_update (\<lambda>x. ())
(AAAA::M))
))\<close>
value \<open> M.ok
(Conceptual.M.trace_update (\<lambda>x. [])
(Conceptual.M.tag_attribute_update (\<lambda>x. 0)
(Conceptual.M.ok_update (\<lambda>x. ())
(M.make XX1 XX2 XX3::M))
))\<close>
text\<open>A text item containing standard theorem antiquotations and complex meta-information.\<close>
(* crashes in batch mode ...
text*[dfgdfg::B, Conceptual.B.x ="''f''", y = "[''sdf'']"]\<open> Lorem ipsum ... @{thm refl} \<close>
*)
text*[dfgdfg::B]\<open> Lorem ipsum ... @{thm refl} \<close>
text\<open>Document class declarations also lead to HOL-type declarations (relevant for ontological links).\<close>
typ "C"
typ "D"
text\<open> ... as well as to HOL-constant declarations (relevant for monitor rexps and traces).\<close>
term "C"
text\<open>Voilà what happens on the ML level:\<close>
ML\<open>val Type("Conceptual.B.B_ext",[Type("Conceptual.C.C_ext",t)]) = @{typ "C"};
val @{typ "D"} = Value_Command.Docitem_Parser.cid_2_cidType "Conceptual.D" @{theory};
val @{typ "E"} = Value_Command.Docitem_Parser.cid_2_cidType "Conceptual.E" @{theory};
\<close>
text*[dfgdfg2::C, z = "None"]\<open> Lorem ipsum ... @{thm refl} \<close>
text*[omega::E, x = "''def''"]\<open> Lorem ipsum ... @{thm refl} \<close>
text\<open> As mentioned in @{docitem \<open>dfgdfg\<close>} \<close>
text\<open>Here is a simulation of what happens at the level of the (HOL) term representation:\<close>
typ \<open>'a A_scheme\<close>
typ \<open>A\<close>
term "A.x (undefined\<lparr>A.x := 3\<rparr>)"
term "B.x ((undefined::C)\<lparr>B.y := [''sdf'']\<rparr>)"
term "C.z ((undefined::C)\<lparr>B.y := [''sdf''], z:= Some undefined\<rparr>)"
ML\<open>
val SOME {def_occurrence = "Conceptual.A", long_name = "Conceptual.A.x", typ = t, def_pos}
= DOF_core.get_attribute_info "Conceptual.A" "x" @{theory};
DOF_core.get_attribute_info "Conceptual.B" "x" @{theory};
DOF_core.get_attribute_info "Conceptual.B" "y" @{theory};
DOF_core.get_attribute_info "Conceptual.C" "x" @{theory};
val SOME {def_occurrence = "Conceptual.C", long_name = "Conceptual.B.y", typ = t', def_pos}
= DOF_core.get_attribute_info "Conceptual.C" "y" @{theory};
(* This is the situation where an attribute is accessed via C but was first declared in B,
   from which C inherits it; this results in a different long_name. *)
DOF_core.get_attribute_info "Conceptual.C" "z" @{theory};
\<close>
ML\<open>
DOF_core.value_of "sdf" \<^theory>;
DOF_core.value_of "sdfg" \<^theory>;
DOF_core.value_of "dfgdfg" \<^theory>;
DOF_core.value_of "omega" \<^theory>;
\<close>
text\<open>A not-too-trivial test: the default for y is [].
At creation: x -> "f", y -> "sdf".
The latter wins at access time.
Then @{term "t"}: creation of the multi-inheritance object omega
via three successive updates, of which the last one wins.\<close>
ML\<open>val s = map HOLogic.dest_string (HOLogic.dest_list @{docitem_attribute y::dfgdfg});
val t = HOLogic.dest_string (@{docitem_attribute x::omega}); \<close>
section\<open>Mutation of Attributes in DocItems\<close>
ML\<open> val Const("Groups.zero_class.zero", @{typ "int"}) = @{docitem_attribute a2::omega} \<close>
update_instance*[omega::E, a2+="1"]
ML\<open> val (s as Const("Groups.one_class.one", @{typ "int"}))= @{docitem_attribute a2 :: omega} \<close>
update_instance*[omega::E, a2+="6"]
ML\<open> @{docitem_attribute a2::omega};
val s = HOLogic.dest_number @{docitem_attribute a2::omega} \<close>
type_synonym ALFACENTAURI = E
update_instance*[omega::ALFACENTAURI, x+="''inition''"]
ML\<open> val s = HOLogic.dest_string ( @{docitem_attribute x::omega}) \<close>
update_instance*[omega::E, y+="[''defini'',''tion'']"]
update_instance*[omega::E, y+="[''en'']"]
ML\<open> val s = map HOLogic.dest_string (HOLogic.dest_list @{docitem_attribute y::omega}); \<close>
subsection\<open> Example text antiquotation:\<close>
text\<open> @{docitem_attribute y::omega} \<close>
section\<open>Simulation of a Monitor\<close>
declare[[free_class_in_monitor_checking]]
open_monitor*[figs1::figure_group,
caption="''Sample ''"]
ML\<open>val monitor_infos = DOF_core.get_monitor_infos \<^context>\<close>
text*[testFreeA::A]\<open>\<close>
figure*[fig_A::figure, spawn_columns=False,
relative_width="90",
src="''figures/A.png''"]
\<open> The A train \ldots \<close>
figure*[fig_B::figure,
spawn_columns=False,relative_width="90",
src="''figures/B.png''"]
\<open> The B train \ldots \<close>
open_monitor*[figs2::figure_group,
caption="''Sample ''"]
ML\<open>val monitor_infos = DOF_core.get_monitor_infos \<^context>\<close>
figure*[fig_C::figure, spawn_columns=False,
relative_width="90",
src="''figures/A.png''"]
\<open> The C train \ldots \<close>
open_monitor*[figs3::figure_group,
caption="''Sample ''"]
ML\<open>val monitor_infos = DOF_core.get_monitor_infos \<^context>\<close>
figure*[fig_D::figure,
spawn_columns=False,relative_width="90",
src="''figures/B.png''"]
\<open> The D train \ldots \<close>
close_monitor*[figs3]
open_monitor*[figs4::figure_group,
caption="''Sample ''"]
ML\<open>val monitor_infos = DOF_core.get_monitor_infos \<^context>\<close>
text*[testRejected1::figure_group, caption="''document/figures/A.png''"]
\<open> The A train \ldots \<close>
figure*[fig_E::figure,
spawn_columns=False,relative_width="90",
src="''figures/B.png''"]
\<open> The E train \ldots \<close>
close_monitor*[figs4]
close_monitor*[figs2]
text*[testRejected2::figure_group, caption="''document/figures/A.png''"]
\<open> The A train \ldots \<close>
close_monitor*[figs1]
declare[[free_class_in_monitor_checking = false]]
text\<open>Resulting trace of figs1 as ML antiquotation: \<close>
ML \<open>@{trace_attribute figs1}\<close>
text\<open>Resulting trace of figs as text antiquotation:\<close>
text\<open>@{trace_attribute figs1}\<close>
section\<open>A Complex Evaluation involving Automata\<close>
text\<open>Test trace\_attribute term antiquotation:\<close>
term*\<open>map snd @{trace-attribute \<open>figs1\<close>}\<close>
value*\<open>map snd @{trace-attribute \<open>figs1\<close>}\<close>
definition example_expression where "example_expression \<equiv> \<lbrace>\<lfloor>''Conceptual.A''\<rfloor> || \<lfloor>''Conceptual.F''\<rfloor>\<rbrace>\<^sup>*"
value* \<open> DA.accepts (na2da (rexp2na example_expression)) (map fst @{trace-attribute \<open>aaa\<close>}) \<close>
definition word_test :: "'a list \<Rightarrow> 'a rexp \<Rightarrow> bool" (infix "is-in" 60)
where " w is-in rexp \<equiv> DA.accepts (na2da (rexp2na rexp)) (w)"
value* \<open> (map fst @{trace-attribute \<open>aaa\<close>}) is-in example_expression \<close>
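text\<open>As an additional (hypothetical) sanity check, not part of the original test suite:
the Kleene star in @{term example_expression} should in particular accept the empty trace.\<close>
value* \<open> ([]::string list) is-in example_expression \<close>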
(*<*)
text\<open>Final Status:\<close>
print_doc_items
print_doc_classes
end
(*>*)
|
[STATEMENT]
lemma uniqueness_of_types: "
(\<forall> (E::'a prog \<times> (vname \<Rightarrow> ty option)) T1 T2.
E\<turnstile>e :: T1 \<longrightarrow> E\<turnstile>e :: T2 \<longrightarrow> T1 = T2) \<and>
(\<forall> (E::'a prog \<times> (vname \<Rightarrow> ty option)) Ts1 Ts2.
E\<turnstile>es [::] Ts1 \<longrightarrow> E\<turnstile>es [::] Ts2 \<longrightarrow> Ts1 = Ts2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>E T1 T2. E \<turnstile> e :: T1 \<longrightarrow> E \<turnstile> e :: T2 \<longrightarrow> T1 = T2) \<and> (\<forall>E Ts1 Ts2. E \<turnstile> es [::] Ts1 \<longrightarrow> E \<turnstile> es [::] Ts2 \<longrightarrow> Ts1 = Ts2)
[PROOF STEP]
apply (rule compat_expr_expr_list.induct)
[PROOF STATE]
proof (prove)
goal (11 subgoals):
1. \<And>x. \<forall>E T1 T2. E \<turnstile> NewC x :: T1 \<longrightarrow> E \<turnstile> NewC x :: T2 \<longrightarrow> T1 = T2
2. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> Cast x1 x2 :: T1 \<longrightarrow> E \<turnstile> Cast x1 x2 :: T2 \<longrightarrow> T1 = T2
3. \<And>x. \<forall>E T1 T2. E \<turnstile> Lit x :: T1 \<longrightarrow> E \<turnstile> Lit x :: T2 \<longrightarrow> T1 = T2
4. \<And>x1 x2 x3. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x3 :: T1 \<longrightarrow> E \<turnstile> x3 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> BinOp x1 x2 x3 :: T1 \<longrightarrow> E \<turnstile> BinOp x1 x2 x3 :: T2 \<longrightarrow> T1 = T2
5. \<And>x. \<forall>E T1 T2. E \<turnstile> LAcc x :: T1 \<longrightarrow> E \<turnstile> LAcc x :: T2 \<longrightarrow> T1 = T2
6. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> x1::=x2 :: T1 \<longrightarrow> E \<turnstile> x1::=x2 :: T2 \<longrightarrow> T1 = T2
7. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
8. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
9. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
10. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
A total of 11 subgoals...
[PROOF STEP]
apply strip_case_simp
[PROOF STATE]
proof (prove)
goal (10 subgoals):
1. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> Cast x1 x2 :: T1 \<longrightarrow> E \<turnstile> Cast x1 x2 :: T2 \<longrightarrow> T1 = T2
2. \<And>x. \<forall>E T1 T2. E \<turnstile> Lit x :: T1 \<longrightarrow> E \<turnstile> Lit x :: T2 \<longrightarrow> T1 = T2
3. \<And>x1 x2 x3. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x3 :: T1 \<longrightarrow> E \<turnstile> x3 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> BinOp x1 x2 x3 :: T1 \<longrightarrow> E \<turnstile> BinOp x1 x2 x3 :: T2 \<longrightarrow> T1 = T2
4. \<And>x. \<forall>E T1 T2. E \<turnstile> LAcc x :: T1 \<longrightarrow> E \<turnstile> LAcc x :: T2 \<longrightarrow> T1 = T2
5. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> x1::=x2 :: T1 \<longrightarrow> E \<turnstile> x1::=x2 :: T2 \<longrightarrow> T1 = T2
6. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
7. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
8. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
9. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
10. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply strip_case_simp
[PROOF STATE]
proof (prove)
goal (9 subgoals):
1. \<And>x. \<forall>E T1 T2. E \<turnstile> Lit x :: T1 \<longrightarrow> E \<turnstile> Lit x :: T2 \<longrightarrow> T1 = T2
2. \<And>x1 x2 x3. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x3 :: T1 \<longrightarrow> E \<turnstile> x3 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> BinOp x1 x2 x3 :: T1 \<longrightarrow> E \<turnstile> BinOp x1 x2 x3 :: T2 \<longrightarrow> T1 = T2
3. \<And>x. \<forall>E T1 T2. E \<turnstile> LAcc x :: T1 \<longrightarrow> E \<turnstile> LAcc x :: T2 \<longrightarrow> T1 = T2
4. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> x1::=x2 :: T1 \<longrightarrow> E \<turnstile> x1::=x2 :: T2 \<longrightarrow> T1 = T2
5. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
6. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
7. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
8. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
9. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply strip_case_simp
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. \<And>x1 x2 x3. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x3 :: T1 \<longrightarrow> E \<turnstile> x3 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> BinOp x1 x2 x3 :: T1 \<longrightarrow> E \<turnstile> BinOp x1 x2 x3 :: T2 \<longrightarrow> T1 = T2
2. \<And>x. \<forall>E T1 T2. E \<turnstile> LAcc x :: T1 \<longrightarrow> E \<turnstile> LAcc x :: T2 \<longrightarrow> T1 = T2
3. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> x1::=x2 :: T1 \<longrightarrow> E \<turnstile> x1::=x2 :: T2 \<longrightarrow> T1 = T2
4. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
5. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
6. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
7. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
8. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (intro strip)
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. \<And>x1 x2 x3 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x3 :: T1 \<longrightarrow> E \<turnstile> x3 :: T2 \<longrightarrow> T1 = T2; E \<turnstile> BinOp x1 x2 x3 :: T1; E \<turnstile> BinOp x1 x2 x3 :: T2\<rbrakk> \<Longrightarrow> T1 = T2
2. \<And>x. \<forall>E T1 T2. E \<turnstile> LAcc x :: T1 \<longrightarrow> E \<turnstile> LAcc x :: T2 \<longrightarrow> T1 = T2
3. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> x1::=x2 :: T1 \<longrightarrow> E \<turnstile> x1::=x2 :: T2 \<longrightarrow> T1 = T2
4. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
5. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
6. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
7. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
8. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (rename_tac binop x2 x3 E T1 T2, case_tac binop)
[PROOF STATE]
proof (prove)
goal (9 subgoals):
1. \<And>binop x2 x3 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x3 :: T1 \<longrightarrow> E \<turnstile> x3 :: T2 \<longrightarrow> T1 = T2; E \<turnstile> BinOp binop x2 x3 :: T1; E \<turnstile> BinOp binop x2 x3 :: T2; binop = Eq\<rbrakk> \<Longrightarrow> T1 = T2
2. \<And>binop x2 x3 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x3 :: T1 \<longrightarrow> E \<turnstile> x3 :: T2 \<longrightarrow> T1 = T2; E \<turnstile> BinOp binop x2 x3 :: T1; E \<turnstile> BinOp binop x2 x3 :: T2; binop = Add\<rbrakk> \<Longrightarrow> T1 = T2
3. \<And>x. \<forall>E T1 T2. E \<turnstile> LAcc x :: T1 \<longrightarrow> E \<turnstile> LAcc x :: T2 \<longrightarrow> T1 = T2
4. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> x1::=x2 :: T1 \<longrightarrow> E \<turnstile> x1::=x2 :: T2 \<longrightarrow> T1 = T2
5. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
6. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
7. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
8. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
9. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply ty_case_simp
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. \<And>binop x2 x3 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x3 :: T1 \<longrightarrow> E \<turnstile> x3 :: T2 \<longrightarrow> T1 = T2; E \<turnstile> BinOp binop x2 x3 :: T1; E \<turnstile> BinOp binop x2 x3 :: T2; binop = Add\<rbrakk> \<Longrightarrow> T1 = T2
2. \<And>x. \<forall>E T1 T2. E \<turnstile> LAcc x :: T1 \<longrightarrow> E \<turnstile> LAcc x :: T2 \<longrightarrow> T1 = T2
3. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> x1::=x2 :: T1 \<longrightarrow> E \<turnstile> x1::=x2 :: T2 \<longrightarrow> T1 = T2
4. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
5. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
6. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
7. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
8. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply ty_case_simp
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. \<And>x. \<forall>E T1 T2. E \<turnstile> LAcc x :: T1 \<longrightarrow> E \<turnstile> LAcc x :: T2 \<longrightarrow> T1 = T2
2. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> x1::=x2 :: T1 \<longrightarrow> E \<turnstile> x1::=x2 :: T2 \<longrightarrow> T1 = T2
3. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
4. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
5. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
6. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
7. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (strip_case_simp)
[PROOF STATE]
proof (prove)
goal (6 subgoals):
1. \<And>x1 x2. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> x1::=x2 :: T1 \<longrightarrow> E \<turnstile> x1::=x2 :: T2 \<longrightarrow> T1 = T2
2. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
3. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
4. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
5. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
6. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (strip_case_simp)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>x1 x2 x3. \<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2 \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3 :: T2 \<longrightarrow> T1 = T2
2. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
3. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
4. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
5. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (intro strip)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>x1 x2 x3 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; E \<turnstile> {x1}x2..x3 :: T1; E \<turnstile> {x1}x2..x3 :: T2\<rbrakk> \<Longrightarrow> T1 = T2
2. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
3. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
4. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
5. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (drule FAcc_invers)+
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>x1 x2 x3 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<exists>C. E \<turnstile> x2 :: Class C \<and> field (fst E, C) x3 = Some (x1, T1); \<exists>C. E \<turnstile> x2 :: Class C \<and> field (fst E, C) x3 = Some (x1, T2)\<rbrakk> \<Longrightarrow> T1 = T2
2. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
3. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
4. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
5. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x1 x2 x3 x4. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3:=x4 :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3:=x4 :: T2 \<longrightarrow> T1 = T2
2. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
3. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
4. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (intro strip)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x1 x2 x3 x4 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2; E \<turnstile> {x1}x2..x3:=x4 :: T1; E \<turnstile> {x1}x2..x3:=x4 :: T2\<rbrakk> \<Longrightarrow> T1 = T2
2. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
3. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
4. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (drule FAss_invers)+
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x1 x2 x3 x4 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2; \<exists>T. E \<turnstile> {x1}x2..x3 :: T \<and> E \<turnstile> x4 :: T1 \<and> fst E \<turnstile> T1 \<preceq> T; \<exists>T. E \<turnstile> {x1}x2..x3 :: T \<and> E \<turnstile> x4 :: T2 \<and> fst E \<turnstile> T2 \<preceq> T\<rbrakk> \<Longrightarrow> T1 = T2
2. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
3. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
4. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (elim conjE exE)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x1 x2 x3 x4 E T1 T2 T Ta. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2; E \<turnstile> {x1}x2..x3 :: T; E \<turnstile> x4 :: T1; fst E \<turnstile> T1 \<preceq> T; E \<turnstile> {x1}x2..x3 :: Ta; E \<turnstile> x4 :: T2; fst E \<turnstile> T2 \<preceq> Ta\<rbrakk> \<Longrightarrow> T1 = T2
2. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
3. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
4. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (drule FAcc_invers)+
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x1 x2 x3 x4 E T1 T2 T Ta. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E T1 T2. E \<turnstile> x4 :: T1 \<longrightarrow> E \<turnstile> x4 :: T2 \<longrightarrow> T1 = T2; E \<turnstile> x4 :: T1; fst E \<turnstile> T1 \<preceq> T; E \<turnstile> x4 :: T2; fst E \<turnstile> T2 \<preceq> Ta; \<exists>C. E \<turnstile> x2 :: Class C \<and> field (fst E, C) x3 = Some (x1, T); \<exists>C. E \<turnstile> x2 :: Class C \<and> field (fst E, C) x3 = Some (x1, Ta)\<rbrakk> \<Longrightarrow> T1 = T2
2. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
3. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
4. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x1 x2 x3 x4 x5. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E T1 T2. E \<turnstile> {x1}x2..x3( {x4}x5) :: T1 \<longrightarrow> E \<turnstile> {x1}x2..x3( {x4}x5) :: T2 \<longrightarrow> T1 = T2
2. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
3. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (intro strip)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x1 x2 x3 x4 x5 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2; E \<turnstile> {x1}x2..x3( {x4}x5) :: T1; E \<turnstile> {x1}x2..x3( {x4}x5) :: T2\<rbrakk> \<Longrightarrow> T1 = T2
2. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
3. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (drule Call_invers)+
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x1 x2 x3 x4 x5 E T1 T2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x2 :: T1 \<longrightarrow> E \<turnstile> x2 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x5 [::] Ts1 \<longrightarrow> E \<turnstile> x5 [::] Ts2 \<longrightarrow> Ts1 = Ts2; \<exists>pTs md. E \<turnstile> x2 :: Class x1 \<and> E \<turnstile> x5 [::] pTs \<and> max_spec (fst E) x1 (x3, pTs) = {((md, T1), x4)}; \<exists>pTs md. E \<turnstile> x2 :: Class x1 \<and> E \<turnstile> x5 [::] pTs \<and> max_spec (fst E) x1 (x3, pTs) = {((md, T2), x4)}\<rbrakk> \<Longrightarrow> T1 = T2
2. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
3. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<forall>E Ts1 Ts2. E \<turnstile> [] [::] Ts1 \<longrightarrow> E \<turnstile> [] [::] Ts2 \<longrightarrow> Ts1 = Ts2
2. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (strip_case_simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x1 x2. \<lbrakk>\<forall>E T1 T2. E \<turnstile> x1 :: T1 \<longrightarrow> E \<turnstile> x1 :: T2 \<longrightarrow> T1 = T2; \<forall>E Ts1 Ts2. E \<turnstile> x2 [::] Ts1 \<longrightarrow> E \<turnstile> x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2\<rbrakk> \<Longrightarrow> \<forall>E Ts1 Ts2. E \<turnstile> x1 # x2 [::] Ts1 \<longrightarrow> E \<turnstile> x1 # x2 [::] Ts2 \<longrightarrow> Ts1 = Ts2
[PROOF STEP]
apply (strip_case_simp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
MODULE generic_procedure_module
!
! Purpose:
! To define the derived data type for 2D vectors,
! plus two generic bound procedures.
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 12/27/06 S. J. Chapman Original code
!
IMPLICIT NONE
! Declare type vector
TYPE :: vector
REAL :: x ! X value
REAL :: y ! Y value
CONTAINS
GENERIC :: add => vector_plus_vector, vector_plus_scalar
PROCEDURE,PASS :: vector_plus_vector
PROCEDURE,PASS :: vector_plus_scalar
END TYPE vector
! Add procedures
CONTAINS
TYPE (vector) FUNCTION vector_plus_vector ( this, v2 )
!
! Purpose:
! To add two vectors.
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 12/27/06 S. J. Chapman Original code
!
IMPLICIT NONE
! Data dictionary: declare calling parameter types & definitions
CLASS(vector),INTENT(IN) :: this ! First vector
CLASS(vector),INTENT(IN) :: v2 ! Second vector
! Add the vectors
vector_plus_vector%x = this%x + v2%x
vector_plus_vector%y = this%y + v2%y
END FUNCTION vector_plus_vector
TYPE (vector) FUNCTION vector_plus_scalar ( this, s )
!
! Purpose:
! To add a vector and a scalar.
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 12/27/06 S. J. Chapman Original code
!
IMPLICIT NONE
! Data dictionary: declare calling parameter types & definitions
CLASS(vector),INTENT(IN) :: this ! First vector
REAL,INTENT(IN) :: s ! Scalar
! Add the points
vector_plus_scalar%x = this%x + s
vector_plus_scalar%y = this%y + s
END FUNCTION vector_plus_scalar
END MODULE generic_procedure_module
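! Hypothetical driver program (not part of the original module) showing how
! the generic binding resolves: v%add(vector) calls vector_plus_vector,
! while v%add(real) calls vector_plus_scalar.
PROGRAM test_generic_procedure
USE generic_procedure_module
IMPLICIT NONE
TYPE(vector) :: v1, v2, v3, v4
v1 = vector(1.0, 2.0)            ! Structure constructor
v2 = vector(3.0, 4.0)
v3 = v1%add(v2)                  ! Vector + vector: (4.0, 6.0)
v4 = v1%add(10.0)                ! Vector + scalar: (11.0, 12.0)
WRITE (*,*) 'v1 + v2  = ', v3%x, v3%y
WRITE (*,*) 'v1 + 10. = ', v4%x, v4%y
END PROGRAM test_generic_procedure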
|
module Main

-- `?convert` is a named hole; it stands for a yet-to-be-written function of
-- type Char -> String, so the program type-checks with the hole unsolved.
main : IO ()
main = putStrLn (?convert 'x')
|
public export
record Rec where
  x : Int

-- The right-hand side applies a record-update section `{x := 4}` to the
-- hole `?foo`, which must therefore elaborate to a value of type Rec.
f : Rec
f = {x := 4} ?foo
|
#include <boost/utility/addressof.hpp>
#include <string>
#include <iostream>

struct animal
{
  std::string name;
  int legs;
  // The address-of operator is overloaded, so '&a' no longer yields the
  // object's address but the number of legs instead.
  int operator&() const { return legs; }
};

int main()
{
  animal a{"cat", 4};
  std::cout << &a << '\n';                  // prints 4 (overloaded operator&)
  std::cout << boost::addressof(a) << '\n'; // prints the actual address of a
}
section \<open>Overview\<close>
text \<open>
Computing the maximal strongly connected components (SCCs) of a
finite directed graph is a celebrated problem in the
theory of graph algorithms. Although Tarjan's algorithm~\cite{tarjan:depth-first}
is perhaps the best-known solution, there are many others. In his PhD
thesis, Bloemen~\cite{bloemen:strong} presents an algorithm that is itself based
on earlier algorithms by Munro~\cite{munro:efficient} and
Dijkstra~\cite{dijkstra:finding}. Just like these algorithms, Bloemen's
solution is based on enumerating SCCs in a depth-first traversal of the graph.
  Gabow's algorithm, which has already been formalized in Isabelle~\cite{lammich:gabow},
  also falls into this category of solutions.
Nevertheless, Bloemen goes on to present a parallel variant of the algorithm
suitable for execution on multi-core processors, based on clever data structures
that minimize locking.
In the following, we encode the sequential version of the algorithm in the
proof assistant Isabelle/HOL, and prove its correctness. Bloemen's thesis
briefly and informally explains why the algorithm is correct. Our proof expands
on these arguments, making them completely formal. The encoding is based on
a direct representation of the algorithm as a pair of mutually recursive
functions; we are not aiming at extracting executable code.
\<close>
theory Graph
imports Main
begin
text \<open>
The record below represents the variables of the
algorithm. Most variables correspond to those used in
Bloemen's presentation. Thus, the variable @{text \<S>}
associates to every node the set of nodes that have
already been determined to be part of the same SCC. A core
invariant of the algorithm will be that this mapping represents
equivalence classes of nodes: for all nodes @{text v} and @{text w},
we maintain the relationship
@{text "v \<in> \<S> w \<longleftrightarrow> \<S> v = \<S> w."}
In an actual implementation of this algorithm, this variable
could conveniently be represented by a union-find structure.
Variable @{text stack} holds the list of roots of these
  (not yet maximal) SCCs, in depth-first order, while
@{text visited} and @{text explored}
represent the nodes that have already been seen, respectively
that have been completely explored, by the algorithm, and
@{text sccs} is the set of maximal SCCs that the algorithm
has found so far.
Additionally, the record holds some auxiliary variables that
are used in the proof of correctness. In particular,
@{text root} denotes the node on which the algorithm was called,
@{text cstack} represents the call stack of the recursion of
function @{text dfs},
and @{text vsuccs} stores the successors of each node
that have already been visited by the function @{text dfss}
that loops over all successors of a given node.
\<close>
record 'v env =
root :: "'v"
\<S> :: "'v \<Rightarrow> 'v set"
explored :: "'v set"
visited :: "'v set"
vsuccs :: "'v \<Rightarrow> 'v set"
sccs :: "'v set set"
stack :: "'v list"
cstack :: "'v list"
text \<open>
The algorithm is initially called with an environment that
initializes the root node and trivializes all other components.
\<close>
definition init_env where
"init_env v = \<lparr>
root = v,
\<S> = (\<lambda>u. {u}),
explored = {},
visited = {},
vsuccs = (\<lambda>u. {}),
sccs = {},
stack = [],
cstack = []
\<rparr>"
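text \<open>
  As a small added sanity check (not part of the original development),
  the record selectors applied to the initial environment indeed return
  these trivial values:
\<close>
lemma "\<S> (init_env v) v = {v}" and "stack (init_env v) = []"
  by (simp_all add: init_env_def)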
\<comment> \<open>Make the simplifier expand let-constructions automatically.\<close>
declare Let_def[simp]
section \<open>Auxiliary lemmas about lists\<close>
text \<open>
We use the precedence order on the elements that appear
in a list. In particular, stacks are represented as lists,
and a node @{text x} precedes another node @{text y} on the
stack if @{text x} was pushed on the stack later
than @{text y}.
\<close>
definition precedes ("_ \<preceq> _ in _" [100,100,100] 39) where
"x \<preceq> y in xs \<equiv> \<exists>l r. xs = l @ (x # r) \<and> y \<in> set (x # r)"
lemma precedes_mem:
assumes "x \<preceq> y in xs"
shows "x \<in> set xs" "y \<in> set xs"
using assms unfolding precedes_def by auto
lemma head_precedes:
assumes "y \<in> set (x # xs)"
shows "x \<preceq> y in (x # xs)"
using assms unfolding precedes_def by force
lemma precedes_in_tail:
assumes "x \<noteq> z"
shows "x \<preceq> y in (z # zs) \<longleftrightarrow> x \<preceq> y in zs"
using assms unfolding precedes_def by (auto simp: Cons_eq_append_conv)
lemma tail_not_precedes:
assumes "y \<preceq> x in (x # xs)" "x \<notin> set xs"
shows "x = y"
using assms unfolding precedes_def
by (metis Cons_eq_append_conv Un_iff list.inject set_append)
lemma split_list_precedes:
assumes "y \<in> set (ys @ [x])"
shows "y \<preceq> x in (ys @ x # xs)"
using assms unfolding precedes_def
by (metis append_Cons append_assoc in_set_conv_decomp
rotate1.simps(2) set_ConsD set_rotate1)
lemma precedes_refl [simp]: "(x \<preceq> x in xs) = (x \<in> set xs)"
proof
assume "x \<preceq> x in xs" thus "x \<in> set xs"
by (simp add: precedes_mem)
next
assume "x \<in> set xs"
from this[THEN split_list] show "x \<preceq> x in xs"
unfolding precedes_def by auto
qed
lemma precedes_append_left:
assumes "x \<preceq> y in xs"
shows "x \<preceq> y in (ys @ xs)"
using assms unfolding precedes_def by (metis append.assoc)
lemma precedes_append_left_iff:
assumes "x \<notin> set ys"
shows "x \<preceq> y in (ys @ xs) \<longleftrightarrow> x \<preceq> y in xs" (is "?lhs = ?rhs")
proof
assume "?lhs"
then obtain l r where lr: "ys @ xs = l @ (x # r)" "y \<in> set (x # r)"
unfolding precedes_def by blast
then obtain us where
"(ys = l @ us \<and> us @ xs = x # r) \<or> (ys @ us = l \<and> xs = us @ (x # r))"
by (auto simp: append_eq_append_conv2)
thus ?rhs
proof
assume us: "ys = l @ us \<and> us @ xs = x # r"
with assms have "us = []"
by (metis Cons_eq_append_conv in_set_conv_decomp)
with us lr show ?rhs
unfolding precedes_def by auto
next
assume us: "ys @ us = l \<and> xs = us @ (x # r)"
with \<open>y \<in> set (x # r)\<close> show ?rhs
unfolding precedes_def by blast
qed
next
assume "?rhs" thus "?lhs" by (rule precedes_append_left)
qed
lemma precedes_append_right:
assumes "x \<preceq> y in xs"
shows "x \<preceq> y in (xs @ ys)"
using assms unfolding precedes_def by force
lemma precedes_append_right_iff:
assumes "y \<notin> set ys"
shows "x \<preceq> y in (xs @ ys) \<longleftrightarrow> x \<preceq> y in xs" (is "?lhs = ?rhs")
proof
assume ?lhs
then obtain l r where lr: "xs @ ys = l @ (x # r)" "y \<in> set (x # r)"
unfolding precedes_def by blast
then obtain us where
"(xs = l @ us \<and> us @ ys = x # r) \<or> (xs @ us = l \<and> ys = us @ (x # r))"
by (auto simp: append_eq_append_conv2)
thus ?rhs
proof
assume us: "xs = l @ us \<and> us @ ys = x # r"
with \<open>y \<in> set (x # r)\<close> assms show ?rhs
unfolding precedes_def by (metis Cons_eq_append_conv Un_iff set_append)
next
assume us: "xs @ us = l \<and> ys = us @ (x # r)"
with \<open>y \<in> set (x # r)\<close> assms
show ?rhs by auto \<comment> \<open>contradiction\<close>
qed
next
assume ?rhs thus ?lhs by (rule precedes_append_right)
qed
text \<open>
Precedence determines an order on the elements of a list,
provided elements have unique occurrences. However, consider
a list such as @{text "[2,3,1,2]"}: then $1$ precedes $2$ and
$2$ precedes $3$, but $1$ does not precede $3$.
\<close>
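text \<open>
  One of the claims above can be checked directly (an added example,
  not in the original development): the head of a list precedes every
  element of the list, so @{text 2} precedes @{text 3} in @{text "[2,3,1,2]"}.
\<close>
lemma "(2::int) \<preceq> 3 in [2,3,1,2]"
  by (rule head_precedes) simp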
lemma precedes_trans:
assumes "x \<preceq> y in xs" and "y \<preceq> z in xs" and "distinct xs"
shows "x \<preceq> z in xs"
using assms unfolding precedes_def
by (smt Un_iff append.assoc append_Cons_eq_iff distinct_append
not_distinct_conv_prefix set_append split_list_last)
lemma precedes_antisym:
assumes "x \<preceq> y in xs" and "y \<preceq> x in xs" and "distinct xs"
shows "x = y"
proof -
from \<open>x \<preceq> y in xs\<close> \<open>distinct xs\<close> obtain as bs where
1: "xs = as @ (x # bs)" "y \<in> set (x # bs)" "y \<notin> set as"
unfolding precedes_def by force
from \<open>y \<preceq> x in xs\<close> \<open>distinct xs\<close> obtain cs ds where
2: "xs = cs @ (y # ds)" "x \<in> set (y # ds)" "x \<notin> set cs"
unfolding precedes_def by force
from 1 2 have "as @ (x # bs) = cs @ (y # ds)"
by simp
then obtain zs where
"(as = cs @ zs \<and> zs @ (x # bs) = y # ds)
\<or> (as @ zs = cs \<and> x # bs = zs @ (y # ds))" (is "?P \<or> ?Q")
by (auto simp: append_eq_append_conv2)
then show ?thesis
proof
assume "?P" with \<open>y \<notin> set as\<close> show ?thesis
by (cases "zs") auto
next
assume "?Q" with \<open>x \<notin> set cs\<close> show ?thesis
by (cases "zs") auto
qed
qed
section \<open>Finite directed graphs\<close>
text \<open>
We represent a graph as an Isabelle locale that identifies a finite
set of vertices (of some base type @{text "'v"}) and associates to
each vertex its set of successor vertices.
\<close>
locale graph =
fixes vertices :: "'v set"
and successors :: "'v \<Rightarrow> 'v set"
assumes vfin: "finite vertices"
and sclosed: "\<forall>x \<in> vertices. successors x \<subseteq> vertices"
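text \<open>
  As an added sanity check (a hypothetical instance, not part of the
  original development), the locale can be instantiated with a small
  concrete graph:
\<close>
interpretation toy: graph "{0::nat, 1, 2}" "\<lambda>x. if x = 0 then {1, 2} else {0}"
  by unfold_locales auto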
context graph
begin
abbreviation edge where
"edge x y \<equiv> y \<in> successors x"
text \<open>
We inductively define reachability of nodes in the graph.
\<close>
inductive reachable where
reachable_refl[iff]: "reachable x x"
| reachable_succ[elim]: "\<lbrakk>edge x y; reachable y z\<rbrakk> \<Longrightarrow> reachable x z"
lemma reachable_edge: "edge x y \<Longrightarrow> reachable x y"
by auto
lemma succ_reachable:
assumes "reachable x y" and "edge y z"
shows "reachable x z"
using assms by induct auto
lemma reachable_trans:
assumes y: "reachable x y" and z: "reachable y z"
shows "reachable x z"
using assms by induct auto
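text \<open>
  For illustration (an added instance, not in the original development),
  chaining two edges yields reachability:
\<close>
lemma edge_edge_reachable: "\<lbrakk>edge x y; edge y z\<rbrakk> \<Longrightarrow> reachable x z"
  by (meson reachable_edge succ_reachable)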
text \<open>
We also need the following variant of reachability avoiding
certain edges. More precisely, @{text y} is reachable from @{text x}
avoiding a set @{text E} of edges if there exists a path such that
no edge from @{text E} appears along the path.
\<close>
inductive reachable_avoiding where
ra_refl[iff]: "reachable_avoiding x x E"
| ra_succ[elim]: "\<lbrakk>edge x y; (x,y) \<notin> E; reachable_avoiding y z E\<rbrakk> \<Longrightarrow> reachable_avoiding x z E"
lemma edge_ra:
assumes "edge x y" and "(x,y) \<notin> E"
shows "reachable_avoiding x y E"
using assms by (meson reachable_avoiding.simps)
lemma ra_trans:
assumes 1: "reachable_avoiding x y E" and 2: "reachable_avoiding y z E"
shows "reachable_avoiding x z E"
using 1 2 by induction auto
lemma ra_mono:
assumes "reachable_avoiding x y E" and "E' \<subseteq> E"
shows "reachable_avoiding x y E'"
using assms by induction auto
lemma ra_add_edge:
assumes "reachable_avoiding x y E"
shows "reachable_avoiding x y (E \<union> {(v,w)})
\<or> (reachable_avoiding x v (E \<union> {(v,w)}) \<and> reachable_avoiding w y (E \<union> {(v,w)}))"
using assms proof (induction)
case (ra_refl x E)
then show ?case by simp
next
case (ra_succ x y E z)
then show ?case
by (metis (no_types, opaque_lifting) UnE emptyE insert_iff prod.inject reachable_avoiding.simps)
qed
text \<open>
Reachability avoiding some edges obviously implies reachability.
Conversely, reachability implies reachability avoiding the empty set.
\<close>
lemma ra_reachable:
"reachable_avoiding x y E \<Longrightarrow> reachable x y"
by (induction rule: reachable_avoiding.induct) (auto intro: succ_reachable)
lemma ra_empty:
"reachable_avoiding x y {} = reachable x y"
proof
assume "reachable_avoiding x y {}"
thus "reachable x y"
by (rule ra_reachable)
next
assume "reachable x y"
thus "reachable_avoiding x y {}"
by induction auto
qed
section \<open>Strongly connected components\<close>
text \<open>
A strongly connected component is a set @{text S} of nodes
such that any two nodes in @{text S} are reachable from each other.
This concept is represented by the predicate @{text "is_subscc"} below.
We are ultimately interested in non-empty, maximal strongly connected
components, represented by the predicate @{text "is_scc"}.
\<close>
definition is_subscc where
"is_subscc S \<equiv> \<forall>x \<in> S. \<forall>y \<in> S. reachable x y"
definition is_scc where
"is_scc S \<equiv> S \<noteq> {} \<and> is_subscc S \<and> (\<forall>S'. S \<subseteq> S' \<and> is_subscc S' \<longrightarrow> S' = S)"
lemma subscc_add:
assumes "is_subscc S" and "x \<in> S"
and "reachable x y" and "reachable y x"
shows "is_subscc (insert y S)"
using assms unfolding is_subscc_def by (metis insert_iff reachable_trans)
lemma sccE:
\<comment> \<open>Two nodes that are reachable from each other are in the same SCC.\<close>
assumes "is_scc S" and "x \<in> S"
and "reachable x y" and "reachable y x"
shows "y \<in> S"
using assms unfolding is_scc_def
by (metis insertI1 subscc_add subset_insertI)
lemma scc_partition:
\<comment> \<open>Two SCCs that contain a common element are identical.\<close>
assumes "is_scc S" and "is_scc S'" and "x \<in> S \<inter> S'"
shows "S = S'"
using assms unfolding is_scc_def is_subscc_def
by (metis IntE assms(2) sccE subsetI)
section \<open>Algorithm for computing strongly connected components\<close>
text \<open>
We now introduce our representation of Bloemen's algorithm in Isabelle/HOL.
The auxiliary function @{text unite} corresponds to the inner \textsf{while}
loop in Bloemen's pseudo-code~\cite[p.32]{bloemen:strong}. It is applied to
two nodes @{text v} and @{text w} (and the environment @{text e} holding the
current values of the program variables) when a loop is found, i.e.\ when
@{text w} is a successor of @{text v} in the graph that has already been
visited in the depth-first search. In that case, the root of the SCC
of node @{text w} determined so far must appear below the root of
@{text v}'s SCC in the @{text stack} maintained by the algorithm.
The effect of the function is to merge the SCCs of all nodes on the
top of the stack above (and including) @{text w}. Node @{text w}'s root
will be the root of the merged SCC.
\<close>
definition unite :: "'v \<Rightarrow> 'v \<Rightarrow> 'v env \<Rightarrow> 'v env" where
"unite v w e \<equiv>
let pfx = takeWhile (\<lambda>x. w \<notin> \<S> e x) (stack e);
sfx = dropWhile (\<lambda>x. w \<notin> \<S> e x) (stack e);
cc = \<Union> { \<S> e x | x . x \<in> set pfx \<union> {hd sfx} }
in e\<lparr>\<S> := \<lambda>x. if x \<in> cc then cc else \<S> e x,
stack := sfx\<rparr>"
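text \<open>
As a small illustration (not part of the development), the following
@{text value} commands show how @{text takeWhile} and @{text dropWhile}
split a list at the first element for which the test fails. Here a list
of naturals mimics the stack, and the test @{text "(\<lambda>x. x \<noteq> 3)"}
plays the role of @{text "(\<lambda>x. w \<notin> \<S> e x)"}.
\<close>
value "takeWhile (\<lambda>x::nat. x \<noteq> 3) [1,2,3,4]" \<comment> \<open>yields @{text "[1,2]"}, the prefix @{text pfx}\<close>
value "dropWhile (\<lambda>x::nat. x \<noteq> 3) [1,2,3,4]" \<comment> \<open>yields @{text "[3,4]"}, the suffix @{text sfx}\<close>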
text \<open>
We now represent the algorithm as two mutually recursive functions @{text dfs} and
@{text dfss} in Isabelle/HOL. The function @{text dfs} corresponds to Bloemen's
function \textsf{SetBased}, whereas @{text dfss} corresponds to the \textsf{forall}
loop over the successors of the node on which @{text dfs} was called. Instead of
using global program variables in imperative style, our functions explicitly pass
environments that hold the current values of these variables.
A technical complication in the development of the algorithm in Isabelle is
that the functions need not terminate when their pre-conditions (introduced
below) are violated, for example when @{text dfs} is called for a node that
has already been visited. We therefore cannot prove termination at this point,
but will later show that the explicitly given pre-conditions ensure termination.
\<close>
function (domintros) dfs :: "'v \<Rightarrow> 'v env \<Rightarrow> 'v env"
and dfss :: "'v \<Rightarrow> 'v env \<Rightarrow> 'v env" where
"dfs v e =
(let e1 = e\<lparr>visited := visited e \<union> {v},
stack := (v # stack e),
cstack := (v # cstack e)\<rparr>;
e' = dfss v e1
in if v = hd(stack e')
then e'\<lparr>sccs := sccs e' \<union> {\<S> e' v},
explored := explored e' \<union> (\<S> e' v),
stack := tl(stack e'),
cstack := tl(cstack e')\<rparr>
else e'\<lparr>cstack := tl(cstack e')\<rparr>)"
| "dfss v e =
(let vs = successors v - vsuccs e v
in if vs = {} then e
else let w = SOME x. x \<in> vs;
e' = (if w \<in> explored e then e
else if w \<notin> visited e
then dfs w e
else unite v w e);
e'' = (e'\<lparr>vsuccs :=
(\<lambda>x. if x=v then vsuccs e' v \<union> {w}
else vsuccs e' x)\<rparr>)
in dfss v e'')"
by pat_completeness (force+)
section \<open>Definition of the predicates used in the correctness proof\<close>
text \<open>
Environments are partially ordered according to the following definition.
\<close>
definition sub_env where
"sub_env e e' \<equiv>
root e' = root e
\<and> visited e \<subseteq> visited e'
\<and> explored e \<subseteq> explored e'
\<and> (\<forall>v. vsuccs e v \<subseteq> vsuccs e' v)
\<and> (\<exists>ns. stack e = ns @ stack e')
\<and> cstack e' = cstack e
\<and> (\<forall> v. \<S> e v \<subseteq> \<S> e' v)
\<and> (\<Union> {\<S> e v | v . v \<in> set (stack e)})
\<subseteq> (\<Union> {\<S> e' v | v . v \<in> set (stack e')})
"
lemma sub_env_trans:
assumes "sub_env e e'" and "sub_env e' e''"
shows "sub_env e e''"
using assms unfolding sub_env_def
by (smt (verit, best) append.assoc dual_order.trans)
text \<open>
The set @{text "unvisited e u"} contains all edges @{text "(a,b)"}
such that node @{text a} is in the equivalence class of
node @{text u} and the edge has not yet been followed, in the
sense represented by variable @{text vsuccs}.
\<close>
definition unvisited where
"unvisited e u \<equiv>
{(a,b) | a b. a \<in> \<S> e u \<and> b \<in> successors a - vsuccs e a}"
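text \<open>
As an immediate illustration (a hypothetical helper, not part of the
original development): if all outgoing edges of every node in the class
of @{text u} have been followed, no unvisited edges remain.
\<close>
lemma unvisited_empty_illustration:
  assumes "\<forall>a \<in> \<S> e u. vsuccs e a = successors a"
  shows "unvisited e u = {}"
  using assms by (auto simp: unvisited_def)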
subsection \<open>Main invariant\<close>
text \<open>
The following definition characterizes well-formed environments.
This predicate will be shown to hold throughout the execution
of the algorithm. In words, it asserts the following facts:
\begin{itemize}
\item Only nodes reachable from the root (for which the algorithm
was originally called) are visited.
\item The two stacks @{text stack} and @{text cstack} do not
contain duplicate nodes, and @{text stack} contains a subset
of the nodes on @{text cstack}, in the same order.
\item Any node higher on the @{text stack} (i.e., that was pushed
later) is reachable from nodes lower in the @{text stack}.
This property also holds for nodes on the call stack,
but this is not needed for the correctness proof.
\item Every explored node, and every node on the call stack,
has been visited.
\item Nodes reachable from fully explored nodes have
themselves been fully explored.
\item The set @{text "vsuccs e n"}, for any node @{text n},
is a subset of @{text n}'s successors, and all these nodes
are in @{text visited}. The set is empty if @{text "n \<notin> visited"},
and it contains all successors if @{text n} has been fully
explored or if @{text n} has been visited but is no longer
on the call stack.
\item The sets @{text "\<S> e n"} represent an equivalence relation.
The equivalence classes of nodes that have not yet been visited
are singletons. Also, equivalence classes for two distinct nodes
on the @{text stack} are disjoint because the stack only stores
roots of SCCs, and the union of the equivalence classes for these
root nodes corresponds to the set of live nodes, i.e.\ those nodes
that have already been visited but not yet fully explored.
\item More precisely, an equivalence class is represented on the
stack by the oldest node in the sense of the call order: any
node in the class that is still on the call stack precedes the
representative on the call stack and was therefore pushed later.
\item Equivalence classes represent the maximal available
information about strong connectedness: nodes represented by
some node @{text n} on the @{text stack} can reach some node
@{text m} that is lower in the stack only by taking an
edge from some node in @{text n}'s equivalence class that
has not yet been followed. (Remember that @{text m} can reach
@{text n} by one of the previous conjuncts.)
\item Equivalence classes represent partial SCCs in the sense
of the predicate @{text is_subscc}. Variable @{text sccs}
holds maximal SCCs in the sense of the predicate @{text is_scc},
and their union corresponds to the set of explored nodes.
\end{itemize}
\<close>
definition wf_env where
"wf_env e \<equiv>
(\<forall>n \<in> visited e. reachable (root e) n)
\<and> distinct (stack e)
\<and> distinct (cstack e)
\<and> (\<forall>n m. n \<preceq> m in stack e \<longrightarrow> n \<preceq> m in cstack e)
\<and> (\<forall>n m. n \<preceq> m in stack e \<longrightarrow> reachable m n)
\<and> explored e \<subseteq> visited e
\<and> set (cstack e) \<subseteq> visited e
\<and> (\<forall>n \<in> explored e. \<forall>m. reachable n m \<longrightarrow> m \<in> explored e)
\<and> (\<forall>n. vsuccs e n \<subseteq> successors n \<inter> visited e)
\<and> (\<forall>n. n \<notin> visited e \<longrightarrow> vsuccs e n = {})
\<and> (\<forall>n \<in> explored e. vsuccs e n = successors n)
\<and> (\<forall>n \<in> visited e - set (cstack e). vsuccs e n = successors n)
\<and> (\<forall>n m. m \<in> \<S> e n \<longleftrightarrow> (\<S> e n = \<S> e m))
\<and> (\<forall>n. n \<notin> visited e \<longrightarrow> \<S> e n = {n})
\<and> (\<forall>n \<in> set (stack e). \<forall>m \<in> set (stack e). n \<noteq> m \<longrightarrow> \<S> e n \<inter> \<S> e m = {})
\<and> \<Union> {\<S> e n | n. n \<in> set (stack e)} = visited e - explored e
\<and> (\<forall>n \<in> set (stack e). \<forall>m \<in> \<S> e n. m \<in> set (cstack e) \<longrightarrow> m \<preceq> n in cstack e)
\<and> (\<forall>n m. n \<preceq> m in stack e \<and> n \<noteq> m \<longrightarrow>
(\<forall>u \<in> \<S> e n. \<not> reachable_avoiding u m (unvisited e n)))
\<and> (\<forall>n. is_subscc (\<S> e n))
\<and> (\<forall>S \<in> sccs e. is_scc S)
\<and> \<Union> (sccs e) = explored e"
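text \<open>
Individual conjuncts of the invariant are obtained by unfolding the
definition; for instance (an illustrative projection, not used later):
\<close>
lemma "wf_env e \<Longrightarrow> distinct (stack e)"
  by (simp add: wf_env_def)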
subsection \<open>Consequences of the invariant\<close>
text \<open>
Since every node on the call stack is an element
of @{text visited}, and every node on the @{text stack}
also appears on @{text cstack}, every node on the
@{text stack} is in @{text visited} as well.
\<close>
lemma stack_visited:
assumes "wf_env e" "n \<in> set (stack e)"
shows "n \<in> visited e"
using assms unfolding wf_env_def
by (meson precedes_refl subset_iff)
text \<open>
Classes represented on the stack consist of visited nodes
that have not yet been fully explored.
\<close>
lemma stack_class:
assumes "wf_env e" "n \<in> set (stack e)" "m \<in> \<S> e n"
shows "m \<in> visited e - explored e"
using assms unfolding wf_env_def by blast
text \<open>
Conversely, every such node belongs to some class
represented on the stack.
\<close>
lemma visited_unexplored:
assumes "wf_env e" "m \<in> visited e" "m \<notin> explored e"
obtains n where "n \<in> set (stack e)" "m \<in> \<S> e n"
using assms unfolding wf_env_def
by (smt (verit, ccfv_threshold) Diff_iff Union_iff mem_Collect_eq)
text \<open>
Every node belongs to its own equivalence class.
\<close>
lemma S_reflexive:
assumes "wf_env e"
shows "n \<in> \<S> e n"
using assms by (auto simp: wf_env_def)
text \<open>
No node on the stack has been fully explored.
\<close>
lemma stack_unexplored:
assumes 1: "wf_env e"
and 2: "n \<in> set (stack e)"
and 3: "n \<in> explored e"
shows "P"
using stack_class[OF 1 2] S_reflexive[OF 1] 3 by blast
text \<open>
If @{text w} is reachable from visited node @{text v}, but
@{text w} cannot be reached via an unfollowed edge from any
visited node reachable from @{text v}, then @{text w} must be visited.
\<close>
lemma reachable_visited:
assumes e: "wf_env e"
and v: "v \<in> visited e"
and w: "reachable v w"
and s: "\<forall>n \<in> visited e. \<forall>m \<in> successors n - vsuccs e n.
reachable v n \<longrightarrow> \<not> reachable m w"
shows "w \<in> visited e"
using w v s proof (induction)
case (reachable_refl x)
then show ?case by simp
next
case (reachable_succ x y z)
then have "y \<in> vsuccs e x" by blast
with e have "y \<in> visited e"
unfolding wf_env_def by (meson le_infE subset_eq)
with reachable_succ reachable.reachable_succ show ?case
by blast
qed
text \<open>
Edges leading to explored nodes do not contribute to the reachability
of unexplored nodes: such an edge can be added to the set of
avoided edges without invalidating reachability.
\<close>
lemma avoiding_explored:
assumes e: "wf_env e"
and xy: "reachable_avoiding x y E"
and y: "y \<notin> explored e"
and w: "w \<in> explored e"
shows "reachable_avoiding x y (E \<union> {(v,w)})"
using xy y proof (induction)
case (ra_refl x E)
then show ?case by simp
next
case (ra_succ x y E z)
with e w show ?case unfolding wf_env_def
by (meson edge_ra ra_add_edge ra_reachable ra_trans)
qed
subsection \<open>Pre- and post-conditions of function @{text dfs}\<close>
text \<open>
Function @{text dfs} should be called for a well-formed
environment and a node @{text v} that has not yet been
visited and that is reachable from the root node,
as well as from all nodes on the stack. No outgoing edge
of node @{text v} has been followed yet.
\<close>
definition pre_dfs where
"pre_dfs v e \<equiv>
wf_env e
\<and> v \<notin> visited e
\<and> reachable (root e) v
\<and> (\<forall>n \<in> set (stack e). reachable n v)"
text \<open>
Function @{text dfs} maintains the invariant
@{text wf_env} and returns an environment @{text e'} that
extends the input environment @{text e}. Node @{text v} has been
visited and all its outgoing edges have been followed.
Because the algorithm works in depth-first fashion, no
new outgoing edges of nodes that had already been
visited in the input environment have been followed, and
the stack of @{text e'} is a suffix of the one of @{text e}
such that @{text v} is still reachable from all nodes on the
stack. The stack may have been shortened because SCCs
represented at the top of the stack may have been
merged. The call stack is reestablished as it was in @{text e}.
There are two possible outcomes of the algorithm:
\begin{itemize}
\item Either @{text v} has been fully explored, in which case
the stacks of @{text e} and @{text e'} are the same, and
the equivalence classes of all nodes represented on the
stack are unchanged. This corresponds to the case where
@{text v} is the root node of its (maximal) SCC.
\item Alternatively, the stack of @{text e'} must be
non-empty and @{text v} must be represented by the node at
the top of the stack. The SCCs of the nodes
lower on the stack are unchanged. This corresponds to the
case where @{text v} is not the root node of its SCC, but
some SCCs at the top of the stack may have been merged.
\end{itemize}
\<close>
definition post_dfs where
"post_dfs v e e' \<equiv>
wf_env e'
\<and> v \<in> visited e'
\<and> sub_env e e'
\<and> vsuccs e' v = successors v
\<and> (\<forall>w \<in> visited e. vsuccs e' w = vsuccs e w)
\<and> ( (v \<in> explored e' \<and> stack e' = stack e
\<and> (\<forall>n \<in> set (stack e'). \<S> e' n = \<S> e n))
\<or> (stack e' \<noteq> [] \<and> v \<in> \<S> e' (hd (stack e'))
\<and> (\<forall>n \<in> set (tl (stack e')). \<S> e' n = \<S> e n)))"
text \<open>
The initial environment is easily seen to satisfy @{text dfs}'s
pre-condition.
\<close>
lemma init_env_pre_dfs: "pre_dfs v (init_env v)"
by (auto simp: pre_dfs_def wf_env_def init_env_def is_subscc_def
dest: precedes_mem)
text \<open>
Any node represented by the top stack element of the
input environment is still represented by the top
element of the output stack.
\<close>
lemma dfs_S_hd_stack:
assumes wf: "wf_env e"
and post: "post_dfs v e e'"
and n: "stack e \<noteq> []" "n \<in> \<S> e (hd (stack e))"
shows "stack e' \<noteq> []" "n \<in> \<S> e' (hd (stack e'))"
proof -
have 1: "stack e' \<noteq> [] \<and> n \<in> \<S> e' (hd (stack e'))"
proof (cases "stack e' = stack e \<and> (\<forall>n \<in> set (stack e'). \<S> e' n = \<S> e n)")
case True
with n show ?thesis
by auto
next
case 2: False
with post have "stack e' \<noteq> []"
by (simp add: post_dfs_def)
from n have "hd (stack e) \<in> set (stack e)"
by simp
with 2 n post obtain u where
u: "u \<in> set (stack e')" "n \<in> \<S> e' u"
unfolding post_dfs_def sub_env_def by blast
show ?thesis
proof (cases "u = hd (stack e')")
case True
with u \<open>stack e' \<noteq> []\<close> show ?thesis
by simp
next
case False
with u have "u \<in> set (tl (stack e'))"
by (metis empty_set equals0D list.collapse set_ConsD)
with u 2 post have "u \<in> set (tl (stack e)) \<and> n \<in> \<S> e u"
unfolding post_dfs_def sub_env_def
by (metis (no_types, lifting) Un_iff append_self_conv2 set_append tl_append2)
with n wf \<open>hd (stack e) \<in> set (stack e)\<close> show ?thesis
unfolding wf_env_def
by (metis (no_types, opaque_lifting) disjoint_iff_not_equal distinct.simps(2) list.collapse list.set_sel(2))
qed
qed
from 1 show "stack e' \<noteq> []" by simp
from 1 show "n \<in> \<S> e' (hd (stack e'))" by simp
qed
text \<open>
Function @{text dfs} leaves the SCCs represented
by elements in the (new) tail of the @{text stack} unchanged.
\<close>
lemma dfs_S_tl_stack:
assumes post: "post_dfs v e e'"
and nempty: "stack e \<noteq> []"
shows "stack e' \<noteq> []" "\<forall>n \<in> set (tl (stack e')). \<S> e' n = \<S> e n"
proof -
have 1: "stack e' \<noteq> [] \<and> (\<forall>n \<in> set (tl (stack e')). \<S> e' n = \<S> e n)"
proof (cases "stack e' = stack e \<and> (\<forall>n \<in> set (stack e'). \<S> e' n = \<S> e n)")
case True
with nempty show ?thesis
by (simp add: list.set_sel(2))
next
case False
with post show ?thesis
by (auto simp: post_dfs_def)
qed
from 1 show "stack e' \<noteq> []"
by simp
from 1 show "\<forall>n \<in> set (tl (stack e')). \<S> e' n = \<S> e n"
by simp
qed
subsection \<open>Pre- and post-conditions of function @{text dfss}\<close>
text \<open>
The pre- and post-conditions of function @{text dfss}
correspond to the invariant of the loop over all outgoing
edges from node @{text v}. The environment must be
well-formed, node @{text v} must be visited and represented
by the top element of the (non-empty) stack. Node @{text v}
must be reachable from all nodes on the stack, and it must be
the top node on the call stack. All outgoing
edges of node @{text v} that have already been followed must
lead either to completely explored nodes (that are no longer
represented on the stack) or to nodes that are part of the
same SCC as @{text v}.
\<close>
definition pre_dfss where
"pre_dfss v e \<equiv>
wf_env e
\<and> v \<in> visited e
\<and> (stack e \<noteq> [])
\<and> (v \<in> \<S> e (hd (stack e)))
\<and> (\<forall>w \<in> vsuccs e v. w \<in> explored e \<union> \<S> e (hd (stack e)))
\<and> (\<forall>n \<in> set (stack e). reachable n v)
\<and> (\<exists>ns. cstack e = v # ns)"
text \<open>
The post-condition establishes that all outgoing edges
of node @{text v} have been followed. As for function
@{text dfs}, no new outgoing edges of previously visited
nodes have been followed. Also as before, the new stack
is a suffix of the old one, and the call stack is restored.
In case node @{text v} is still at the top of the stack (and
therefore is the root node of its SCC), no node that is lower
on the stack can be reachable from @{text v}. This condition
guarantees the maximality of the computed SCCs.
\<close>
definition post_dfss where
"post_dfss v e e' \<equiv>
wf_env e'
\<and> vsuccs e' v = successors v
\<and> (\<forall>w \<in> visited e - {v}. vsuccs e' w = vsuccs e w)
\<and> sub_env e e'
\<and> (\<forall>w \<in> successors v. w \<in> explored e' \<union> \<S> e' (hd (stack e')))
\<and> (\<forall>n \<in> set (stack e'). reachable n v)
\<and> (stack e' \<noteq> [])
\<and> v \<in> \<S> e' (hd (stack e'))
\<and> (\<forall>n \<in> set (tl (stack e')). \<S> e' n = \<S> e n)
\<and> (hd (stack e') = v \<longrightarrow> (\<forall>n \<in> set (tl (stack e')). \<not> reachable v n))"
section \<open>Proof of partial correctness\<close>
subsection \<open>Lemmas about function @{text unite}\<close>
text \<open>
We start by establishing a few lemmas about function @{text unite}
in the context where it is called.
\<close>
lemma unite_stack:
fixes e v w
defines "e' \<equiv> unite v w e"
assumes wf: "wf_env e"
and w: "w \<in> successors v" "w \<notin> vsuccs e v" "w \<in> visited e" "w \<notin> explored e"
obtains pfx where "stack e = pfx @ (stack e')"
"stack e' \<noteq> []"
"let cc = \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}
in \<S> e' = (\<lambda>x. if x \<in> cc then cc else \<S> e x)"
"w \<in> \<S> e' (hd (stack e'))"
proof -
define pfx where "pfx = takeWhile (\<lambda>x. w \<notin> \<S> e x) (stack e)"
define sfx where "sfx = dropWhile (\<lambda>x. w \<notin> \<S> e x) (stack e)"
define cc where "cc = \<Union> {\<S> e x |x. x \<in> set pfx \<union> {hd sfx}}"
have "stack e = pfx @ sfx"
by (simp add: pfx_def sfx_def)
moreover
have "stack e' = sfx"
by (simp add: e'_def unite_def sfx_def)
moreover
from wf w have "w \<in> \<Union> {\<S> e n | n. n \<in> set (stack e)}"
by (simp add: wf_env_def)
then obtain n where "n \<in> set (stack e)" "w \<in> \<S> e n"
by auto
hence sfx: "sfx \<noteq> [] \<and> w \<in> \<S> e (hd sfx)"
unfolding sfx_def
by (metis dropWhile_eq_Nil_conv hd_dropWhile)
moreover
have "\<S> e' = (\<lambda>x. if x \<in> cc then cc else \<S> e x)"
by (rule,
auto simp add: e'_def unite_def pfx_def sfx_def cc_def)
moreover
from sfx have "w \<in> cc"
by (auto simp: cc_def)
from S_reflexive[OF wf, of "hd sfx"]
have "hd sfx \<in> cc"
by (auto simp: cc_def)
with \<open>w \<in> cc\<close> \<open>\<S> e' = (\<lambda>x. if x \<in> cc then cc else \<S> e x)\<close>
have "w \<in> \<S> e' (hd sfx)"
by simp
ultimately show ?thesis
using that e'_def unite_def pfx_def sfx_def cc_def
by meson
qed
text \<open>
Function @{text unite} leaves intact the equivalence classes
represented by the tail of the new stack.
\<close>
lemma unite_S_tl:
fixes e v w
defines "e' \<equiv> unite v w e"
assumes wf: "wf_env e"
and w: "w \<in> successors v" "w \<notin> vsuccs e v" "w \<in> visited e" "w \<notin> explored e"
and n: "n \<in> set (tl (stack e'))"
shows "\<S> e' n = \<S> e n"
proof -
from assms obtain pfx where
pfx: "stack e = pfx @ (stack e')" "stack e' \<noteq> []"
"let cc = \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}
in \<S> e' = (\<lambda>x. if x \<in> cc then cc else \<S> e x)"
by (blast dest: unite_stack)
define cc where "cc \<equiv> \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}"
have "n \<notin> cc"
proof
assume "n \<in> cc"
then obtain m where
"m \<in> set pfx \<union> {hd (stack e')}" "n \<in> \<S> e m"
by (auto simp: cc_def)
with S_reflexive[OF wf, of n] n wf \<open>stack e = pfx @ stack e'\<close> \<open>stack e' \<noteq> []\<close>
show "False"
unfolding wf_env_def
by (smt (z3) Diff_triv Un_iff Un_insert_right append.right_neutral disjoint_insert(1)
distinct.simps(2) distinct_append empty_set insertE insert_Diff list.exhaust_sel
list.simps(15) set_append)
qed
with pfx show "\<S> e' n = \<S> e n"
by (auto simp add: cc_def)
qed
text \<open>
The stack of the result of @{text unite} represents the
same nodes as the input stack, potentially grouped into
fewer equivalence classes.
\<close>
lemma unite_S_equal:
fixes e v w
defines "e' \<equiv> unite v w e"
assumes wf: "wf_env e"
and w: "w \<in> successors v" "w \<notin> vsuccs e v" "w \<in> visited e" "w \<notin> explored e"
shows "(\<Union> {\<S> e' n | n. n \<in> set (stack e')}) = (\<Union> {\<S> e n | n. n \<in> set (stack e)})"
proof -
from assms obtain pfx where
pfx: "stack e = pfx @ (stack e')" "stack e' \<noteq> []"
"let cc = \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}
in \<S> e' = (\<lambda>x. if x \<in> cc then cc else \<S> e x)"
by (blast dest: unite_stack)
define cc where "cc \<equiv> \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}"
from pfx have Se': "\<forall>x. \<S> e' x = (if x \<in> cc then cc else \<S> e x)"
by (auto simp: cc_def)
from S_reflexive[OF wf, of "hd (stack e')"]
have S_hd: "\<S> e' (hd (stack e')) = cc"
by (auto simp: Se' cc_def)
from \<open>stack e' \<noteq> []\<close>
have ste': "set (stack e') = {hd (stack e')} \<union> set (tl (stack e'))"
by (metis insert_is_Un list.exhaust_sel list.simps(15))
from \<open>stack e = pfx @ stack e'\<close> \<open>stack e' \<noteq> []\<close>
have "stack e = pfx @ (hd (stack e') # tl (stack e'))"
by auto
hence "\<Union> {\<S> e n | n. n \<in> set (stack e)}
= cc \<union> (\<Union> {\<S> e n | n. n \<in> set (tl (stack e'))})"
by (auto simp add: cc_def)
also from S_hd unite_S_tl[OF wf w]
have "\<dots> = \<S> e' (hd (stack e')) \<union> (\<Union> {\<S> e' n | n. n \<in> set (tl (stack e'))})"
by (auto simp: e'_def)
also from ste'
have "\<dots> = \<Union> {\<S> e' n | n. n \<in> set (stack e')}"
by auto
finally show ?thesis
by simp
qed
text \<open>
The head of the stack represents a (not necessarily maximal) SCC.
\<close>
lemma unite_subscc:
fixes e v w
defines "e' \<equiv> unite v w e"
assumes pre: "pre_dfss v e"
and w: "w \<in> successors v" "w \<notin> vsuccs e v" "w \<in> visited e" "w \<notin> explored e"
shows "is_subscc (\<S> e' (hd (stack e')))"
proof -
from pre have wf: "wf_env e"
by (simp add: pre_dfss_def)
from assms obtain pfx where
pfx: "stack e = pfx @ (stack e')" "stack e' \<noteq> []"
"let cc = \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}
in \<S> e' = (\<lambda>x. if x \<in> cc then cc else \<S> e x)"
by (blast dest: unite_stack[OF wf])
define cc where "cc \<equiv> \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}"
from wf w have "w \<in> \<Union> {\<S> e n | n. n \<in> set (stack e)}"
by (simp add: wf_env_def)
hence "w \<in> \<S> e (hd (stack e'))"
apply (simp add: e'_def unite_def)
by (metis dropWhile_eq_Nil_conv hd_dropWhile)
have "is_subscc cc"
proof (clarsimp simp: is_subscc_def)
fix x y
assume "x \<in> cc" "y \<in> cc"
then obtain nx ny where
nx: "nx \<in> set pfx \<union> {hd (stack e')}" "x \<in> \<S> e nx" and
ny: "ny \<in> set pfx \<union> {hd (stack e')}" "y \<in> \<S> e ny"
by (auto simp: cc_def)
with wf have "reachable x nx" "reachable ny y"
by (auto simp: wf_env_def is_subscc_def)
from w pre have "reachable v w"
by (auto simp: pre_dfss_def)
from pre have "reachable (hd (stack e)) v"
by (auto simp: pre_dfss_def wf_env_def is_subscc_def)
from pre have "stack e = hd (stack e) # tl (stack e)"
by (auto simp: pre_dfss_def)
with nx \<open>stack e = pfx @ (stack e')\<close> \<open>stack e' \<noteq> []\<close>
have "hd (stack e) \<preceq> nx in stack e"
by (metis Un_iff Un_insert_right head_precedes list.exhaust_sel list.simps(15)
set_append sup_bot.right_neutral)
with wf have "reachable nx (hd (stack e))"
by (auto simp: wf_env_def)
from \<open>stack e = pfx @ (stack e')\<close> \<open>stack e' \<noteq> []\<close> ny
have "ny \<preceq> hd (stack e') in stack e"
by (metis List.set_insert empty_set insert_Nil list.exhaust_sel set_append split_list_precedes)
with wf have "reachable (hd (stack e')) ny"
by (auto simp: wf_env_def is_subscc_def)
from wf \<open>stack e' \<noteq> []\<close> \<open>w \<in> \<S> e (hd (stack e'))\<close>
have "reachable w (hd (stack e'))"
by (auto simp: wf_env_def is_subscc_def)
from \<open>reachable x nx\<close> \<open>reachable nx (hd (stack e))\<close>
\<open>reachable (hd (stack e)) v\<close> \<open>reachable v w\<close>
\<open>reachable w (hd (stack e'))\<close>
\<open>reachable (hd (stack e')) ny\<close> \<open>reachable ny y\<close>
show "reachable x y"
using reachable_trans by meson
qed
with S_reflexive[OF wf, of "hd (stack e')"] pfx
show ?thesis
by (auto simp: cc_def)
qed
text \<open>
The environment returned by function @{text unite} extends the input environment.
\<close>
lemma unite_sub_env:
fixes e v w
defines "e' \<equiv> unite v w e"
assumes pre: "pre_dfss v e"
and w: "w \<in> successors v" "w \<notin> vsuccs e v" "w \<in> visited e" "w \<notin> explored e"
shows "sub_env e e'"
proof -
from pre have wf: "wf_env e"
by (simp add: pre_dfss_def)
from assms obtain pfx where
pfx: "stack e = pfx @ (stack e')" "stack e' \<noteq> []"
"let cc = \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}
in \<S> e' = (\<lambda>x. if x \<in> cc then cc else \<S> e x)"
by (blast dest: unite_stack[OF wf])
define cc where "cc \<equiv> \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}"
have "\<forall>n. \<S> e n \<subseteq> \<S> e' n"
proof (clarify)
fix n u
assume u: "u \<in> \<S> e n"
show "u \<in> \<S> e' n"
proof (cases "n \<in> cc")
case True
then obtain m where
m: "m \<in> set pfx \<union> {hd (stack e')}" "n \<in> \<S> e m"
by (auto simp: cc_def)
with wf S_reflexive[OF wf, of n] u have "u \<in> \<S> e m"
by (auto simp: wf_env_def)
with m pfx show ?thesis
by (auto simp: cc_def)
next
case False
with pfx u show ?thesis
by (auto simp: cc_def)
qed
qed
moreover
have "root e' = root e \<and> visited e' = visited e \<and> cstack e' = cstack e
\<and> explored e' = explored e \<and> vsuccs e' = vsuccs e"
by (simp add: e'_def unite_def)
ultimately show ?thesis
using pfx unite_S_equal[OF wf w]
by (simp add: e'_def sub_env_def)
qed
text \<open>
The environment returned by function @{text unite} is well-formed.
\<close>
lemma unite_wf_env:
fixes e v w
defines "e' \<equiv> unite v w e"
assumes pre: "pre_dfss v e"
and w: "w \<in> successors v" "w \<notin> vsuccs e v" "w \<in> visited e" "w \<notin> explored e"
shows "wf_env e'"
proof -
from pre have wf: "wf_env e"
by (simp add: pre_dfss_def)
from assms obtain pfx where
pfx: "stack e = pfx @ (stack e')" "stack e' \<noteq> []"
"let cc = \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}
in \<S> e' = (\<lambda>x. if x \<in> cc then cc else \<S> e x)"
by (blast dest: unite_stack[OF wf])
define cc where "cc \<equiv> \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}"
from pfx have Se': "\<forall>x. \<S> e' x = (if x \<in> cc then cc else \<S> e x)"
by (auto simp add: cc_def)
have cc_Un: "cc = \<Union> {\<S> e x | x. x \<in> cc}"
proof
from S_reflexive[OF wf]
show "cc \<subseteq> \<Union> {\<S> e x | x. x \<in> cc}"
by (auto simp: cc_def)
next
{
fix n x
assume "x \<in> cc" "n \<in> \<S> e x"
with wf have "n \<in> cc"
unfolding wf_env_def cc_def
by (smt (verit) Union_iff mem_Collect_eq)
}
thus "(\<Union> {\<S> e x | x. x \<in> cc}) \<subseteq> cc"
by blast
qed
from S_reflexive[OF wf, of "hd (stack e')"]
have hd_cc: "\<S> e' (hd (stack e')) = cc"
by (auto simp: cc_def Se')
{
fix n m
assume n: "n \<in> set (tl (stack e'))"
and m: "m \<in> \<S> e n \<inter> cc"
from m obtain l where
"l \<in> set pfx \<union> {hd (stack e')}" "m \<in> \<S> e l"
by (auto simp: cc_def)
with n m wf \<open>stack e = pfx @ stack e'\<close> \<open>stack e' \<noteq> []\<close>
have "False"
unfolding wf_env_def
by (metis (no_types, lifting) Int_iff UnCI UnE disjoint_insert(1) distinct.simps(2)
distinct_append emptyE hd_Cons_tl insert_iff list.set_sel(1) list.set_sel(2)
mk_disjoint_insert set_append)
}
hence tl_cc: "\<forall>n \<in> set (tl (stack e')). \<S> e n \<inter> cc = {}"
by blast
from wf
have "\<forall>n \<in> visited e'. reachable (root e') n"
"distinct (cstack e')"
"explored e' \<subseteq> visited e'"
"set (cstack e') \<subseteq> visited e'"
"\<forall>n \<in> explored e'. \<forall>m. reachable n m \<longrightarrow> m \<in> explored e'"
"\<forall>n. vsuccs e' n \<subseteq> successors n \<inter> visited e'"
"\<forall>n. n \<notin> visited e' \<longrightarrow> vsuccs e' n = {}"
"\<forall>n \<in> explored e'. vsuccs e' n = successors n"
"\<forall>n \<in> visited e' - set (cstack e'). vsuccs e' n = successors n"
"\<forall>S \<in> sccs e'. is_scc S"
"\<Union> (sccs e') = explored e'"
by (auto simp: wf_env_def e'_def unite_def)
moreover
from wf \<open>stack e = pfx @ stack e'\<close>
have "distinct (stack e')"
by (auto simp: wf_env_def)
moreover
have "\<forall>n m. n \<preceq> m in stack e' \<longrightarrow> n \<preceq> m in cstack e'"
proof (clarify)
fix n m
assume "n \<preceq> m in stack e'"
with \<open>stack e = pfx @ stack e'\<close> wf
have "n \<preceq> m in cstack e"
unfolding wf_env_def
by (metis precedes_append_left)
thus "n \<preceq> m in cstack e'"
by (simp add: e'_def unite_def)
qed
moreover
from wf \<open>stack e = pfx @ stack e'\<close>
have "\<forall>n m. n \<preceq> m in stack e' \<longrightarrow> reachable m n"
unfolding wf_env_def by (metis precedes_append_left)
moreover
have "\<forall>n m. m \<in> \<S> e' n \<longleftrightarrow> (\<S> e' n = \<S> e' m)"
proof (clarify)
fix n m
show "m \<in> \<S> e' n \<longleftrightarrow> (\<S> e' n = \<S> e' m)"
proof
assume l: "m \<in> \<S> e' n"
show "\<S> e' n = \<S> e' m"
proof (cases "n \<in> cc")
case True
with l show ?thesis
by (simp add: Se')
next
case False
with l wf have "\<S> e n = \<S> e m"
by (simp add: wf_env_def Se')
with False cc_Un wf have "m \<notin> cc"
unfolding wf_env_def e'_def
by (smt (verit, best) Union_iff mem_Collect_eq)
with \<open>\<S> e n = \<S> e m\<close> False show ?thesis
by (simp add: Se')
qed
next
assume r: "\<S> e' n = \<S> e' m"
show "m \<in> \<S> e' n"
proof (cases "n \<in> cc")
case True
with r pfx have "\<S> e' m = cc"
by (auto simp: cc_def)
have "m \<in> cc"
proof (rule ccontr)
assume "m \<notin> cc"
with pfx have "\<S> e' m = \<S> e m"
by (auto simp: cc_def)
with S_reflexive[OF wf, of m] \<open>\<S> e' m = cc\<close> \<open>m \<notin> cc\<close>
show "False"
by simp
qed
with pfx True show "m \<in> \<S> e' n"
by (auto simp: cc_def)
next
case False
hence "\<S> e' n = \<S> e n"
by (simp add: Se')
have "m \<notin> cc"
proof
assume m: "m \<in> cc"
with \<open>\<S> e' n = \<S> e n\<close> r have "\<S> e n = cc"
by (simp add: Se')
with S_reflexive[OF wf, of n] have "n \<in> cc"
by simp
with \<open>n \<notin> cc\<close> show "False" ..
qed
with r \<open>\<S> e' n = \<S> e n\<close> have "\<S> e m = \<S> e n"
by (simp add: Se')
with S_reflexive[OF wf, of m] have "m \<in> \<S> e n"
by simp
with \<open>\<S> e' n = \<S> e n\<close> show ?thesis
by simp
qed
qed
qed
moreover
have "\<forall>n. n \<notin> visited e' \<longrightarrow> \<S> e' n = {n}"
proof (clarify)
fix n
assume "n \<notin> visited e'"
hence "n \<notin> visited e"
by (simp add: e'_def unite_def)
moreover have "n \<notin> cc"
proof
assume "n \<in> cc"
then obtain m where "m \<in> set pfx \<union> {hd (stack e')}" "n \<in> \<S> e m"
by (auto simp: cc_def)
with \<open>stack e = pfx @ stack e'\<close> \<open>stack e' \<noteq> []\<close>
have "m \<in> set (stack e)"
by auto
with stack_class[OF wf this \<open>n \<in> \<S> e m\<close>] \<open>n \<notin> visited e\<close>
show "False"
by simp
qed
ultimately show "\<S> e' n = {n}"
using wf by (auto simp: wf_env_def Se')
qed
moreover
have "\<forall>n \<in> set (stack e'). \<forall>m \<in> set (stack e'). n \<noteq> m \<longrightarrow> \<S> e' n \<inter> \<S> e' m = {}"
proof (clarify)
fix n m
assume "n \<in> set (stack e')" "m \<in> set (stack e')" "n \<noteq> m"
show "\<S> e' n \<inter> \<S> e' m = {}"
proof (cases "n = hd (stack e')")
case True
with \<open>m \<in> set (stack e')\<close> \<open>n \<noteq> m\<close> \<open>stack e' \<noteq> []\<close>
have "m \<in> set (tl (stack e'))"
by (metis hd_Cons_tl set_ConsD)
with True hd_cc tl_cc unite_S_tl[OF wf w]
show ?thesis
by (auto simp: e'_def)
next
case False
with \<open>n \<in> set (stack e')\<close> \<open>stack e' \<noteq> []\<close>
have "n \<in> set (tl (stack e'))"
by (metis hd_Cons_tl set_ConsD)
show ?thesis
proof (cases "m = hd (stack e')")
case True
with \<open>n \<in> set (tl (stack e'))\<close> hd_cc tl_cc unite_S_tl[OF wf w]
show ?thesis
by (auto simp: e'_def)
next
case False
with \<open>m \<in> set (stack e')\<close> \<open>stack e' \<noteq> []\<close>
have "m \<in> set (tl (stack e'))"
by (metis hd_Cons_tl set_ConsD)
with \<open>n \<in> set (tl (stack e'))\<close>
have "\<S> e' m = \<S> e m \<and> \<S> e' n = \<S> e n"
by (auto simp: e'_def unite_S_tl[OF wf w])
moreover
from \<open>m \<in> set (stack e')\<close> \<open>n \<in> set (stack e')\<close> \<open>stack e = pfx @ stack e'\<close>
have "m \<in> set (stack e) \<and> n \<in> set (stack e)"
by auto
ultimately show ?thesis
using wf \<open>n \<noteq> m\<close> by (auto simp: wf_env_def)
qed
qed
qed
moreover
{
from unite_S_equal[OF wf w]
have "\<Union> {\<S> e' n | n. n \<in> set (stack e')} = \<Union> {\<S> e n | n. n \<in> set (stack e)}"
by (simp add: e'_def)
with wf
have "\<Union> {\<S> e' n | n. n \<in> set (stack e')} = visited e - explored e"
by (simp add: wf_env_def)
}
hence "\<Union> {\<S> e' n | n. n \<in> set (stack e')} = visited e' - explored e'"
by (simp add: e'_def unite_def)
moreover
have "\<forall>n \<in> set (stack e'). \<forall>m \<in> \<S> e' n.
m \<in> set (cstack e') \<longrightarrow> m \<preceq> n in cstack e'"
proof (clarify)
fix n m
assume "n \<in> set (stack e')" "m \<in> \<S> e' n" "m \<in> set (cstack e')"
from \<open>m \<in> set (cstack e')\<close> have "m \<in> set (cstack e)"
by (simp add: e'_def unite_def)
have "m \<preceq> n in cstack e"
proof (cases "n = hd (stack e')")
case True
with \<open>m \<in> \<S> e' n\<close> have "m \<in> cc"
by (simp add: hd_cc)
then obtain l where
"l \<in> set pfx \<union> {hd (stack e')}" "m \<in> \<S> e l"
by (auto simp: cc_def)
with \<open>stack e = pfx @ stack e'\<close> \<open>stack e' \<noteq> []\<close>
have "l \<in> set (stack e)"
by auto
with \<open>m \<in> \<S> e l\<close> \<open>m \<in> set (cstack e)\<close> wf
have "m \<preceq> l in cstack e"
by (auto simp: wf_env_def)
moreover
from \<open>l \<in> set pfx \<union> {hd (stack e')}\<close> True
\<open>stack e = pfx @ stack e'\<close> \<open>stack e' \<noteq> []\<close>
have "l \<preceq> n in stack e"
by (metis List.set_insert empty_set hd_Cons_tl insert_Nil set_append split_list_precedes)
with wf have "l \<preceq> n in cstack e"
by (auto simp: wf_env_def)
ultimately show ?thesis
using wf unfolding wf_env_def
by (meson precedes_trans)
next
case False
with \<open>n \<in> set (stack e')\<close> \<open>stack e' \<noteq> []\<close>
have "n \<in> set (tl (stack e'))"
by (metis list.collapse set_ConsD)
with unite_S_tl[OF wf w] \<open>m \<in> \<S> e' n\<close>
have "m \<in> \<S> e n"
by (simp add: e'_def)
with \<open>n \<in> set (stack e')\<close> \<open>stack e = pfx @ stack e'\<close>
\<open>m \<in> set (cstack e)\<close> wf
show ?thesis
by (auto simp: wf_env_def)
qed
thus "m \<preceq> n in cstack e'"
by (simp add: e'_def unite_def)
qed
moreover
have "\<forall>n m. n \<preceq> m in stack e' \<and> n \<noteq> m \<longrightarrow>
(\<forall>u \<in> \<S> e' n. \<not> reachable_avoiding u m (unvisited e' n))"
proof (clarify)
fix x y u
assume xy: "x \<preceq> y in stack e'" "x \<noteq> y"
and u: "u \<in> \<S> e' x" "reachable_avoiding u y (unvisited e' x)"
show "False"
proof (cases "x = hd (stack e')")
case True
hence "\<S> e' x = cc"
by (simp add: hd_cc)
with \<open>u \<in> \<S> e' x\<close> obtain x' where
x': "x' \<in> set pfx \<union> {hd (stack e')}" "u \<in> \<S> e x'"
by (auto simp: cc_def)
from \<open>stack e = pfx @ stack e'\<close> \<open>stack e' \<noteq> []\<close>
have "stack e = pfx @ (hd (stack e') # tl (stack e'))"
by auto
with x' True have "x' \<preceq> x in stack e"
by (simp add: split_list_precedes)
moreover
from xy \<open>stack e = pfx @ stack e'\<close> have "x \<preceq> y in stack e"
by (simp add: precedes_append_left)
ultimately have "x' \<preceq> y in stack e"
using wf by (auto simp: wf_env_def elim: precedes_trans)
from \<open>x' \<preceq> x in stack e\<close> \<open>x \<preceq> y in stack e\<close> wf \<open>x \<noteq> y\<close>
have "x' \<noteq> y"
by (auto simp: wf_env_def dest: precedes_antisym)
let ?unv = "\<Union> {unvisited e y | y. y \<in> set pfx \<union> {hd (stack e')}}"
from \<open>\<S> e' x = cc\<close> have "?unv = unvisited e' x"
by (auto simp: unvisited_def cc_def e'_def unite_def)
with \<open>reachable_avoiding u y (unvisited e' x)\<close>
have "reachable_avoiding u y ?unv"
by simp
with x' have "reachable_avoiding u y (unvisited e x')"
by (blast intro: ra_mono)
with \<open>x' \<preceq> y in stack e\<close> \<open>x' \<noteq> y\<close> \<open>u \<in> \<S> e x'\<close> wf
show ?thesis
by (auto simp: wf_env_def)
next
case False
with \<open>x \<preceq> y in stack e'\<close> \<open>stack e' \<noteq> []\<close>
have "x \<in> set (tl (stack e'))"
by (metis list.exhaust_sel precedes_mem(1) set_ConsD)
with \<open>u \<in> \<S> e' x\<close> have "u \<in> \<S> e x"
by (auto simp add: unite_S_tl[OF wf w] e'_def)
moreover
from \<open>x \<preceq> y in stack e'\<close> \<open>stack e = pfx @ stack e'\<close>
have "x \<preceq> y in stack e"
by (simp add: precedes_append_left)
moreover
from unite_S_tl[OF wf w] \<open>x \<in> set (tl (stack e'))\<close>
have "unvisited e' x = unvisited e x"
by (auto simp: unvisited_def e'_def unite_def)
ultimately show ?thesis
using \<open>x \<noteq> y\<close> \<open>reachable_avoiding u y (unvisited e' x)\<close> wf
by (auto simp: wf_env_def)
qed
qed
moreover
have "\<forall>n. is_subscc (\<S> e' n)"
proof
fix n
show "is_subscc (\<S> e' n)"
proof (cases "n \<in> cc")
case True
hence "\<S> e' n = cc"
by (simp add: Se')
with unite_subscc[OF pre w] hd_cc
show ?thesis
by (auto simp: e'_def)
next
case False
with wf show ?thesis
by (simp add: Se' wf_env_def)
qed
qed
ultimately show ?thesis
unfolding wf_env_def by blast
qed
subsection \<open>Lemmas establishing the pre-conditions\<close>
text \<open>
The pre-condition of function @{text dfs} ensures the pre-condition
of @{text dfss} at the call of that function.
\<close>
lemma pre_dfs_pre_dfss:
assumes "pre_dfs v e"
shows "pre_dfss v (e\<lparr>visited := visited e \<union> {v},
stack := v # stack e,
cstack := v # cstack e\<rparr>)"
(is "pre_dfss v ?e'")
proof -
from assms have wf: "wf_env e"
by (simp add: pre_dfs_def)
from assms have v: "v \<notin> visited e"
by (simp add: pre_dfs_def)
from assms stack_visited[OF wf]
have "\<forall>n \<in> visited ?e'. reachable (root ?e') n"
"distinct (stack ?e')"
"distinct (cstack ?e')"
"explored ?e' \<subseteq> visited ?e'"
"set (cstack ?e') \<subseteq> visited ?e'"
"\<forall>n \<in> explored ?e'. \<forall>m. reachable n m \<longrightarrow> m \<in> explored ?e'"
"\<forall>n. vsuccs ?e' n \<subseteq> successors n"
"\<forall>n \<in> explored ?e'. vsuccs ?e' n = successors n"
"\<forall>n \<in> visited ?e' - set(cstack ?e'). vsuccs ?e' n = successors n"
"\<forall>n. n \<notin> visited ?e' \<longrightarrow> vsuccs ?e' n = {}"
"(\<forall>n m. m \<in> \<S> ?e' n \<longleftrightarrow> (\<S> ?e' n = \<S> ?e' m))"
"(\<forall>n. n \<notin> visited ?e' \<longrightarrow> \<S> ?e' n = {n})"
"\<forall>n. is_subscc (\<S> ?e' n)"
"\<forall>S \<in> sccs ?e'. is_scc S"
"\<Union> (sccs ?e') = explored ?e'"
by (auto simp: pre_dfs_def wf_env_def)
moreover
have "\<forall>n m. n \<preceq> m in stack ?e' \<longrightarrow> reachable m n"
proof (clarify)
fix x y
assume "x \<preceq> y in stack ?e'"
show "reachable y x"
proof (cases "x=v")
assume "x=v"
with \<open>x \<preceq> y in stack ?e'\<close> assms show ?thesis
apply (simp add: pre_dfs_def)
by (metis insert_iff list.simps(15) precedes_mem(2) reachable_refl)
next
assume "x \<noteq> v"
with \<open>x \<preceq> y in stack ?e'\<close> wf show ?thesis
by (simp add: pre_dfs_def wf_env_def precedes_in_tail)
qed
qed
moreover
from wf v have "\<forall>n. vsuccs ?e' n \<subseteq> visited ?e'"
by (auto simp: wf_env_def)
moreover
from wf v
have "(\<forall>n \<in> set (stack ?e'). \<forall> m \<in> set (stack ?e'). n \<noteq> m \<longrightarrow> \<S> ?e' n \<inter> \<S> ?e' m = {})"
apply (simp add: wf_env_def)
by (metis singletonD)
moreover
have "\<Union> {\<S> ?e' v | v . v \<in> set (stack ?e')} = visited ?e' - explored ?e'"
proof -
have "\<Union> {\<S> ?e' v | v . v \<in> set (stack ?e')} =
(\<Union> {\<S> e v | v . v \<in> set (stack e)}) \<union> \<S> e v"
by auto
also from wf v have "\<dots> = visited ?e' - explored ?e'"
by (auto simp: wf_env_def)
finally show ?thesis .
qed
moreover
have "\<forall>n m. n \<preceq> m in stack ?e' \<and> n \<noteq> m \<longrightarrow>
(\<forall>u \<in> \<S> ?e' n. \<not> reachable_avoiding u m (unvisited ?e' n))"
proof (clarify)
fix x y u
assume asm: "x \<preceq> y in stack ?e'" "x \<noteq> y" "u \<in> \<S> ?e' x"
"reachable_avoiding u y (unvisited ?e' x)"
show "False"
proof (cases "x = v")
case True
with wf v \<open>u \<in> \<S> ?e' x\<close> have "u = v" "vsuccs ?e' v = {}"
by (auto simp: wf_env_def)
with \<open>reachable_avoiding u y (unvisited ?e' x)\<close>[THEN reachable_avoiding.cases]
True \<open>x \<noteq> y\<close> wf
show ?thesis
by (auto simp: wf_env_def unvisited_def)
next
case False
with asm wf show ?thesis
by (auto simp: precedes_in_tail wf_env_def unvisited_def)
qed
qed
moreover
have "\<forall>n m. n \<preceq> m in stack ?e' \<longrightarrow> n \<preceq> m in cstack ?e'"
proof (clarsimp)
fix n m
assume "n \<preceq> m in (v # stack e)"
with assms show "n \<preceq> m in (v # cstack e)"
unfolding pre_dfs_def wf_env_def
by (metis head_precedes insertI1 list.simps(15) precedes_in_tail precedes_mem(2) precedes_refl)
qed
moreover
have "\<forall>n \<in> set (stack ?e'). \<forall>m \<in> \<S> ?e' n. m \<in> set (cstack ?e') \<longrightarrow> m \<preceq> n in cstack ?e'"
proof (clarify)
fix n m
assume "n \<in> set (stack ?e')" "m \<in> \<S> ?e' n" "m \<in> set (cstack ?e')"
show "m \<preceq> n in cstack ?e'"
proof (cases "n = v")
case True
with wf v \<open>m \<in> \<S> ?e' n\<close> show ?thesis
by (auto simp: wf_env_def)
next
case False
with \<open>n \<in> set (stack ?e')\<close> \<open>m \<in> \<S> ?e' n\<close>
have "n \<in> set (stack e)" "m \<in> \<S> e n"
by auto
with wf v False \<open>m \<in> \<S> e n\<close> \<open>m \<in> set (cstack ?e')\<close>
show ?thesis
apply (simp add: wf_env_def)
by (metis (mono_tags, lifting) precedes_in_tail singletonD)
qed
qed
ultimately have "wf_env ?e'"
unfolding wf_env_def by (meson le_inf_iff)
moreover
from assms
have "\<forall>w \<in> vsuccs ?e' v. w \<in> explored ?e' \<union> \<S> ?e' (hd (stack ?e'))"
by (auto simp: pre_dfs_def wf_env_def)
moreover
from \<open>\<forall>n m. n \<preceq> m in stack ?e' \<longrightarrow> reachable m n\<close>
have "\<forall>n \<in> set (stack ?e'). reachable n v"
by (simp add: head_precedes)
moreover
from wf v have "\<S> ?e' (hd (stack ?e')) = {v}"
by (simp add: pre_dfs_def wf_env_def)
ultimately show ?thesis
by (auto simp: pre_dfss_def)
qed
text \<open>
Similarly, we now show that the pre-conditions of the different
function calls in the body of function @{text dfss} are satisfied.
First, it is very easy to see that the pre-condition of @{text dfs}
holds at the call of that function.
\<close>
lemma pre_dfss_pre_dfs:
assumes "pre_dfss v e" and "w \<notin> visited e" and "w \<in> successors v"
shows "pre_dfs w e"
using assms unfolding pre_dfss_def pre_dfs_def wf_env_def
by (meson succ_reachable)
text \<open>
The pre-condition of @{text dfss} holds when the successor
considered in the current iteration has already been explored.
\<close>
lemma pre_dfss_explored_pre_dfss:
fixes e v w
defines "e'' \<equiv> e\<lparr>vsuccs := (\<lambda>x. if x=v then vsuccs e v \<union> {w} else vsuccs e x)\<rparr>"
assumes 1: "pre_dfss v e" and 2: "w \<in> successors v" and 3: "w \<in> explored e"
shows "pre_dfss v e''"
proof -
from 1 have v: "v \<in> visited e"
by (simp add: pre_dfss_def)
have "wf_env e''"
proof -
from 1 have wf: "wf_env e"
by (simp add: pre_dfss_def)
hence "\<forall>v \<in> visited e''. reachable (root e'') v"
"distinct (stack e'')"
"distinct (cstack e'')"
"\<forall>n m. n \<preceq> m in stack e'' \<longrightarrow> n \<preceq> m in cstack e''"
"\<forall>n m. n \<preceq> m in stack e'' \<longrightarrow> reachable m n"
"explored e'' \<subseteq> visited e''"
"set (cstack e'') \<subseteq> visited e''"
"\<forall>n \<in> explored e''. \<forall>m. reachable n m \<longrightarrow> m \<in> explored e''"
"\<forall>n m. m \<in> \<S> e'' n \<longleftrightarrow> (\<S> e'' n = \<S> e'' m)"
"\<forall>n. n \<notin> visited e'' \<longrightarrow> \<S> e'' n = {n}"
"\<forall>n \<in> set (stack e''). \<forall> m \<in> set (stack e'').
n \<noteq> m \<longrightarrow> \<S> e'' n \<inter> \<S> e'' m = {}"
"\<Union> {\<S> e'' n | n. n \<in> set (stack e'')} = visited e'' - explored e''"
"\<forall>n \<in> set (stack e''). \<forall>m \<in> \<S> e'' n.
m \<in> set (cstack e'') \<longrightarrow> m \<preceq> n in cstack e''"
"\<forall>n. is_subscc (\<S> e'' n)"
"\<forall>S \<in> sccs e''. is_scc S"
"\<Union> (sccs e'') = explored e''"
by (auto simp: wf_env_def e''_def)
moreover
from wf 2 3 have "\<forall>v. vsuccs e'' v \<subseteq> successors v \<inter> visited e''"
by (auto simp: wf_env_def e''_def)
moreover
from wf v have "\<forall>n. n \<notin> visited e'' \<longrightarrow> vsuccs e'' n = {}"
by (auto simp: wf_env_def e''_def)
moreover
from wf 2
have "\<forall>v. v \<in> explored e'' \<longrightarrow> vsuccs e'' v = successors v"
by (auto simp: wf_env_def e''_def)
moreover
have "\<forall>x y. x \<preceq> y in stack e'' \<and> x \<noteq> y \<longrightarrow>
(\<forall>u \<in> \<S> e'' x. \<not> reachable_avoiding u y (unvisited e'' x))"
proof (clarify)
fix x y u
assume "x \<preceq> y in stack e''" "x \<noteq> y"
"u \<in> \<S> e'' x"
"reachable_avoiding u y (unvisited e'' x)"
hence prec: "x \<preceq> y in stack e" "u \<in> \<S> e x"
by (auto simp: e''_def)
with stack_unexplored[OF wf] have "y \<notin> explored e"
by (blast dest: precedes_mem)
have "(unvisited e x = unvisited e'' x)
\<or> (unvisited e x = unvisited e'' x \<union> {(v,w)})"
by (auto simp: e''_def unvisited_def split: if_splits)
thus "False"
proof
assume "unvisited e x = unvisited e'' x"
with prec \<open>x \<noteq> y\<close> \<open>reachable_avoiding u y (unvisited e'' x)\<close> wf
show ?thesis
unfolding wf_env_def by metis
next
assume "unvisited e x = unvisited e'' x \<union> {(v,w)}"
with wf \<open>reachable_avoiding u y (unvisited e'' x)\<close>
\<open>y \<notin> explored e\<close> \<open>w \<in> explored e\<close> prec \<open>x \<noteq> y\<close>
show ?thesis
using avoiding_explored[OF wf] unfolding wf_env_def
by (metis (no_types, lifting))
qed
qed
moreover
from wf 2
have "\<forall>n \<in> visited e'' - set (cstack e''). vsuccs e'' n = successors n"
by (auto simp: e''_def wf_env_def)
ultimately show ?thesis
unfolding wf_env_def by meson
qed
with 1 3 show ?thesis
by (auto simp: pre_dfss_def e''_def)
qed
text \<open>
The call to @{text dfs} establishes the pre-condition for the
recursive call to @{text dfss} in the body of @{text dfss}.
\<close>
lemma pre_dfss_post_dfs_pre_dfss:
fixes e v w
defines "e' \<equiv> dfs w e"
defines "e'' \<equiv> e'\<lparr>vsuccs := (\<lambda>x. if x=v then vsuccs e' v \<union> {w} else vsuccs e' x)\<rparr>"
assumes pre: "pre_dfss v e"
and w: "w \<in> successors v" "w \<notin> visited e"
and post: "post_dfs w e e'"
shows "pre_dfss v e''"
proof -
from pre
have "wf_env e" "v \<in> visited e" "stack e \<noteq> []" "v \<in> \<S> e (hd (stack e))"
by (auto simp: pre_dfss_def)
with post have "stack e' \<noteq> []" "v \<in> \<S> e' (hd (stack e'))"
by (auto dest: dfs_S_hd_stack)
from post have "w \<in> visited e'"
by (simp add: post_dfs_def)
have "wf_env e''"
proof -
from post have wf': "wf_env e'"
by (simp add: post_dfs_def)
hence "\<forall>n \<in> visited e''. reachable (root e'') n"
"distinct (stack e'')"
"distinct (cstack e'')"
"\<forall>n m. n \<preceq> m in stack e'' \<longrightarrow> n \<preceq> m in cstack e''"
"\<forall>n m. n \<preceq> m in stack e'' \<longrightarrow> reachable m n"
"explored e'' \<subseteq> visited e''"
"set (cstack e'') \<subseteq> visited e''"
"\<forall>n \<in> explored e''. \<forall>m. reachable n m \<longrightarrow> m \<in> explored e''"
"\<forall>n m. m \<in> \<S> e'' n \<longleftrightarrow> (\<S> e'' n = \<S> e'' m)"
"\<forall>n. n \<notin> visited e'' \<longrightarrow> \<S> e'' n = {n}"
"\<forall>n \<in> set (stack e''). \<forall> m \<in> set (stack e'').
n \<noteq> m \<longrightarrow> \<S> e'' n \<inter> \<S> e'' m = {}"
"\<Union> {\<S> e'' n | n. n \<in> set (stack e'')} = visited e'' - explored e''"
"\<forall>n \<in> set (stack e''). \<forall> m \<in> \<S> e'' n. m \<in> set (cstack e'') \<longrightarrow> m \<preceq> n in cstack e''"
"\<forall>n. is_subscc (\<S> e'' n)"
"\<forall>S \<in> sccs e''. is_scc S"
"\<Union> (sccs e'') = explored e''"
by (auto simp: wf_env_def e''_def)
moreover
from wf' w have "\<forall>n. vsuccs e'' n \<subseteq> successors n"
by (auto simp: wf_env_def e''_def)
moreover
from wf' \<open>w \<in> visited e'\<close> have "\<forall>n. vsuccs e'' n \<subseteq> visited e''"
by (auto simp: wf_env_def e''_def)
moreover
from post \<open>v \<in> visited e\<close>
have "\<forall>n. n \<notin> visited e'' \<longrightarrow> vsuccs e'' n = {}"
apply (simp add: post_dfs_def wf_env_def sub_env_def e''_def)
by (meson subsetD)
moreover
from wf' w
have "\<forall>n \<in> explored e''. vsuccs e'' n = successors n"
by (auto simp: wf_env_def e''_def)
moreover
have "\<forall>n m. n \<preceq> m in stack e'' \<and> n \<noteq> m \<longrightarrow>
(\<forall>u \<in> \<S> e'' n. \<not> reachable_avoiding u m (unvisited e'' n))"
proof (clarify)
fix x y u
assume "x \<preceq> y in stack e''" "x \<noteq> y"
"u \<in> \<S> e'' x"
"reachable_avoiding u y (unvisited e'' x)"
hence 1: "x \<preceq> y in stack e'" "u \<in> \<S> e' x"
by (auto simp: e''_def)
with stack_unexplored[OF wf'] have "y \<notin> explored e'"
by (auto dest: precedes_mem)
have "(unvisited e' x = unvisited e'' x)
\<or> (unvisited e' x = unvisited e'' x \<union> {(v,w)})"
by (auto simp: e''_def unvisited_def split: if_splits)
thus "False"
proof
assume "unvisited e' x = unvisited e'' x"
with 1 \<open>x \<noteq> y\<close> \<open>reachable_avoiding u y (unvisited e'' x)\<close> wf'
show ?thesis
unfolding wf_env_def by metis
next
assume unv: "unvisited e' x = unvisited e'' x \<union> {(v,w)}"
from post
have "w \<in> explored e'
\<or> (w \<in> \<S> e' (hd (stack e')) \<and> (\<forall>n \<in> set (tl (stack e')). \<S> e' n = \<S> e n))"
by (auto simp: post_dfs_def)
thus ?thesis
proof
assume "w \<in> explored e'"
with wf' unv \<open>reachable_avoiding u y (unvisited e'' x)\<close>
\<open>y \<notin> explored e'\<close> 1 \<open>x \<noteq> y\<close>
show ?thesis
using avoiding_explored[OF wf'] unfolding wf_env_def
by (metis (no_types, lifting))
next
assume w: "w \<in> \<S> e' (hd (stack e'))
\<and> (\<forall>n \<in> set (tl (stack e')). \<S> e' n = \<S> e n)"
from \<open>reachable_avoiding u y (unvisited e'' x)\<close>[THEN ra_add_edge]
unv
have "reachable_avoiding u y (unvisited e' x)
\<or> reachable_avoiding w y (unvisited e' x)"
by auto
thus ?thesis
proof
assume "reachable_avoiding u y (unvisited e' x)"
with \<open>x \<preceq> y in stack e''\<close> \<open>x \<noteq> y\<close> \<open>u \<in> \<S> e'' x\<close> wf'
show ?thesis
by (auto simp: e''_def wf_env_def)
next
assume "reachable_avoiding w y (unvisited e' x)"
from unv have "v \<in> \<S> e' x"
by (auto simp: unvisited_def)
from \<open>x \<preceq> y in stack e''\<close> have "x \<in> set (stack e')"
by (simp add: e''_def precedes_mem)
have "x = hd (stack e')"
proof (rule ccontr)
assume "x \<noteq> hd (stack e')"
with \<open>x \<in> set (stack e')\<close> \<open>stack e' \<noteq> []\<close>
have "x \<in> set (tl (stack e'))"
by (metis hd_Cons_tl set_ConsD)
with w \<open>v \<in> \<S> e' x\<close> have "v \<in> \<S> e x"
by auto
moreover
from post \<open>stack e' \<noteq> []\<close> \<open>x \<in> set (stack e')\<close> \<open>x \<in> set (tl (stack e'))\<close>
have "x \<in> set (tl (stack e))"
unfolding post_dfs_def sub_env_def
by (metis Un_iff self_append_conv2 set_append tl_append2)
moreover
from pre have "wf_env e" "stack e \<noteq> []" "v \<in> \<S> e (hd (stack e))"
by (auto simp: pre_dfss_def)
ultimately show "False"
unfolding wf_env_def
by (metis (no_types, lifting) distinct.simps(2) hd_Cons_tl insert_disjoint(2)
list.set_sel(1) list.set_sel(2) mk_disjoint_insert)
qed
with \<open>reachable_avoiding w y (unvisited e' x)\<close>
\<open>x \<preceq> y in stack e''\<close> \<open>x \<noteq> y\<close> w wf'
show ?thesis
by (auto simp add: e''_def wf_env_def)
qed
qed
qed
qed
moreover
from wf' \<open>\<forall>n. vsuccs e'' n \<subseteq> successors n\<close>
have "\<forall>n \<in> visited e'' - set (cstack e''). vsuccs e'' n = successors n"
by (auto simp: wf_env_def e''_def split: if_splits)
ultimately show ?thesis
unfolding wf_env_def by (meson le_inf_iff)
qed
show "pre_dfss v e''"
proof -
from pre post
have "v \<in> visited e''"
by (auto simp: pre_dfss_def post_dfs_def sub_env_def e''_def)
moreover
{
fix u
assume u: "u \<in> vsuccs e'' v"
have "u \<in> explored e'' \<union> \<S> e'' (hd (stack e''))"
proof (cases "u = w")
case True
with post show ?thesis
by (auto simp: post_dfs_def e''_def)
next
case False
with u pre post
have "u \<in> explored e \<union> \<S> e (hd (stack e))"
by (auto simp: pre_dfss_def post_dfs_def e''_def)
then show ?thesis
proof
assume "u \<in> explored e"
with post show ?thesis
by (auto simp: post_dfs_def sub_env_def e''_def)
next
assume "u \<in> \<S> e (hd (stack e))"
with \<open>wf_env e\<close> post \<open>stack e \<noteq> []\<close>
show ?thesis
by (auto simp: e''_def dest: dfs_S_hd_stack)
qed
qed
}
moreover
from pre post
have "\<forall>n \<in> set (stack e''). reachable n v"
unfolding pre_dfss_def post_dfs_def sub_env_def
using e''_def by force
moreover
from \<open>stack e' \<noteq> []\<close> have "stack e'' \<noteq> []"
by (simp add: e''_def)
moreover
from \<open>v \<in> \<S> e' (hd (stack e'))\<close> have "v \<in> \<S> e'' (hd (stack e''))"
by (simp add: e''_def)
moreover
from pre post have "\<exists>ns. cstack e'' = v # ns"
by (auto simp: pre_dfss_def post_dfs_def sub_env_def e''_def)
ultimately show ?thesis
using \<open>wf_env e''\<close> unfolding pre_dfss_def by blast
qed
qed
text \<open>
Finally, the pre-condition for the recursive call to @{text dfss}
at the end of the body of function @{text dfss} also holds if
@{text unite} was applied.
\<close>
lemma pre_dfss_unite_pre_dfss:
fixes e v w
defines "e' \<equiv> unite v w e"
defines "e'' \<equiv> e'\<lparr>vsuccs := (\<lambda>x. if x=v then vsuccs e' v \<union> {w} else vsuccs e' x)\<rparr>"
assumes pre: "pre_dfss v e"
and w: "w \<in> successors v" "w \<notin> vsuccs e v" "w \<in> visited e" "w \<notin> explored e"
shows "pre_dfss v e''"
proof -
from pre have wf: "wf_env e"
by (simp add: pre_dfss_def)
from pre have "v \<in> visited e"
by (simp add: pre_dfss_def)
from pre w have "v \<notin> explored e"
unfolding pre_dfss_def wf_env_def
by (meson reachable_edge)
from unite_stack[OF wf w] obtain pfx where
pfx: "stack e = pfx @ stack e'" "stack e' \<noteq> []"
"let cc = \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}
in \<S> e' = (\<lambda>x. if x \<in> cc then cc else \<S> e x)"
"w \<in> \<S> e' (hd (stack e'))"
by (auto simp: e'_def)
define cc where "cc \<equiv> \<Union> {\<S> e n |n. n \<in> set pfx \<union> {hd (stack e')}}"
from unite_wf_env[OF pre w] have wf': "wf_env e'"
by (simp add: e'_def)
from \<open>stack e = pfx @ stack e'\<close> \<open>stack e' \<noteq> []\<close>
have "hd (stack e) \<in> set pfx \<union> {hd (stack e')}"
by (simp add: hd_append)
with pre have "v \<in> cc"
by (auto simp: pre_dfss_def cc_def)
from S_reflexive[OF wf, of "hd (stack e')"]
have "hd (stack e') \<in> cc"
by (auto simp: cc_def)
with pfx \<open>v \<in> cc\<close> have "v \<in> \<S> e' (hd (stack e'))"
by (auto simp: cc_def)
from unite_sub_env[OF pre w] have "sub_env e e'"
by (simp add: e'_def)
have "wf_env e''"
proof -
from wf'
have "\<forall>n \<in> visited e''. reachable (root e'') n"
"distinct (stack e'')"
"distinct (cstack e'')"
"\<forall>n m. n \<preceq> m in stack e'' \<longrightarrow> n \<preceq> m in cstack e''"
"\<forall>n m. n \<preceq> m in stack e'' \<longrightarrow> reachable m n"
"explored e'' \<subseteq> visited e''"
"set (cstack e'') \<subseteq> visited e''"
"\<forall>n \<in> explored e''. \<forall>m. reachable n m \<longrightarrow> m \<in> explored e''"
"\<forall>n m. m \<in> \<S> e'' n \<longleftrightarrow> (\<S> e'' n = \<S> e'' m)"
"\<forall>n. n \<notin> visited e'' \<longrightarrow> \<S> e'' n = {n}"
"\<forall>n \<in> set (stack e''). \<forall>m \<in> set (stack e'').
n \<noteq> m \<longrightarrow> \<S> e'' n \<inter> \<S> e'' m = {}"
"\<Union> {\<S> e'' n | n. n \<in> set (stack e'')} = visited e'' - explored e''"
"\<forall>n \<in> set (stack e''). \<forall>m \<in> \<S> e'' n.
m \<in> set (cstack e'') \<longrightarrow> m \<preceq> n in cstack e''"
"\<forall>n. is_subscc (\<S> e'' n)"
"\<forall>S \<in> sccs e''. is_scc S"
"\<Union> (sccs e'') = explored e''"
by (auto simp: wf_env_def e''_def)
moreover
from wf' w \<open>sub_env e e'\<close>
have "\<forall>n. vsuccs e'' n \<subseteq> successors n \<inter> visited e''"
by (auto simp: wf_env_def sub_env_def e''_def)
moreover
from wf' \<open>v \<in> visited e\<close> \<open>sub_env e e'\<close>
have "\<forall>n. n \<notin> visited e'' \<longrightarrow> vsuccs e'' n = {}"
by (auto simp: wf_env_def sub_env_def e''_def)
moreover
from wf' \<open>v \<notin> explored e\<close>
have "\<forall>n \<in> explored e''. vsuccs e'' n = successors n"
by (auto simp: wf_env_def e''_def e'_def unite_def)
moreover
from wf' \<open>w \<in> successors v\<close>
have "\<forall>n \<in> visited e'' - set (cstack e''). vsuccs e'' n = successors n"
by (auto simp: wf_env_def e''_def e'_def unite_def)
moreover
have "\<forall>x y. x \<preceq> y in stack e'' \<and> x \<noteq> y \<longrightarrow>
(\<forall>u \<in> \<S> e'' x. \<not> reachable_avoiding u y (unvisited e'' x))"
proof (clarify)
fix x y u
assume xy: "x \<preceq> y in stack e''" "x \<noteq> y"
and u: "u \<in> \<S> e'' x" "reachable_avoiding u y (unvisited e'' x)"
hence prec: "x \<preceq> y in stack e'" "u \<in> \<S> e' x"
by (simp add: e''_def)+
show "False"
proof (cases "x = hd (stack e')")
case True
with \<open>v \<in> \<S> e' (hd (stack e'))\<close>
have "unvisited e' x = unvisited e'' x
\<or> (unvisited e' x = unvisited e'' x \<union> {(v,w)})"
by (auto simp: e''_def unvisited_def split: if_splits)
thus "False"
proof
assume "unvisited e' x = unvisited e'' x"
with prec \<open>x \<noteq> y\<close> \<open>reachable_avoiding u y (unvisited e'' x)\<close> wf'
show ?thesis
unfolding wf_env_def by metis
next
assume "unvisited e' x = unvisited e'' x \<union> {(v,w)}"
with \<open>reachable_avoiding u y (unvisited e'' x)\<close>[THEN ra_add_edge]
have "reachable_avoiding u y (unvisited e' x)
\<or> reachable_avoiding w y (unvisited e' x)"
by auto
thus ?thesis
proof
assume "reachable_avoiding u y (unvisited e' x)"
with prec \<open>x \<noteq> y\<close> wf' show ?thesis
by (auto simp: wf_env_def)
next
assume "reachable_avoiding w y (unvisited e' x)"
with \<open>x = hd (stack e')\<close> \<open>w \<in> \<S> e' (hd (stack e'))\<close>
\<open>x \<preceq> y in stack e'\<close> \<open>x \<noteq> y\<close> wf'
show ?thesis
by (auto simp: wf_env_def)
qed
qed
next
case False
with \<open>x \<preceq> y in stack e'\<close> \<open>stack e' \<noteq> []\<close>
have "x \<in> set (tl (stack e'))"
by (metis list.exhaust_sel precedes_mem(1) set_ConsD)
with unite_S_tl[OF wf w] \<open>u \<in> \<S> e' x\<close>
have "u \<in> \<S> e x"
by (simp add: e'_def)
moreover
from \<open>x \<preceq> y in stack e'\<close> \<open>stack e = pfx @ stack e'\<close>
have "x \<preceq> y in stack e"
by (simp add: precedes_append_left)
moreover
from \<open>v \<in> \<S> e' (hd (stack e'))\<close> \<open>x \<in> set (tl (stack e'))\<close>
\<open>stack e' \<noteq> []\<close> wf'
have "v \<notin> \<S> e' x"
unfolding wf_env_def
by (metis (no_types, lifting) Diff_cancel Diff_triv distinct.simps(2) insert_not_empty
list.exhaust_sel list.set_sel(1) list.set_sel(2) mk_disjoint_insert)
hence "unvisited e'' x = unvisited e' x"
by (auto simp: unvisited_def e''_def split: if_splits)
moreover
from \<open>x \<in> set (tl (stack e'))\<close> unite_S_tl[OF wf w]
have "unvisited e' x = unvisited e x"
by (simp add: unvisited_def e'_def unite_def)
ultimately show ?thesis
using \<open>x \<noteq> y\<close> \<open>reachable_avoiding u y (unvisited e'' x)\<close> wf
by (auto simp: wf_env_def)
qed
qed
ultimately show ?thesis
unfolding wf_env_def by meson
qed
show "pre_dfss v e''"
proof -
from pre have "v \<in> visited e''"
by (simp add: pre_dfss_def e''_def e'_def unite_def)
moreover
{
fix u
assume u: "u \<in> vsuccs e'' v"
have "u \<in> explored e'' \<union> \<S> e'' (hd (stack e''))"
proof (cases "u = w")
case True
with \<open>w \<in> \<S> e' (hd (stack e'))\<close> show ?thesis
by (simp add: e''_def)
next
case False
with u have "u \<in> vsuccs e v"
by (simp add: e''_def e'_def unite_def)
with pre have "u \<in> explored e \<union> \<S> e (hd (stack e))"
by (auto simp: pre_dfss_def)
then show ?thesis
proof
assume "u \<in> explored e"
thus ?thesis
by (simp add: e''_def e'_def unite_def)
next
assume "u \<in> \<S> e (hd (stack e))"
with \<open>hd (stack e) \<in> set pfx \<union> {hd (stack e')}\<close>
have "u \<in> cc"
by (auto simp: cc_def)
moreover
from S_reflexive[OF wf, of "hd (stack e')"] pfx
have "\<S> e' (hd (stack e')) = cc"
by (auto simp: cc_def)
ultimately show ?thesis
by (simp add: e''_def)
qed
qed
}
hence "\<forall>w \<in> vsuccs e'' v. w \<in> explored e'' \<union> \<S> e'' (hd (stack e''))"
by blast
moreover
from pre \<open>stack e = pfx @ stack e'\<close>
have "\<forall>n \<in> set (stack e''). reachable n v"
by (auto simp: pre_dfss_def e''_def)
moreover
from \<open>stack e' \<noteq> []\<close> have "stack e'' \<noteq> []"
by (simp add: e''_def)
moreover
from \<open>v \<in> \<S> e' (hd (stack e'))\<close> have "v \<in> \<S> e'' (hd (stack e''))"
by (simp add: e''_def)
moreover
from pre have "\<exists>ns. cstack e'' = v # ns"
by (auto simp: pre_dfss_def e''_def e'_def unite_def)
ultimately show ?thesis
using \<open>wf_env e''\<close> unfolding pre_dfss_def by blast
qed
qed
subsection \<open>Lemmas establishing the post-conditions\<close>
text \<open>
Assuming the pre-condition of function @{text dfs} and the post-condition of
the call to @{text dfss} in the body of that function, the post-condition of
@{text dfs} is established.
\<close>
lemma pre_dfs_implies_post_dfs:
fixes v e
defines "e1 \<equiv> e\<lparr>visited := visited e \<union> {v},
stack := (v # stack e),
cstack:=(v # cstack e)\<rparr>"
defines "e' \<equiv> dfss v e1"
defines "e'' \<equiv> e'\<lparr> cstack := tl(cstack e')\<rparr>"
assumes 1: "pre_dfs v e"
and 2: "dfs_dfss_dom (Inl(v, e))"
and 3: "post_dfss v e1 e'"
shows "post_dfs v e (dfs v e)"
proof -
from 1 have wf: "wf_env e"
by (simp add: pre_dfs_def)
from 1 have v: "v \<notin> visited e"
by (simp add: pre_dfs_def)
from 3 have wf': "wf_env e'"
by (simp add: post_dfss_def)
from 3 have cst': "cstack e' = v # cstack e"
by (simp add: post_dfss_def sub_env_def e1_def)
show ?thesis
proof (cases "v = hd(stack e')")
case True
have notempty: "stack e' = v # stack e"
proof -
from 3 obtain ns where
ns: "stack e1 = ns @ (stack e')" "stack e' \<noteq> []"
by (auto simp: post_dfss_def sub_env_def)
have "ns = []"
proof (rule ccontr)
assume "ns \<noteq> []"
with ns have "hd ns = v"
apply (simp add: e1_def)
by (metis hd_append2 list.sel(1))
with True ns \<open>ns \<noteq> []\<close> have "\<not> distinct (stack e1)"
by (metis disjoint_iff_not_equal distinct_append hd_in_set)
with wf v stack_visited[OF wf] show False
by (auto simp: wf_env_def e1_def)
qed
with ns show ?thesis
by (simp add: e1_def)
qed
have e2: "dfs v e = e'\<lparr>sccs := sccs e' \<union> {\<S> e' v},
explored := explored e' \<union> (\<S> e' v),
stack := tl (stack e'),
cstack := tl (cstack e')\<rparr>" (is "_ = ?e2")
using True 2 dfs.psimps[of v e] unfolding e1_def e'_def
by (fastforce simp: e1_def e'_def)
from notempty have stack2: "stack ?e2 = stack e"
by (simp add: e1_def)
moreover from 3 have "v \<in> visited ?e2"
by (auto simp: post_dfss_def sub_env_def e1_def)
moreover
from 3 notempty have subenv: "sub_env e ?e2"
by (auto simp: post_dfss_def sub_env_def e1_def)
moreover have "wf_env ?e2"
proof -
from wf'
have "\<forall>n \<in> visited ?e2. reachable (root ?e2) n"
"distinct (stack ?e2)"
"\<forall>n. vsuccs ?e2 n \<subseteq> successors n \<inter> visited ?e2"
"\<forall>n. n \<notin> visited ?e2 \<longrightarrow> vsuccs ?e2 n = {}"
"\<forall>n m. m \<in> \<S> ?e2 n \<longleftrightarrow> (\<S> ?e2 n = \<S> ?e2 m)"
"\<forall>n. n \<notin> visited ?e2 \<longrightarrow> \<S> ?e2 n = {n}"
"\<forall>n. is_subscc (\<S> ?e2 n)"
"\<Union> (sccs ?e2) = explored ?e2"
by (auto simp: wf_env_def distinct_tl)
moreover
from 1 cst' have "distinct (cstack ?e2)"
by (auto simp: pre_dfs_def wf_env_def)
moreover
from 1 stack2 have "\<forall>n m. n \<preceq> m in stack ?e2 \<longrightarrow> reachable m n"
by (auto simp: pre_dfs_def wf_env_def)
moreover
from 1 stack2 cst'
have "\<forall>n m. n \<preceq> m in stack ?e2 \<longrightarrow> n \<preceq> m in cstack ?e2"
by (auto simp: pre_dfs_def wf_env_def)
moreover
from notempty wf' have "explored ?e2 \<subseteq> visited ?e2"
apply (simp add: wf_env_def)
using stack_class[OF wf']
by (smt (verit, del_insts) Diff_iff insert_subset list.simps(15) subset_eq)
moreover
from 3 cst' have "set (cstack ?e2) \<subseteq> visited ?e2"
by (simp add: post_dfss_def wf_env_def e1_def)
moreover
{
fix u
assume "u \<in> explored ?e2"
have "vsuccs ?e2 u = successors u"
proof (cases "u \<in> explored e'")
case True
with wf' show ?thesis
by (auto simp: wf_env_def)
next
case False
with \<open>u \<in> explored ?e2\<close> have "u \<in> \<S> e' v"
by simp
show ?thesis
proof (cases "u = v")
case True
with 3 show ?thesis
by (auto simp: post_dfss_def)
next
case False
have "u \<in> visited e' - set (cstack e')"
proof
from notempty \<open>u \<in> \<S> e' v\<close> stack_class[OF wf'] False
show "u \<in> visited e'"
by auto
next
show "u \<notin> set (cstack e')"
proof
assume u: "u \<in> set (cstack e')"
with notempty \<open>u \<in> \<S> e' v\<close> \<open>wf_env e'\<close> have "u \<preceq> v in cstack e'"
by (auto simp: wf_env_def)
with cst' u False wf' show "False"
unfolding wf_env_def
by (metis head_precedes precedes_antisym)
qed
qed
with 3 show ?thesis
by (auto simp: post_dfss_def wf_env_def)
qed
qed
}
note explored_vsuccs = this
moreover have "\<forall>n \<in> explored ?e2. \<forall>m. reachable n m \<longrightarrow> m \<in> explored ?e2"
proof (clarify)
fix x y
assume asm: "x \<in> explored ?e2" "reachable x y"
show "y \<in> explored ?e2"
proof (cases "x \<in> explored e'")
case True
with \<open>wf_env e'\<close> \<open>reachable x y\<close> show ?thesis
by (simp add: wf_env_def)
next
case False
with asm have "x \<in> \<S> e' v"
by simp
with \<open>explored ?e2 \<subseteq> visited ?e2\<close> have "x \<in> visited e'"
by auto
from \<open>x \<in> \<S> e' v\<close> wf' have "reachable v x"
by (auto simp: wf_env_def is_subscc_def)
have "y \<in> visited e'"
proof (rule ccontr)
assume "y \<notin> visited e'"
with reachable_visited[OF wf' \<open>x \<in> visited e'\<close> \<open>reachable x y\<close>]
obtain n m where
"n \<in> visited e'" "m \<in> successors n - vsuccs e' n"
"reachable x n" "reachable m y"
by blast
from wf' \<open>m \<in> successors n - vsuccs e' n\<close>
have "n \<notin> explored e'"
by (auto simp: wf_env_def)
obtain n' where
"n' \<in> set (stack e')" "n \<in> \<S> e' n'"
by (rule visited_unexplored[OF wf' \<open>n \<in> visited e'\<close> \<open>n \<notin> explored e'\<close>])
have "n' = v"
proof (rule ccontr)
assume "n' \<noteq> v"
with \<open>n' \<in> set (stack e')\<close> \<open>v = hd (stack e')\<close>
have "n' \<in> set (tl (stack e'))"
by (metis emptyE hd_Cons_tl set_ConsD set_empty)
moreover
from \<open>n \<in> \<S> e' n'\<close> \<open>wf_env e'\<close> have "reachable n n'"
by (auto simp: wf_env_def is_subscc_def)
with \<open>reachable v x\<close> \<open>reachable x n\<close> reachable_trans
have "reachable v n'"
by blast
ultimately show "False"
using 3 \<open>v = hd (stack e')\<close>
by (auto simp: post_dfss_def)
qed
with \<open>n \<in> \<S> e' n'\<close> \<open>m \<in> successors n - vsuccs e' n\<close> explored_vsuccs
show "False"
by auto
qed
show ?thesis
proof (cases "y \<in> explored e'")
case True
then show ?thesis
by simp
next
case False
obtain n where ndef: "n \<in> set (stack e')" "(y \<in> \<S> e' n)"
by (rule visited_unexplored[OF wf' \<open>y \<in> visited e'\<close> False])
show ?thesis
proof (cases "n = v")
case True
with ndef show ?thesis by simp
next
case False
with ndef notempty have "n \<in> set (tl (stack e'))"
by simp
moreover
from wf' ndef have "reachable y n"
by (auto simp: wf_env_def is_subscc_def)
with \<open>reachable v x\<close> \<open>reachable x y\<close>
have "reachable v n"
by (meson reachable_trans)
ultimately show ?thesis
using \<open>v = hd (stack e')\<close> 3
by (simp add: post_dfss_def)
qed
qed
qed
qed
moreover
from 3 cst'
have "\<forall>n \<in> visited ?e2 - set (cstack ?e2). vsuccs ?e2 n = successors n"
apply (simp add: post_dfss_def wf_env_def)
by (metis (no_types, opaque_lifting) Diff_iff list.simps(15) set_ConsD)
moreover
from wf' notempty
have "\<forall>n m. n \<in> set (stack ?e2) \<and> m \<in> set (stack ?e2) \<and> n \<noteq> m
\<longrightarrow> (\<S> ?e2 n \<inter> \<S> ?e2 m = {})"
by (simp add: wf_env_def)
moreover
have "\<Union> {\<S> ?e2 n | n . n \<in> set (stack ?e2)} = visited ?e2 - explored ?e2"
proof -
from wf' notempty
have "(\<Union> {\<S> ?e2 n | n . n \<in> set (stack ?e2)}) \<inter> \<S> e' v = {}"
by (auto simp: wf_env_def)
with notempty
have "\<Union> {\<S> ?e2 n | n . n \<in> set (stack ?e2)} =
(\<Union> {\<S> e' n | n . n \<in> set (stack e')}) - \<S> e' v"
by auto
also from wf'
have "\<dots> = (visited e' - explored e') - \<S> e' v"
by (simp add: wf_env_def)
finally show ?thesis
by auto
qed
moreover
have "\<forall>n \<in> set (stack ?e2). \<forall>m \<in> \<S> ?e2 n. m \<in> set (cstack ?e2) \<longrightarrow> m \<preceq> n in cstack ?e2"
proof (clarsimp simp: cst')
fix n m
assume "n \<in> set (tl (stack e'))"
"m \<in> \<S> e' n" "m \<in> set (cstack e)"
with 3 have "m \<in> \<S> e n"
by (auto simp: post_dfss_def e1_def)
with wf notempty \<open>n \<in> set (tl (stack e'))\<close> \<open>m \<in> set (cstack e)\<close>
show "m \<preceq> n in cstack e"
by (auto simp: wf_env_def)
qed
moreover
{
fix x y u
assume xy: "x \<preceq> y in stack ?e2" "x \<noteq> y"
and u: "u \<in> \<S> ?e2 x" "reachable_avoiding u y (unvisited ?e2 x)"
from xy notempty stack2
have "x \<preceq> y in stack e'"
by (metis head_precedes insert_iff list.simps(15) precedes_in_tail precedes_mem(2))
with wf' \<open>x \<noteq> y\<close> u have "False"
by (auto simp: wf_env_def unvisited_def)
}
moreover have "\<forall>S \<in> sccs ?e2. is_scc S"
proof (clarify)
fix S
assume asm: "S \<in> sccs ?e2"
show "is_scc S"
proof (cases "S = \<S> e' v")
case True
with S_reflexive[OF wf'] have "S \<noteq> {}"
by blast
from wf' True have subscc: "is_subscc S"
by (simp add: wf_env_def)
{
assume "\<not> is_scc S"
with \<open>S \<noteq> {}\<close> \<open>is_subscc S\<close> obtain S' where
S'_def: "S' \<noteq> S" "S \<subseteq> S'" "is_subscc S'"
unfolding is_scc_def by blast
then obtain x where "x \<in> S' \<and> x \<notin> S"
by blast
with True S'_def wf'
have xv: "reachable v x \<and> reachable x v"
unfolding wf_env_def is_subscc_def by (metis in_mono)
from \<open>\<forall>v w. w \<in> \<S> ?e2 v \<longleftrightarrow> (\<S> ?e2 v = \<S> ?e2 w)\<close>
have "v \<in> explored ?e2"
by auto
with \<open>\<forall>x \<in> explored ?e2. \<forall>y. reachable x y \<longrightarrow> y \<in> explored ?e2\<close>
xv \<open>S = \<S> e' v\<close> \<open>x \<in> S' \<and> x \<notin> S\<close>
have "x \<in> explored e'"
by auto
with wf' xv have "v \<in> explored e'"
by (auto simp: wf_env_def)
with notempty have "False"
by (auto intro: stack_unexplored[OF wf'])
}
then show ?thesis
by blast
next
case False
with asm wf' show ?thesis
by (auto simp: wf_env_def)
qed
qed
ultimately show ?thesis
unfolding wf_env_def by meson
qed
moreover
from \<open>wf_env ?e2\<close> have "v \<in> explored ?e2"
by (auto simp: wf_env_def)
moreover
from 3 have "vsuccs ?e2 v = successors v"
by (simp add: post_dfss_def)
moreover
from 1 3 have "\<forall>w \<in> visited e. vsuccs ?e2 w = vsuccs e w"
by (auto simp: pre_dfs_def post_dfss_def e1_def)
moreover
from 3 have "\<forall>n \<in> set (stack ?e2). \<S> ?e2 n = \<S> e n"
by (auto simp: post_dfss_def e1_def)
ultimately show ?thesis
unfolding post_dfs_def using e2 by simp
next
case False
with 2 have e': "dfs v e = e''"
by (simp add: dfs.psimps e''_def e'_def e1_def)
moreover have "wf_env e''"
proof -
from wf'
have "\<forall>n \<in> visited e''. reachable (root e'') n"
"distinct (stack e'')"
"distinct (cstack e'')"
"\<forall>n m. n \<preceq> m in stack e'' \<longrightarrow> reachable m n"
"explored e'' \<subseteq> visited e''"
"\<forall>n \<in> explored e''. \<forall>m. reachable n m \<longrightarrow> m \<in> explored e''"
"\<forall>n. vsuccs e'' n \<subseteq> successors n \<inter> visited e''"
"\<forall>n. n \<notin> visited e'' \<longrightarrow> vsuccs e'' n = {}"
"\<forall>n \<in> explored e''. vsuccs e'' n = successors n"
"\<forall>n m. m \<in> \<S> e'' n \<longleftrightarrow> (\<S> e'' n = \<S> e'' m)"
"\<forall>n. n \<notin> visited e'' \<longrightarrow> \<S> e'' n = {n}"
"\<forall>n \<in> set (stack e''). \<forall>m \<in> set (stack e'').
n \<noteq> m \<longrightarrow> \<S> e'' n \<inter> \<S> e'' m = {}"
"\<Union> {\<S> e'' n | n. n \<in> set (stack e'')} = visited e'' - explored e''"
"\<forall>n. is_subscc (\<S> e'' n)"
"\<forall>S \<in> sccs e''. is_scc S"
"\<Union> (sccs e'') = explored e''"
by (auto simp: e''_def wf_env_def distinct_tl)
moreover have "\<forall>n m. n \<preceq> m in stack e'' \<longrightarrow> n \<preceq> m in cstack e''"
proof (clarsimp simp add: e''_def)
fix n m
assume nm: "n \<preceq> m in stack e'"
with 3 have "n \<preceq> m in cstack e'"
unfolding post_dfss_def wf_env_def
by meson
moreover
have "n \<noteq> v"
proof
assume "n = v"
with nm have "n \<in> set (stack e')"
by (simp add: precedes_mem)
with 3 \<open>n = v\<close> have "v = hd (stack e')"
unfolding post_dfss_def wf_env_def
by (metis (no_types, opaque_lifting) IntI equals0D list.set_sel(1))
with \<open>v \<noteq> hd (stack e')\<close> show "False"
by simp
qed
ultimately show "n \<preceq> m in tl (cstack e')"
by (simp add: cst' precedes_in_tail)
qed
moreover
from 3 have "set (cstack e'') \<subseteq> visited e''"
by (simp add: post_dfss_def wf_env_def sub_env_def e''_def e1_def subset_eq)
moreover
from 3
have "\<forall>n \<in> visited e'' - set (cstack e''). vsuccs e'' n = successors n"
apply (simp add: post_dfss_def sub_env_def wf_env_def e''_def e1_def)
by (metis (no_types, opaque_lifting) DiffE DiffI set_ConsD)
moreover
have "\<forall>n \<in> set (stack e''). \<forall>m \<in> \<S> e'' n.
m \<in> set (cstack e'') \<longrightarrow> m \<preceq> n in cstack e''"
proof (clarsimp simp: e''_def)
fix m n
assume asm: "n \<in> set (stack e')" "m \<in> \<S> e' n"
"m \<in> set (tl (cstack e'))"
with wf' cst' have "m \<noteq> v" "m \<preceq> n in cstack e'"
by (auto simp: wf_env_def)
with cst' show "m \<preceq> n in tl (cstack e')"
by (simp add: precedes_in_tail)
qed
moreover
from wf'
have "(\<forall>x y. x \<preceq> y in stack e'' \<and> x \<noteq> y \<longrightarrow>
(\<forall>u \<in> \<S> e'' x. \<not> reachable_avoiding u y (unvisited e'' x)))"
by (force simp: e''_def wf_env_def unvisited_def)
ultimately show ?thesis
unfolding wf_env_def by blast
qed
moreover
from 3 have "v \<in> visited e''"
by (auto simp: post_dfss_def sub_env_def e''_def e1_def)
moreover
from 3 have subenv: "sub_env e e''"
apply (simp add: post_dfss_def sub_env_def e1_def e''_def)
by (smt (verit, best) Collect_mono False Union_mono append_eq_Cons_conv list.sel(1) subset_trans)
moreover
from 3 have "vsuccs e'' v = successors v"
by (simp add: post_dfss_def e''_def)
moreover
from 1 3 have "\<forall>w \<in> visited e. vsuccs e'' w = vsuccs e w"
by (auto simp: pre_dfs_def post_dfss_def e1_def e''_def)
moreover
from 3
have "stack e'' \<noteq> []" "v \<in> \<S> e'' (hd (stack e''))"
"\<forall>n \<in> set (tl (stack e'')). \<S> e'' n = \<S> e n"
by (auto simp: post_dfss_def e1_def e''_def)
ultimately show ?thesis unfolding post_dfs_def
by blast
qed
qed
text \<open>
The following lemma is central for proving
partial correctness: assuming termination (represented by
the predicate @{text dfs_dfss_dom}) and the pre-condition
of the functions, both @{text dfs} and @{text dfss}
establish their post-conditions. The first part of the
theorem follows directly from the preceding lemma and the
computational induction rule generated by Isabelle; the
second part is proved directly, distinguishing the different
cases in the definition of function @{text dfss}.
\<close>
lemma pre_post:
shows
"\<lbrakk>dfs_dfss_dom (Inl(v,e)); pre_dfs v e\<rbrakk> \<Longrightarrow> post_dfs v e (dfs v e)"
"\<lbrakk>dfs_dfss_dom (Inr(v,e)); pre_dfss v e\<rbrakk> \<Longrightarrow> post_dfss v e (dfss v e)"
proof (induct rule: dfs_dfss.pinduct)
fix v e
assume dom: "dfs_dfss_dom (Inl(v,e))"
and predfs: "pre_dfs v e"
and prepostdfss: "\<And>e1. \<lbrakk> e1 = e \<lparr>visited := visited e \<union> {v}, stack := v # stack e,
cstack := v # cstack e\<rparr>; pre_dfss v e1 \<rbrakk>
\<Longrightarrow> post_dfss v e1 (dfss v e1)"
then show "post_dfs v e (dfs v e)"
using pre_dfs_implies_post_dfs pre_dfs_pre_dfss by auto
next
fix v e
assume dom: "dfs_dfss_dom (Inr(v,e))"
and predfss: "pre_dfss v e"
and prepostdfs:
"\<And>vs w.
\<lbrakk> vs = successors v - vsuccs e v; vs \<noteq> {}; w = (SOME x. x \<in> vs);
w \<notin> explored e; w \<notin> visited e; pre_dfs w e \<rbrakk>
\<Longrightarrow> post_dfs w e (dfs w e)"
and prepostdfss:
"\<And>vs w e' e''.
\<lbrakk> vs = successors v - vsuccs e v; vs \<noteq> {}; w = (SOME x. x \<in> vs);
e' = (if w \<in> explored e then e
else if w \<notin> visited e then dfs w e
else unite v w e);
e'' = e'\<lparr>vsuccs := \<lambda>x. if x = v then vsuccs e' v \<union> {w}
else vsuccs e' x\<rparr> ;
pre_dfss v e'' \<rbrakk>
\<Longrightarrow> post_dfss v e'' (dfss v e'')"
show "post_dfss v e (dfss v e)"
proof -
let ?vs = "successors v - vsuccs e v"
from predfss have wf: "wf_env e"
by (simp add: pre_dfss_def)
from predfss have "v \<in> visited e"
by (simp add: pre_dfss_def)
from predfss have "v \<notin> explored e"
by (meson DiffD2 list.set_sel(1) pre_dfss_def stack_class)
show ?thesis
proof (cases "?vs = {}")
case True
with dom have "dfss v e = e"
by (simp add: dfss.psimps)
moreover
from True wf have "vsuccs e v = successors v"
unfolding wf_env_def
by (meson Diff_eq_empty_iff le_infE subset_antisym)
moreover
have "sub_env e e"
by (simp add: sub_env_def)
moreover
from predfss \<open>vsuccs e v = successors v\<close>
have "\<forall>w \<in> successors v. w \<in> explored e \<union> \<S> e (hd (stack e))"
"\<forall>n \<in> set (stack e). reachable n v"
"stack e \<noteq> []"
"v \<in> \<S> e (hd (stack e))"
by (auto simp: pre_dfss_def)
moreover
{
fix n
assume asm: "hd (stack e) = v"
"n \<in> set (tl (stack e))"
"reachable v n"
with \<open>stack e \<noteq> []\<close> have "v \<preceq> n in stack e"
by (metis head_precedes hd_Cons_tl list.set_sel(2))
moreover
from wf \<open>stack e \<noteq> []\<close> asm have "v \<noteq> n"
unfolding wf_env_def
by (metis distinct.simps(2) list.exhaust_sel)
moreover
from wf have "v \<in> \<S> e v"
by (rule S_reflexive)
moreover
{
fix a b
assume "a \<in> \<S> e v" "b \<in> successors a - vsuccs e a"
with \<open>vsuccs e v = successors v\<close> have "a \<noteq> v"
by auto
from \<open>stack e \<noteq> []\<close> \<open>hd (stack e) = v\<close>
have "v \<in> set (stack e)"
by auto
with \<open>a \<noteq> v\<close> \<open>a \<in> \<S> e v\<close> wf have "a \<in> visited e"
unfolding wf_env_def by (metis singletonD)
have "False"
proof (cases "a \<in> set (cstack e)")
case True
with \<open>v \<in> set (stack e)\<close> \<open>a \<in> \<S> e v\<close> \<open>wf_env e\<close>
have "a \<preceq> v in cstack e"
by (auto simp: wf_env_def)
moreover
from predfss obtain ns where "cstack e = v # ns"
by (auto simp: pre_dfss_def)
moreover
from wf have "distinct (cstack e)"
by (simp add: wf_env_def)
ultimately have "a = v"
using tail_not_precedes by force
with \<open>a \<noteq> v\<close> show ?thesis ..
next
case False
with \<open>a \<in> visited e\<close> wf have "vsuccs e a = successors a"
by (auto simp: wf_env_def)
with \<open>b \<in> successors a - vsuccs e a\<close> show ?thesis
by simp
qed
}
hence "unvisited e v = {}"
by (auto simp: unvisited_def)
ultimately have "\<not> reachable_avoiding v n {}"
using wf unfolding wf_env_def by metis
with \<open>reachable v n\<close> have "False"
by (simp add: ra_empty)
}
ultimately show ?thesis
using wf by (auto simp: post_dfss_def)
next
case vs_case: False
define w where "w = (SOME x. x \<in> ?vs)"
define e' where "e' = (if w \<in> explored e then e
else if w \<notin> visited e then dfs w e
else unite v w e)"
define e'' where "e'' = (e'\<lparr>vsuccs := \<lambda>x. if x=v then vsuccs e' v \<union> {w} else vsuccs e' x\<rparr>)"
from dom vs_case have dfss: "dfss v e = dfss v e''"
apply (simp add: dfss.psimps e''_def)
using e'_def w_def by auto
from vs_case have wvs: "w \<in> ?vs"
unfolding w_def by (metis some_in_eq)
show ?thesis
proof (cases "w \<in> explored e")
case True
hence e': "e' = e"
by (simp add: e'_def)
with predfss wvs True
have "pre_dfss v e''"
by (auto simp: e''_def pre_dfss_explored_pre_dfss)
with prepostdfss vs_case
have post'': "post_dfss v e'' (dfss v e'')"
by (auto simp: w_def e'_def e''_def)
moreover
from post''
have "\<forall>u \<in> visited e - {v}. vsuccs (dfss v e'') u = vsuccs e u"
by (auto simp: post_dfss_def e' e''_def)
moreover
have "sub_env e e''"
by (auto simp: sub_env_def e' e''_def)
with post'' have "sub_env e (dfss v e'')"
by (auto simp: post_dfss_def elim: sub_env_trans)
moreover
from e' have "\<S> e'' = \<S> e"
by (simp add: e''_def)
ultimately show ?thesis
by (auto simp: dfss post_dfss_def)
next
case notexplored: False
then show ?thesis
proof (cases "w \<notin> visited e")
case True
with e'_def notexplored have "e' = dfs w e"
by auto
with True notexplored pre_dfss_pre_dfs predfss
prepostdfs vs_case w_def
have postdfsw: "post_dfs w e e'"
by (metis DiffD1 some_in_eq)
with predfss wvs True \<open>e' = dfs w e\<close>
have "pre_dfss v e''"
by (auto simp: e''_def pre_dfss_post_dfs_pre_dfss)
with prepostdfss vs_case
have post'': "post_dfss v e'' (dfss v e'')"
by (auto simp: w_def e'_def e''_def)
moreover
have "\<forall>u \<in> visited e - {v}. vsuccs (dfss v e'') u = vsuccs e u"
proof
fix u
assume "u \<in> visited e - {v}"
with postdfsw
have u: "vsuccs e' u = vsuccs e u" "u \<in> visited e'' - {v}"
by (auto simp: post_dfs_def sub_env_def e''_def)
with post'' have "vsuccs (dfss v e'') u = vsuccs e'' u"
by (auto simp: post_dfss_def)
with u show "vsuccs (dfss v e'') u = vsuccs e u"
by (simp add: e''_def)
qed
moreover
have "sub_env e (dfss v e'')"
proof -
from postdfsw have "sub_env e e'"
by (simp add: post_dfs_def)
moreover
have "sub_env e' e''"
by (auto simp: sub_env_def e''_def)
moreover
from post'' have "sub_env e'' (dfss v e'')"
by (simp add: post_dfss_def)
ultimately show ?thesis
by (metis sub_env_trans)
qed
moreover
{
fix n
assume n: "n \<in> set (tl (stack (dfss v e'')))"
with post'' have "\<S> (dfss v e'') n = \<S> e' n"
by (simp add: post_dfss_def e''_def)
moreover
from \<open>pre_dfss v e''\<close> n post''
have "stack e' \<noteq> [] \<and> n \<in> set (tl (stack e''))"
apply (simp add: pre_dfss_def post_dfss_def sub_env_def e''_def)
by (metis (no_types, lifting) Un_iff list.set_sel(2) self_append_conv2 set_append tl_append2)
with postdfsw have "\<S> e' n = \<S> e n"
apply (simp add: post_dfs_def e''_def)
by (metis list.set_sel(2))
ultimately have "\<S> (dfss v e'') n = \<S> e n"
by simp
}
ultimately show ?thesis
by (auto simp: dfss post_dfss_def)
next
case False
hence e': "e' = unite v w e" using notexplored
using e'_def by simp
from False have "w \<in> visited e"
by simp
from wf wvs notexplored False obtain pfx where
pfx: "stack e = pfx @ (stack e')" "stack e' \<noteq> []"
unfolding e' by (blast dest: unite_stack)
from predfss wvs notexplored False \<open>e' = unite v w e\<close>
have "pre_dfss v e''"
by (auto simp: e''_def pre_dfss_unite_pre_dfss)
with prepostdfss vs_case \<open>e' = unite v w e\<close> \<open>w \<notin> explored e\<close> \<open>w \<in> visited e\<close>
have post'': "post_dfss v e'' (dfss v e'')"
by (auto simp: w_def e''_def)
moreover
from post''
have "\<forall>u \<in> visited e - {v}. vsuccs (dfss v e'') u = vsuccs e u"
by (auto simp: post_dfss_def e''_def e' unite_def)
moreover
have "sub_env e (dfss v e'')"
proof -
from predfss wvs \<open>w \<in> visited e\<close> notexplored
have "sub_env e e'"
unfolding e' by (blast dest: unite_sub_env)
moreover
have "sub_env e' e''"
by (auto simp: sub_env_def e''_def)
moreover
from post'' have "sub_env e'' (dfss v e'')"
by (simp add: post_dfss_def)
ultimately show ?thesis
by (metis sub_env_trans)
qed
moreover
{
fix n
assume n: "n \<in> set (tl (stack (dfss v e'')))"
with post'' have "\<S> (dfss v e'') n = \<S> e'' n"
by (simp add: post_dfss_def)
moreover
from n post'' \<open>stack e' \<noteq> []\<close>
have "n \<in> set (tl (stack e''))"
apply (simp add: post_dfss_def sub_env_def e''_def)
by (metis (no_types, lifting) Un_iff list.set_sel(2) self_append_conv2 set_append tl_append2)
with wf wvs \<open>w \<in> visited e\<close> notexplored
have "\<S> e'' n = \<S> e n"
by (auto simp: e''_def e' dest: unite_S_tl)
ultimately have "\<S> (dfss v e'') n = \<S> e n"
by simp
}
ultimately show ?thesis
by (simp add: dfss post_dfss_def)
qed
qed
qed
qed
qed
text \<open>
We can now show partial correctness of the algorithm:
applied to some node @{text "v"} and the empty environment,
it computes the set of strongly connected components in
the subgraph reachable from node @{text "v"}. In particular,
if @{text "v"} is a root of the graph, the algorithm computes
the set of SCCs of the graph.
\<close>
theorem partial_correctness:
fixes v
defines "e \<equiv> dfs v (init_env v)"
assumes "dfs_dfss_dom (Inl (v, init_env v))"
shows "sccs e = {S . is_scc S \<and> (\<forall>n\<in>S. reachable v n)}"
(is "_ = ?rhs")
proof -
from assms init_env_pre_dfs[of v]
have post: "post_dfs v (init_env v) e"
by (auto dest: pre_post)
hence wf: "wf_env e"
by (simp add: post_dfs_def)
from post have "cstack e = []"
by (auto simp: post_dfs_def sub_env_def init_env_def)
have "stack e = []"
proof (rule ccontr)
assume "stack e \<noteq> []"
hence "hd (stack e) \<preceq> hd (stack e) in stack e"
by simp
with wf \<open>cstack e = []\<close> show "False"
unfolding wf_env_def
by (metis empty_iff empty_set precedes_mem(2))
qed
with post have vexp: "v \<in> explored e"
by (simp add: post_dfs_def)
from wf \<open>stack e = []\<close> have "explored e = visited e"
by (auto simp: wf_env_def)
have "sccs e \<subseteq> ?rhs"
proof
fix S
assume S: "S \<in> sccs e"
with wf have "is_scc S"
by (simp add: wf_env_def)
moreover
from S wf have "S \<subseteq> explored e"
unfolding wf_env_def
by blast
with post \<open>explored e = visited e\<close> have "\<forall>n\<in>S. reachable v n"
by (auto simp: post_dfs_def wf_env_def sub_env_def init_env_def)
ultimately show "S \<in> ?rhs"
by auto
qed
moreover
{
fix S
assume "is_scc S" "\<forall>n\<in>S. reachable v n"
from \<open>\<forall>n\<in>S. reachable v n\<close> vexp wf
have "S \<subseteq> \<Union> (sccs e)"
unfolding wf_env_def by (metis subset_eq)
with \<open>is_scc S\<close> obtain S' where S': "S' \<in> sccs e \<and> S \<inter> S' \<noteq> {}"
unfolding is_scc_def
by (metis Union_disjoint inf.absorb_iff2 inf_commute)
with wf have "is_scc S'"
by (simp add: wf_env_def)
with S' \<open>is_scc S\<close> have "S \<in> sccs e"
by (auto dest: scc_partition)
}
ultimately show ?thesis by blast
qed
section \<open>Proof of termination and total correctness\<close>
text \<open>
We define a binary relation on the arguments of functions @{text dfs} and @{text dfss},
and prove that this relation is well-founded and that all calls within
the function bodies respect the relation, assuming that the pre-conditions
of the initial function call are satisfied. By well-founded induction,
we conclude that the pre-conditions of the functions are sufficient to
ensure termination.
Following the internal representation of the two mutually recursive
functions in Isabelle as a single function on the disjoint sum of the
types of arguments, our relation is defined as a set of argument pairs
injected into the sum type. The left injection @{text Inl} takes
arguments of function @{text dfs}, the right injection @{text Inr}
takes arguments of function @{text dfss}.\footnote{Note that the
types of the arguments of @{text dfs} and @{text dfss} are actually
identical. We nevertheless use the sum type in order to remember
the function that was called.}
The conditions on the arguments in the definition of the relation
overapproximate the arguments in the actual calls.
\<close>
definition dfs_dfss_term::"(('v \<times> 'v env + 'v \<times> 'v env) \<times> ('v \<times> 'v env + 'v \<times> 'v env)) set" where
"dfs_dfss_term \<equiv>
{ (Inr(v, e1), Inl(v, e)) | v e e1.
v \<in> vertices - visited e \<and> visited e1 = visited e \<union> {v} }
\<union> { (Inl(w, e), Inr(v, e)) | v w e. v \<in> vertices}
\<union> { (Inr(v, e''), Inr(v, e)) | v e e''.
v \<in> vertices \<and> sub_env e e''
\<and> (\<exists>w \<in> vertices. w \<notin> vsuccs e v \<and> w \<in> vsuccs e'' v)}"
text \<open>
Informally, termination is ensured because at each call,
either a new vertex is visited (hence the complement of
the set of visited nodes w.r.t. the finite set of vertices
decreases) or a new successor is added to the set
@{text "vsuccs e v"} of some vertex @{text v}.
In order to make this argument formal, we inject the argument
tuples that appear in our relation into tuples consisting of
the sets mentioned in the informal argument. However, there is
one added complication because the call of @{text dfs} from
@{text dfss} does not immediately add the vertex to the set
of visited nodes (this happens only at the beginning of
function @{text dfs}). We therefore add a third component of
$0$ or $1$ to these tuples, reflecting the fact that there
can only be one call of @{text dfs} from @{text dfss} for a
given vertex @{text v}.
\<close>
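text \<open>
  For illustration, consider the three kinds of pairs in
  @{text dfs_dfss_term}, writing $V$ for the first and $S$ for the second
  component of the associated triples. The call of @{text dfss} from
  @{text dfs} strictly shrinks $V$ because the vertex is added to the set
  of visited nodes. The call of @{text dfs} from @{text dfss} leaves $V$
  and $S$ unchanged but decreases the third component from $1$ to $0$.
  Finally, the recursive call of @{text dfss} strictly shrinks $S$ because
  a new pair $(v,w)$ is recorded in @{text "vsuccs"}, while $V$ cannot grow.
\<close>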
fun dfs_dfss_to_tuple where
"dfs_dfss_to_tuple (Inl(v::'v, e::'v env)) =
(vertices - visited e, vertices \<times> vertices - {(u,u') | u u'. u' \<in> vsuccs e u}, 0)"
| "dfs_dfss_to_tuple (Inr(v::'v, e::'v env)) =
(vertices - visited e, vertices \<times> vertices - {(u,u') | u u'. u' \<in> vsuccs e u}, 1::nat)"
text \<open>
The triples defined in this way can be ordered lexicographically
(with the first two components ordered as finite subsets and the
third one following the predecessor relation on natural numbers).
We prove that the injection of the above relation into sets
of triples respects the lexicographic ordering and conclude that
our relation is well-founded.
\<close>
lemma wf_term: "wf dfs_dfss_term"
proof -
let ?r = "(finite_psubset :: ('v set \<times> 'v set) set)
<*lex*> (finite_psubset :: ((('v \<times> 'v) set) \<times> ('v \<times> 'v) set) set)
<*lex*> pred_nat"
have "wf (finite_psubset :: ('v set \<times> 'v set) set)"
by (rule wf_finite_psubset)
moreover
have "wf (finite_psubset :: ((('v \<times> 'v) set) \<times> ('v \<times> 'v) set) set)"
by (rule wf_finite_psubset)
ultimately have "wf ?r"
using wf_pred_nat by blast
moreover
have "dfs_dfss_term \<subseteq> inv_image ?r dfs_dfss_to_tuple"
proof (clarify)
fix a b
assume "(a,b) \<in> dfs_dfss_term"
hence "(\<exists>v w e e''. a = Inr(v,e'') \<and> b = Inr(v,e) \<and> v \<in> vertices \<and> sub_env e e''
\<and> w \<in> vertices \<and> w \<notin> vsuccs e v \<and> w \<in> vsuccs e'' v)
\<or> (\<exists>v e e1. a = Inr(v,e1) \<and> b = Inl(v,e) \<and> v \<in> vertices - visited e
\<and> visited e1 = visited e \<union> {v})
\<or> (\<exists>v w e. a = Inl(w,e) \<and> b = Inr(v,e))"
(is "?c1 \<or> ?c2 \<or> ?c3")
by (auto simp: dfs_dfss_term_def)
then show "(a,b) \<in> inv_image ?r dfs_dfss_to_tuple"
proof
assume "?c1"
then obtain v w e e'' where
ab: "a = Inr(v, e'')" "b = Inr(v,e)" and
vw: "v \<in> vertices" "w \<in> vertices" "w \<in> vsuccs e'' v" "w \<notin> vsuccs e v" and
sub: "sub_env e e''"
by blast
from sub have "vertices - visited e'' \<subseteq> vertices - visited e"
by (auto simp: sub_env_def)
moreover
from sub vw
have "(vertices \<times> vertices - {(u,u') | u u'. u' \<in> vsuccs e'' u})
\<subset> (vertices \<times> vertices - {(u,u') | u u'. u' \<in> vsuccs e u})"
by (auto simp: sub_env_def)
ultimately show ?thesis
using vfin ab by auto
next
assume "?c2 \<or> ?c3"
with vfin show ?thesis
by (auto simp: pred_nat_def)
qed
qed
ultimately show ?thesis
using wf_inv_image wf_subset by blast
qed
text \<open>
The following theorem establishes sufficient conditions that ensure
termination of the two functions @{text dfs} and @{text dfss}.
The proof proceeds by well-founded induction using the relation
@{text dfs_dfss_term}. Isabelle represents the termination domains
of the functions by the predicate @{text dfs_dfss_dom} and
generates a theorem @{text dfs_dfss.domintros} for proving
membership of arguments in the termination domains. The
actual formulation is a little technical because the mutual
induction must again be encoded in a single induction argument
over the sum type representing the arguments of both functions.
\<close>
theorem dfs_dfss_termination:
"\<lbrakk>v \<in> vertices ; pre_dfs v e\<rbrakk> \<Longrightarrow> dfs_dfss_dom(Inl(v, e))"
"\<lbrakk>v \<in> vertices ; pre_dfss v e\<rbrakk> \<Longrightarrow> dfs_dfss_dom(Inr(v, e))"
proof -
{ fix args
have "(case args
of Inl(v,e) \<Rightarrow>
v \<in> vertices \<and> pre_dfs v e
| Inr(v,e) \<Rightarrow>
v \<in> vertices \<and> pre_dfss v e)
\<longrightarrow> dfs_dfss_dom args" (is "?P args \<longrightarrow> ?Q args")
proof (rule wf_induct[OF wf_term])
fix arg :: "('v \<times> 'v env) + ('v \<times> 'v env)"
assume ih: "\<forall> arg'. (arg', arg) \<in> dfs_dfss_term \<longrightarrow> (?P arg' \<longrightarrow> ?Q arg')"
show "?P arg \<longrightarrow> ?Q arg"
proof
assume P: "?P arg"
show "?Q arg"
proof (cases arg)
case (Inl a)
then obtain v e where a: "arg = Inl(v, e)"
using dfs.cases by metis
with P have pre: "v \<in> vertices \<and> pre_dfs v e"
by simp
let ?e1 = "e\<lparr>visited := visited e \<union> {v}, stack := v # stack e, cstack := v # cstack e\<rparr>"
let ?recarg = "Inr(v, ?e1)"
from a pre
have "(?recarg, arg) \<in> dfs_dfss_term"
by (auto simp: pre_dfs_def dfs_dfss_term_def)
moreover
from pre have "?P ?recarg"
by (auto dest: pre_dfs_pre_dfss)
ultimately have "?Q ?recarg"
using ih a by auto
then have "?Q (Inl(v, e))"
by (auto intro: dfs_dfss.domintros)
then show ?thesis
by (simp add: a)
next
case (Inr b)
then obtain v e where b: "arg = Inr(v, e)"
using dfs.cases by metis
with P have pre: "v \<in> vertices \<and> pre_dfss v e"
by simp
let ?sw = "SOME w. w \<in> successors v \<and> w \<notin> vsuccs e v"
have "?Q (Inr(v, e))"
proof (rule dfs_dfss.domintros)
fix w
assume "w \<in> successors v"
"?sw \<notin> explored e"
"?sw \<notin> visited e"
"\<not> dfs_dfss_dom (Inl (?sw, e))"
show "w \<in> vsuccs e v"
proof (rule ccontr)
assume "w \<notin> vsuccs e v"
with \<open>w \<in> successors v\<close> have sw: "?sw \<in> successors v - vsuccs e v"
by (metis (mono_tags, lifting) Diff_iff some_eq_imp)
with pre \<open>?sw \<notin> visited e\<close> have "pre_dfs ?sw e"
by (blast intro: pre_dfss_pre_dfs)
moreover
from pre sw sclosed have "?sw \<in> vertices"
by blast
moreover
from pre have "(Inl(?sw,e), Inr(v,e)) \<in> dfs_dfss_term"
by (simp add: dfs_dfss_term_def)
ultimately have "dfs_dfss_dom (Inl(?sw,e))"
using ih b by auto
with \<open>\<not> dfs_dfss_dom (Inl (?sw, e))\<close>
show "False" ..
qed
next
let ?e' = "dfs ?sw e"
let ?e''= "?e'\<lparr>vsuccs := \<lambda>x. if x = v then vsuccs ?e' v \<union> {?sw}
else vsuccs ?e' x\<rparr>"
fix w
assume asm: "w \<in> successors v" "w \<notin> vsuccs e v"
"?sw \<notin> visited e" "?sw \<notin> explored e"
from \<open>w \<in> successors v\<close> \<open>w \<notin> vsuccs e v\<close>
have sw: "?sw \<in> successors v - vsuccs e v"
by (metis (no_types, lifting) Diff_iff some_eq_imp)
with pre \<open>?sw \<notin> visited e\<close> have "pre_dfs ?sw e"
by (blast intro: pre_dfss_pre_dfs)
moreover
from pre sw sclosed have "?sw \<in> vertices"
by blast
moreover
from pre have "(Inl(?sw, e), Inr(v,e)) \<in> dfs_dfss_term"
by (simp add: dfs_dfss_term_def)
ultimately have "dfs_dfss_dom (Inl(?sw, e))"
using ih b by auto
from this \<open>pre_dfs ?sw e\<close> have post: "post_dfs ?sw e ?e'"
by (rule pre_post)
hence "sub_env e ?e'"
by (simp add: post_dfs_def)
moreover
have "sub_env ?e' ?e''"
by (auto simp: sub_env_def)
ultimately have "sub_env e ?e''"
by (rule sub_env_trans)
with pre \<open>?sw \<in> vertices\<close> sw
have "(Inr(v, ?e''), Inr(v, e)) \<in> dfs_dfss_term"
by (auto simp: dfs_dfss_term_def)
moreover
from pre post sw \<open>?sw \<notin> visited e\<close> have "pre_dfss v ?e''"
by (blast intro: pre_dfss_post_dfs_pre_dfss)
ultimately show "dfs_dfss_dom(Inr(v, ?e''))"
using pre ih b by auto
next
let ?e'' = "e\<lparr>vsuccs := \<lambda>x. if x = v then vsuccs e v \<union> {?sw} else vsuccs e x\<rparr>"
fix w
assume "w \<in> successors v" "w \<notin> vsuccs e v"
"?sw \<notin> visited e" "?sw \<in> explored e"
with pre have "False"
unfolding pre_dfss_def wf_env_def
by (meson subsetD)
thus "?Q (Inr(v, ?e''))"
by simp
next
fix w
assume asm: "w \<in> successors v" "w \<notin> vsuccs e v"
"?sw \<in> visited e" "?sw \<in> explored e"
let ?e'' = "e\<lparr>vsuccs := \<lambda>x. if x = v then vsuccs e v \<union> {?sw} else vsuccs e x\<rparr>"
let ?recarg = "Inr(v, ?e'')"
from \<open>w \<in> successors v\<close> \<open>w \<notin> vsuccs e v\<close>
have sw: "?sw \<in> successors v - vsuccs e v"
by (metis (no_types, lifting) Diff_iff some_eq_imp)
have "(?recarg, arg) \<in> dfs_dfss_term"
proof -
have "sub_env e ?e''"
by (auto simp: sub_env_def)
moreover
from sw pre sclosed
have "\<exists>u \<in> vertices. u \<notin> vsuccs e v \<and> u \<in> vsuccs ?e'' v"
by auto
ultimately show ?thesis
using pre b unfolding dfs_dfss_term_def by blast
qed
moreover
from pre sw \<open>?sw \<in> explored e\<close> have "?P ?recarg"
by (auto dest: pre_dfss_explored_pre_dfss)
ultimately show "?Q ?recarg"
using ih b by blast
next
fix w
assume asm: "w \<in> successors v" "w \<notin> vsuccs e v"
"?sw \<in> visited e" "?sw \<notin> explored e"
let ?eu = "unite v ?sw e"
let ?e'' = "?eu\<lparr>vsuccs := \<lambda>x. if x = v then vsuccs ?eu v \<union> {?sw} else vsuccs ?eu x\<rparr>"
let ?recarg = "Inr(v, ?e'')"
from \<open>w \<in> successors v\<close> \<open>w \<notin> vsuccs e v\<close>
have sw: "?sw \<in> successors v - vsuccs e v"
by (metis (no_types, lifting) Diff_iff some_eq_imp)
have "(?recarg, arg) \<in> dfs_dfss_term"
proof -
from pre asm sw have "sub_env e ?eu"
by (blast dest: unite_sub_env)
hence "sub_env e ?e''"
by (auto simp: sub_env_def)
moreover
from sw pre sclosed
have "\<exists>u \<in> vertices. u \<notin> vsuccs e v \<and> u \<in> vsuccs ?e'' v"
by auto
ultimately show ?thesis
using pre b unfolding dfs_dfss_term_def by blast
qed
moreover
from pre sw \<open>?sw \<in> visited e\<close> \<open>?sw \<notin> explored e\<close> have "?P ?recarg"
by (auto dest: pre_dfss_unite_pre_dfss)
ultimately show "?Q ?recarg"
using ih b by auto
qed
then show ?thesis
by (simp add: b)
qed
qed
qed
}
note dom=this
from dom
show "\<lbrakk> v \<in> vertices ; pre_dfs v e\<rbrakk> \<Longrightarrow> dfs_dfss_dom(Inl(v, e))"
by auto
from dom
show "\<lbrakk> v \<in> vertices ; pre_dfss v e\<rbrakk> \<Longrightarrow> dfs_dfss_dom(Inr(v, e))"
by auto
qed
text \<open>
Putting everything together, we prove the total correctness of
the algorithm when applied to some (root) vertex.
\<close>
theorem correctness:
assumes "v \<in> vertices"
shows "sccs (dfs v (init_env v)) = {S . is_scc S \<and> (\<forall>n\<in>S. reachable v n)}"
using assms init_env_pre_dfs[of v]
by (simp add: dfs_dfss_termination partial_correctness)
end
end
|
lemma uniformity_Abort: "uniformity = Filter.abstract_filter (\<lambda>u. Code.abort (STR ''uniformity is not executable'') (\<lambda>u. uniformity))" |
/-
Copyright (c) 2022 Matthias Uschold. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Matthias Uschold.
-/
import tactic
import algebra.group.basic
import data.real.basic
import topology.continuous_function.bounded
import topology.category.Top
import algebra.group.defs
/-!
# Means on Groups
In this file, we introduce means on groups,
i.e. `averaging' maps that turn bounded (continuous) functions G → ℝ into real numbers.
## Main Definitions
- `mean` : Structure for means
- `mean_pushforward` : Pushing forward a mean on $G$ via a map $G → H$ yields a mean on $H$.
## Implementation Notes
We will ultimately need this notion to define amenability of groups via
invariant means (in the file def_amenable).
This file defines means by equipping all groups with their discrete topology,
thus enabling us to use `bounded_continuous_function`.
To consider means on (non-discrete) topological groups,
some definitions would need to change.
## References
* <https://en.wikipedia.org/wiki/Amenable_group>
* [C. Löh, *Geometric Group Theory*, Proposition 9.1.1][loeh17]
## Tags
mean
-/
open classical
open bounded_continuous_function
variables (G:Type*) [group G]
instance topspaceG : topological_space G := ⊥
instance discrtopG : discrete_topology G := ⟨rfl⟩
/-- A mean on a group. -/
structure mean
:= mk ::
(lin_map : (bounded_continuous_function G ℝ) →ₗ[ℝ] ℝ)
(normality : lin_map (bounded_continuous_function.const G (1:ℝ)) = 1)
(positivity: ∀ {f : bounded_continuous_function G ℝ},
(∀ (x:G), f x ≥ 0) → lin_map f ≥ 0)
instance : has_coe (mean G) ((bounded_continuous_function G ℝ) →ₗ[ℝ] ℝ)
:= {coe := mean.lin_map}
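/- A first example (a sketch for illustration, not part of the original
development; `eval_mean` is a hypothetical name): evaluation at a fixed
group element is a mean. The proofs assume the pointwise simp lemmas for
`bounded_continuous_function` from mathlib. -/
noncomputable def eval_mean (g : G) : mean G :=
mean.mk
  -- the evaluation map f ↦ f g, bundled as a linear map
  (linear_map.mk (λ f, f g) (λ f₁ f₂, by simp) (λ r f, by simp))
  -- the constant function 1 evaluates to 1
  (by simp)
  -- positivity holds pointwise
  (by { intros f fpos, exact fpos g })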
/-- Equality of means can be checked by evaluation. -/
@[ext]
theorem ext {m n : mean G}
(h: ∀ f, m f = n f)
: m = n
:= begin
cases m,
cases n,
simp,
ext,
exact h x,
end
namespace mean
section el_facts
/-!
### Elementary facts
We collect some elementary facts about means
-/
@[simp]
lemma mean_of_neg
(m : mean G)
{f: bounded_continuous_function G ℝ}
: m (-f) = - m f
:= begin
have : m (-f) + m f = 0,
{
calc m (-f) + m f
= m ((-f) +f )
: by exact (m.lin_map.map_add' (-f) f).symm
... = m 0
: by ring_nf
... = m ((0:ℝ) • 0)
: by simp
... = (ring_hom.id ℝ) 0 • m 0
: by exact m.lin_map.map_smul' 0 0
... = 0
: by simp,
},
linarith,
end
lemma mean_bounded
(m : mean G)
{f: bounded_continuous_function G ℝ}
{M : ℝ}
(fbound : ∀ (x:G), f x ≤ M)
: m f ≤ M
:= begin
-- strategy of proof: (M - f) is a nonnegative function
let diff : bounded_continuous_function G ℝ
:= bounded_continuous_function.const G M - f,
have diffpos : ∀ (x:G), diff x ≥ 0,
{
assume (x:G),
dsimp[diff],
by linarith only [fbound x],
},
have mdiffpos : m diff ≥ 0
:= m.positivity diffpos,
have mean_const : m (bounded_continuous_function.const G M) = M,
{
calc m (bounded_continuous_function.const G M)
= m (M • bounded_continuous_function.const G 1)
: by congr'; begin
ext (x:G),
simp,
end
... = M • m (bounded_continuous_function.const G 1)
: by exact m.lin_map.map_smul' M _
... = M • 1
: by congr'; exact m.normality
... = M
: by simp,
},
have : m f + m diff = M
:= by
calc m f + m diff
= m (f + diff)
: by exact (m.lin_map.map_add' f diff).symm
... = m (f + bounded_continuous_function.const G M - f)
: by simp[diff]
... = m (bounded_continuous_function.const G M )
: by simp
... = M
: by simp [mean_const],
by linarith only [this, mdiffpos],
end
/-- Essentially: w.r.t. the sup-norm, `m` has norm ≤ 1. -/
lemma mean_bounded_abs
(m : mean G)
{f: bounded_continuous_function G ℝ}
{M : ℝ}
(fbound : ∀ (x:G), |f x| ≤ M)
: |m f| ≤ M
:= begin
have bound_le : m f ≤ M,
{
have fbound' : ∀ (x:G), f x ≤ M
:= (λ x, (abs_le.mp (fbound x)).2),
exact mean_bounded G m fbound',
},
have bound_ge : m f ≥ -M,
{
have negfbound' : ∀ (x:G), (-f) x ≤ M,
{
assume x:G,
simp,
by linarith[(abs_le.mp (fbound x)).1],
},
have : m (-f) ≤ M
:= mean_bounded G m negfbound',
have : m (-f) = - m f
:= mean_of_neg G m,
by linarith,
},
exact abs_le.mpr (and.intro bound_ge bound_le),
end
@[simp]
lemma mean_add
{m : mean G}
{f g: bounded_continuous_function G ℝ}
: m (f+g) = m f + m g
:= m.lin_map.map_add' f g
@[simp]
lemma mean_smul
{m : mean G}
{f: bounded_continuous_function G ℝ}
{r :ℝ}
: m (r•f) =r • (m f)
:= m.lin_map.map_smul' r f
/-- Means are monotone functions. -/
lemma mean_monotone
{m : mean G}
{f g: bounded_continuous_function G ℝ}
(f_le_g : f ≤ g)
: m f ≤ m g
:= begin
have diff_pos: ∀ (x:G), (g-f) x ≥ 0,
{
assume x:G,
have : (g-f) x = g x - f x
:= by refl,
rw this,
simp,
exact f_le_g x,
},
calc m f
= m f + 0
: by ring
... ≤ m f + m (g-f)
: by {simp, exact m.positivity diff_pos,}
... = m (f+(g-f))
: by rw mean_add
... = m g
: by congr';ring,
end
end el_facts
section pushforward_mean
/-!
### Pushforwards of Means
We will often use the following construction: If `m` is a mean on `G`
and `π : G → H` is any map, we can obtain a mean on `H`
by precomposing functions `H → ℝ` with `π`.
-/
variables {H : Type* } [group H]
(π: G → H)
variable {G}
/-- Precomposition of a `bounded_continuous_function` with a continuous map. -/
noncomputable def bcont_precomp
{X Y: Type*}
[topological_space X]
[topological_space Y]
(h : C(X,Y))
(f : bounded_continuous_function Y ℝ)
: bounded_continuous_function X ℝ
:= bounded_continuous_function.mk
(continuous_map.comp f.to_continuous_map h)
(by {
rcases f.map_bounded' with ⟨C, hf⟩,
use C,
assume x y : X,
specialize hf (h x) (h y),
exact hf,
})
/-- Even easier: with the discrete topology. -/
noncomputable def bcont_precomp_discrete
{X Y: Type*}
[topological_space X] [discrete_topology X]
[topological_space Y]
(h : X → Y)
(f : bounded_continuous_function Y ℝ)
: bounded_continuous_function X ℝ
:= bcont_precomp (continuous_map.mk h continuous_of_discrete_topology) f
noncomputable def comp_bcont
(f: bounded_continuous_function H ℝ)
: bounded_continuous_function G ℝ
:= bcont_precomp_discrete π f
@[simp]
lemma comp_bcont_eval
(π : G → H)
(f: bounded_continuous_function H ℝ)
(g :G)
: comp_bcont π f g = f (π g)
:= by refl
@[simp]
noncomputable def pull_bcont
(π : G → H)
: (bounded_continuous_function H ℝ) →ₗ[ℝ] (bounded_continuous_function G ℝ)
:= linear_map.mk (λ f, comp_bcont π f)
(by tauto) (by tauto)
include π
@[simp]
noncomputable def mean_pushforward_linmap
(π : G → H)
(m : mean G)
: (bounded_continuous_function H ℝ) →ₗ[ℝ] ℝ
:= linear_map.comp m.lin_map (pull_bcont π)
lemma mean_pushforward_norm
(π : G → H)
(m : mean G)
: (mean_pushforward_linmap π m) (bounded_continuous_function.const H (1:ℝ)) = 1
:= begin
-- the pushforward of the 1-function is the 1-function
have pull_of_one
: (pull_bcont π) (bounded_continuous_function.const H (1:ℝ))
= bounded_continuous_function.const G (1:ℝ),
{
ext (x:G),
simp,
},
calc (mean_pushforward_linmap π m) (bounded_continuous_function.const H (1:ℝ))
= m.lin_map (pull_bcont π (bounded_continuous_function.const H (1:ℝ)))
: by tauto
... = m.lin_map (bounded_continuous_function.const G (1:ℝ))
: by rw pull_of_one
... = 1
: m.normality,
end
lemma mean_pushforward_pos
(π : G → H)
(m : mean G)
: ∀ (f : bounded_continuous_function H ℝ),
(∀ (x:H), f(x) ≥ 0) → (mean_pushforward_linmap π m) f ≥ 0
:= begin
assume f fnonneg,
apply m.positivity,
-- key step: pull_bcont π f is also nonneg
change ∀(x:G), (pull_bcont π f) x ≥ 0,
assume (x:G),
specialize fnonneg (π x),
by tauto,
end
/-- The mean on `H` induced by the mean on `G`. -/
@[simp]
noncomputable def mean_pushforward
(π : G → H)
(m : mean G)
: mean H
:= mean.mk (mean_pushforward_linmap π m)
(mean_pushforward_norm π m)
(mean_pushforward_pos π m)
end pushforward_mean
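/- Usage sketch (an illustration, not in the original file; it additionally
assumes `group_theory.quotient_group` is imported): for a normal subgroup `N`
of `G`, pushing a mean forward along the canonical projection
`quotient_group.mk : G → G ⧸ N` yields a mean on the quotient group, as in
`mean_pushforward quotient_group.mk m : mean (G ⧸ N)`. -/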
end mean
|
// Copyright (c) 2013 The Bitcoin Core developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <boost/test/unit_test.hpp>
#include "entrustment.h"
BOOST_AUTO_TEST_SUITE(entrust_reward_tests)
BOOST_AUTO_TEST_CASE(test_activeuser) {
Entrustment& ent = Entrustment::GetInstance();
// The minimum divide reward changes from V1 to V2 at the fork height.
BlockHeight forkheight = ent.forkheight_halfMinDivideReward;
BOOST_CHECK(ent.GetMinDivideReward(forkheight-1) == ent.GetMinDivideRewardV1());
BOOST_CHECK(ent.GetMinDivideReward(forkheight) == ent.GetMinDivideRewardV2());
BOOST_CHECK(ent.GetMinDivideReward(forkheight+1) == ent.GetMinDivideRewardV2());
CAmount localMinDivideReward = ent.GetMinDivideReward(forkheight);
// A user with a positive entrust amount is always considered active.
dpos::UserStatistics user;
user.entrustAmount = 1;
BOOST_CHECK(ent.IsActiveUser(user,forkheight));
// With no entrust amount, activity depends on whether the difference
// totalDealDivideReward - totalDividendReward reaches the minimum divide reward.
// Strictly above the threshold: active.
user.entrustAmount = 0;
user.totalDividendReward = 0;
user.totalDealDivideReward = user.totalDividendReward + localMinDivideReward + 1;
BOOST_CHECK(user.totalDealDivideReward - user.totalDividendReward > localMinDivideReward);
BOOST_CHECK(ent.IsActiveUser(user,forkheight));
// Exactly at the threshold: still active.
user.totalDealDivideReward = user.totalDividendReward + localMinDivideReward;
BOOST_CHECK(user.totalDealDivideReward - user.totalDividendReward == localMinDivideReward);
BOOST_CHECK(ent.IsActiveUser(user,forkheight));
// Strictly below the threshold: inactive.
user.totalDealDivideReward = user.totalDividendReward + localMinDivideReward - 1;
BOOST_CHECK(user.totalDealDivideReward - user.totalDividendReward < localMinDivideReward);
BOOST_CHECK(!ent.IsActiveUser(user,forkheight));
}
BOOST_AUTO_TEST_SUITE_END()
|
A sensitive barometric pressure sensor can be used to demonstrate transpiration. The picture here shows a low pressure sensor (as it was called) with a tube sealed onto the stem of a plant. We think that transpiration in the plant stem creates enough negative pressure to show a change on the on-screen graph. |
type $RR <class {@e1 i32, @e2 f32, @e3 f64}>
type $SS <classincomplete <$RR> {@f1 i32, @f2 i8, @f3 i8}>
javaclass $TT <$SS> public static
func &foo (
var %x <$SS>) i32 {
dassign %x 2 ( constval i32 32 )
return ( dread i32 %x 2 ) }
# EXEC: %irbuild Main.mpl
# EXEC: %irbuild Main.irb.mpl
# EXEC: %cmp Main.irb.mpl Main.irb.irb.mpl
|
theorem foo (x : Nat) (h : x > 0) : x ≠ 0 :=
  match x, h with
  | 0,   h => absurd h (Nat.lt_irrefl 0)
  | n+1, _ => Nat.succ_ne_zero n
inductive Mem : α → List α → Prop where
| head (a : α) (as : List α) : Mem a (a::as)
| tail (a b : α) (bs : List α) : Mem a bs → Mem a (b::bs)
infix:50 "∈" => Mem
theorem mem_split {a : α} {as : List α} (h : a ∈ as) : ∃ s t, as = s ++ a :: t := by
induction as with
| nil => cases h
| cons b bs ih => cases h with
| head a bs => exact ⟨[], ⟨bs, rfl⟩⟩
| tail a b bs h =>
match bs, ih h with
| _, ⟨s, t, rfl⟩ =>
exists b::s; exists t
rw [List.cons_append]
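-- A quick sanity check (a sketch added for illustration): `Mem.head` gives
-- membership at the head of the list, so `mem_split` yields a split whose
-- prefix may be taken empty.
example : ∃ s t, [1, 2, 3] = s ++ 1 :: t :=
  mem_split (Mem.head 1 [2, 3])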
|
(* (c) Copyright Microsoft Corporation and Inria. All rights reserved. *)
Require Import ssreflect ssrbool eqtype ssrnat.
Lemma test (x : bool) : True.
have H1 x := x.
have (x) := x => H2.
have H3 T (x : T) := x.
have ? : bool := H1 _ x.
have ? : bool := H2 _ x.
have ? : bool := H3 _ x.
have ? (z : bool) : forall y : bool, z = z := fun y => refl_equal _.
have ? w : w = w := @refl_equal nat w.
have ? y : true by [].
have ? (z : bool) : z = z.
exact: (@refl_equal _ z).
have ? (z w : bool) : z = z by exact: (@refl_equal _ z).
have H w (a := 3) (_ := 4) : w && true = w.
by rewrite andbT.
exact I.
Qed.
Lemma test1 : True.
suff (x : bool): x = x /\ True.
by move/(_ true); case=> _.
split; first by exact: (@refl_equal _ x).
suff H y : y && true = y /\ True.
by case: (H true).
suff H1 /= : true && true /\ True.
by rewrite andbT; split; [exact: (@refl_equal _ y) | exact: I].
match goal with |- is_true true /\ True => idtac end.
by split.
Qed.
Lemma foo n : n >= 0.
have f i (j := i + n) : j < n.
match goal with j := i + n |- _ => idtac end.
Undo 2.
suff f i (j := i + n) : j < n.
done.
match goal with j := i + n |- _ => idtac end.
Undo 3.
done.
Qed.
|
lemma cone_empty[intro, simp]: "cone {}" |
> COMING SOON: This is where you will connect external APIs. |
State Before:
α : Type u_1
β : Type u_2
q : Semiquot α
f : α → β
h : ∀ (a : α), a ∈ q → ∀ (b : α), b ∈ q → f a = f b
a : α
aq : a ∈ q
⊢ liftOn q f h = f a
State After:
α : Type u_1
β : Type u_2
q : Semiquot α
f : α → β
a : α
aq : a ∈ q
⊢ ∀ (h : ∀ (a : α), a ∈ q → ∀ (b : α), b ∈ q → f a = f b), liftOn q f h = f a
Tactic: revert h

State Before:
α : Type u_1
β : Type u_2
q : Semiquot α
f : α → β
a : α
aq : a ∈ q
⊢ ∀ (h : ∀ (a : α), a ∈ q → ∀ (b : α), b ∈ q → f a = f b), liftOn q f h = f a
State After:
α : Type u_1
β : Type u_2
q : Semiquot α
f : α → β
a : α
aq : a ∈ q
⊢ ∀ (h : ∀ (a_1 : α), a_1 ∈ mk aq → ∀ (b : α), b ∈ mk aq → f a_1 = f b), liftOn (mk aq) f h = f a
Tactic: rw [eq_mk_of_mem aq]

State Before:
α : Type u_1
β : Type u_2
q : Semiquot α
f : α → β
a : α
aq : a ∈ q
⊢ ∀ (h : ∀ (a_1 : α), a_1 ∈ mk aq → ∀ (b : α), b ∈ mk aq → f a_1 = f b), liftOn (mk aq) f h = f a
State After:
α : Type u_1
β : Type u_2
q : Semiquot α
f : α → β
a : α
aq : a ∈ q
h✝ : ∀ (a_1 : α), a_1 ∈ mk aq → ∀ (b : α), b ∈ mk aq → f a_1 = f b
⊢ liftOn (mk aq) f h✝ = f a
Tactic: intro

State Before:
α : Type u_1
β : Type u_2
q : Semiquot α
f : α → β
a : α
aq : a ∈ q
h✝ : ∀ (a_1 : α), a_1 ∈ mk aq → ∀ (b : α), b ∈ mk aq → f a_1 = f b
⊢ liftOn (mk aq) f h✝ = f a
State After: no goals
Tactic: rfl
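Reassembled, the four tactic steps above form a complete proof. A minimal Lean 4 sketch of the same script, assuming Mathlib's Semiquot API (liftOn and eq_mk_of_mem) is in scope:

import Mathlib.Data.Semiquot

-- Sketch: the goal and hypotheses of the trace above, proved by replaying
-- the four Tactic: lines in order.
example {α β : Type*} (q : Semiquot α) (f : α → β)
    (h : ∀ a ∈ q, ∀ b ∈ q, f a = f b) (a : α) (aq : a ∈ q) :
    q.liftOn f h = f a := by
  revert h                        -- generalize the compatibility proof
  rw [Semiquot.eq_mk_of_mem aq]   -- rewrite q as mk aq
  intro h                         -- reintroduce the proof
  rfl                             -- liftOn (mk aq) f h unfolds to f a
|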
(* Title: HOL/Multivariate_Analysis/Euclidean_Space.thy
Author: Johannes Hölzl, TU München
Author: Brian Huffman, Portland State University
*)
section {* Finite-Dimensional Inner Product Spaces *}
theory Euclidean_Space
imports
L2_Norm
"~~/src/HOL/Library/Inner_Product"
"~~/src/HOL/Library/Product_Vector"
begin
subsection {* Type class of Euclidean spaces *}
class euclidean_space = real_inner +
fixes Basis :: "'a set"
assumes nonempty_Basis [simp]: "Basis \<noteq> {}"
assumes finite_Basis [simp]: "finite Basis"
assumes inner_Basis:
"\<lbrakk>u \<in> Basis; v \<in> Basis\<rbrakk> \<Longrightarrow> inner u v = (if u = v then 1 else 0)"
assumes euclidean_all_zero_iff:
"(\<forall>u\<in>Basis. inner x u = 0) \<longleftrightarrow> (x = 0)"
abbreviation dimension :: "('a::euclidean_space) itself \<Rightarrow> nat" where
"dimension TYPE('a) \<equiv> card (Basis :: 'a set)"
syntax "_type_dimension" :: "type => nat" ("(1DIM/(1'(_')))")
translations "DIM('t)" == "CONST dimension (TYPE('t))"
lemma (in euclidean_space) norm_Basis[simp]: "u \<in> Basis \<Longrightarrow> norm u = 1"
unfolding norm_eq_sqrt_inner by (simp add: inner_Basis)
lemma (in euclidean_space) inner_same_Basis[simp]: "u \<in> Basis \<Longrightarrow> inner u u = 1"
by (simp add: inner_Basis)
lemma (in euclidean_space) inner_not_same_Basis: "u \<in> Basis \<Longrightarrow> v \<in> Basis \<Longrightarrow> u \<noteq> v \<Longrightarrow> inner u v = 0"
by (simp add: inner_Basis)
lemma (in euclidean_space) sgn_Basis: "u \<in> Basis \<Longrightarrow> sgn u = u"
unfolding sgn_div_norm by (simp add: scaleR_one)
lemma (in euclidean_space) Basis_zero [simp]: "0 \<notin> Basis"
proof
assume "0 \<in> Basis" thus "False"
using inner_Basis [of 0 0] by simp
qed
lemma (in euclidean_space) nonzero_Basis: "u \<in> Basis \<Longrightarrow> u \<noteq> 0"
by clarsimp
lemma (in euclidean_space) SOME_Basis: "(SOME i. i \<in> Basis) \<in> Basis"
by (metis ex_in_conv nonempty_Basis someI_ex)
lemma (in euclidean_space) inner_setsum_left_Basis[simp]:
"b \<in> Basis \<Longrightarrow> inner (\<Sum>i\<in>Basis. f i *\<^sub>R i) b = f b"
by (simp add: inner_setsum_left inner_Basis if_distrib comm_monoid_add_class.setsum.If_cases)
lemma (in euclidean_space) euclidean_eqI:
assumes b: "\<And>b. b \<in> Basis \<Longrightarrow> inner x b = inner y b" shows "x = y"
proof -
from b have "\<forall>b\<in>Basis. inner (x - y) b = 0"
by (simp add: inner_diff_left)
then show "x = y"
by (simp add: euclidean_all_zero_iff)
qed
lemma (in euclidean_space) euclidean_eq_iff:
"x = y \<longleftrightarrow> (\<forall>b\<in>Basis. inner x b = inner y b)"
by (auto intro: euclidean_eqI)
lemma (in euclidean_space) euclidean_representation_setsum:
"(\<Sum>i\<in>Basis. f i *\<^sub>R i) = b \<longleftrightarrow> (\<forall>i\<in>Basis. f i = inner b i)"
by (subst euclidean_eq_iff) simp
lemma (in euclidean_space) euclidean_representation_setsum':
"b = (\<Sum>i\<in>Basis. f i *\<^sub>R i) \<longleftrightarrow> (\<forall>i\<in>Basis. f i = inner b i)"
by (auto simp add: euclidean_representation_setsum[symmetric])
lemma (in euclidean_space) euclidean_representation: "(\<Sum>b\<in>Basis. inner x b *\<^sub>R b) = x"
unfolding euclidean_representation_setsum by simp
lemma (in euclidean_space) choice_Basis_iff:
fixes P :: "'a \<Rightarrow> real \<Rightarrow> bool"
shows "(\<forall>i\<in>Basis. \<exists>x. P i x) \<longleftrightarrow> (\<exists>x. \<forall>i\<in>Basis. P i (inner x i))"
unfolding bchoice_iff
proof safe
fix f assume "\<forall>i\<in>Basis. P i (f i)"
then show "\<exists>x. \<forall>i\<in>Basis. P i (inner x i)"
by (auto intro!: exI[of _ "\<Sum>i\<in>Basis. f i *\<^sub>R i"])
qed auto
lemma DIM_positive: "0 < DIM('a::euclidean_space)"
by (simp add: card_gt_0_iff)
subsection {* Subclass relationships *}
instance euclidean_space \<subseteq> perfect_space
proof
fix x :: 'a show "\<not> open {x}"
proof
assume "open {x}"
then obtain e where "0 < e" and e: "\<forall>y. dist y x < e \<longrightarrow> y = x"
unfolding open_dist by fast
def y \<equiv> "x + scaleR (e/2) (SOME b. b \<in> Basis)"
have [simp]: "(SOME b. b \<in> Basis) \<in> Basis"
by (rule someI_ex) (auto simp: ex_in_conv)
from `0 < e` have "y \<noteq> x"
unfolding y_def by (auto intro!: nonzero_Basis)
from `0 < e` have "dist y x < e"
unfolding y_def by (simp add: dist_norm)
from `y \<noteq> x` and `dist y x < e` show "False"
using e by simp
qed
qed
subsection {* Class instances *}
subsubsection {* Type @{typ real} *}
instantiation real :: euclidean_space
begin
definition
[simp]: "Basis = {1::real}"
instance
by default auto
end
lemma DIM_real[simp]: "DIM(real) = 1"
by simp
subsubsection {* Type @{typ complex} *}
instantiation complex :: euclidean_space
begin
definition Basis_complex_def:
"Basis = {1, ii}"
instance
by default (auto simp add: Basis_complex_def intro: complex_eqI split: split_if_asm)
end
lemma DIM_complex[simp]: "DIM(complex) = 2"
unfolding Basis_complex_def by simp
subsubsection {* Type @{typ "'a \<times> 'b"} *}
instantiation prod :: (euclidean_space, euclidean_space) euclidean_space
begin
definition
"Basis = (\<lambda>u. (u, 0)) ` Basis \<union> (\<lambda>v. (0, v)) ` Basis"
lemma setsum_Basis_prod_eq:
fixes f::"('a*'b)\<Rightarrow>('a*'b)"
shows "setsum f Basis = setsum (\<lambda>i. f (i, 0)) Basis + setsum (\<lambda>i. f (0, i)) Basis"
proof -
have "inj_on (\<lambda>u. (u::'a, 0::'b)) Basis" "inj_on (\<lambda>u. (0::'a, u::'b)) Basis"
by (auto intro!: inj_onI Pair_inject)
thus ?thesis
unfolding Basis_prod_def
by (subst setsum.union_disjoint) (auto simp: Basis_prod_def setsum.reindex)
qed
instance proof
show "(Basis :: ('a \<times> 'b) set) \<noteq> {}"
unfolding Basis_prod_def by simp
next
show "finite (Basis :: ('a \<times> 'b) set)"
unfolding Basis_prod_def by simp
next
fix u v :: "'a \<times> 'b"
assume "u \<in> Basis" and "v \<in> Basis"
thus "inner u v = (if u = v then 1 else 0)"
unfolding Basis_prod_def inner_prod_def
by (auto simp add: inner_Basis split: split_if_asm)
next
fix x :: "'a \<times> 'b"
show "(\<forall>u\<in>Basis. inner x u = 0) \<longleftrightarrow> x = 0"
unfolding Basis_prod_def ball_Un ball_simps
by (simp add: inner_prod_def prod_eq_iff euclidean_all_zero_iff)
qed
lemma DIM_prod[simp]: "DIM('a \<times> 'b) = DIM('a) + DIM('b)"
unfolding Basis_prod_def
by (subst card_Un_disjoint) (auto intro!: card_image arg_cong2[where f="op +"] inj_onI)
end
end
|
suppressWarnings(library(e1071))
# Train a naive Bayes classifier, restricted to the feature subset
# listed in the abt_features file.
train_model <- function(X, Y, ...) {
  feats <- read.table("task1/data/processed/abt_features.txt",
                      stringsAsFactors = FALSE)[, 1]
  naiveBayes(X[feats], Y)
}
# Predict with the fitted model; type = "raw" returns class probabilities.
predict_model <- function(mod, X, type = "raw", ...) {
  predict(mod, newdata = X, type = type)
}
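For comparison, the same pattern (load a saved feature list, fit naive Bayes on that subset, then emit class probabilities) can be sketched in Python. This assumes scikit-learn and pandas; GaussianNB is only an approximation of e1071::naiveBayes, which also handles categorical predictors:

# Sketch of the train/predict pair above in Python (assumptions: the same
# feature file, numeric predictors, scikit-learn >= 1.0 for feature_names_in_).
import pandas as pd
from sklearn.naive_bayes import GaussianNB

def train_model(X: pd.DataFrame, Y):
    feats = pd.read_table("task1/data/processed/abt_features.txt",
                          header=None)[0].tolist()
    return GaussianNB().fit(X[feats], Y)

def predict_model(model, X: pd.DataFrame):
    # align columns with those seen at fit time, then return probabilities
    # (the analogue of type = "raw" in the R version)
    return model.predict_proba(X[list(model.feature_names_in_)])
|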