lemma reduced_labelling_Suc: "reduced (Suc n) x \<noteq> Suc n \<Longrightarrow> reduced (Suc n) x = reduced n x" |
Formal statement is: lemma cone_Inter[intro]: "\<forall>s\<in>f. cone s \<Longrightarrow> cone (\<Inter>f)" Informal statement is: If $s$ is a cone for every $s \in f$, then $\bigcap f$ is a cone. |
/-
Copyright (c) 2017 Daniel Selsam. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Daniel Selsam
Proof that the memoization part of stochastic backpropagation is correct.
-/
import .graph .estimators .predicates .compute_grad
namespace certigrad
namespace theorems
open list
lemma step_congr (costs : list ID) (callback₁ callback₂ : list node → Π (tgt : reference), T tgt.2)
(nodes : list node) (m : env) (tgt : reference) :
∀ (n : node)
(H_callback_tgt : callback₁ nodes tgt = callback₂ nodes tgt)
(H_callback_node : callback₁ nodes n^.ref = callback₂ nodes n^.ref),
compute_grad_step costs callback₁ (n::nodes) m tgt = compute_grad_step costs callback₂ (n::nodes) m tgt
| ⟨ref, parents, operator.det op⟩ :=
assume H_callback_tgt H_callback_node,
begin dunfold compute_grad_step, rw [H_callback_tgt, H_callback_node] end
| ⟨ref, parents, operator.rand op⟩ :=
assume H_callback_tgt H_callback_node,
begin dunfold compute_grad_step, rw [H_callback_tgt] end
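-- One step of the memoized computation agrees with the slow, non-memoized one,
-- provided the callback already returns the slow gradients for the relevant targets.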
lemma step_correct {costs : list ID} {callback : list node → Π (tgt : reference), T tgt.2}
{nodes : list node} {m : env} {tgt : reference} :
∀ {n : node}
(H_callback_tgt : callback nodes tgt = compute_grad_slow costs nodes m tgt)
(H_callback_node : callback nodes n^.ref = compute_grad_slow costs nodes m n^.ref),
compute_grad_step costs callback (n::nodes) m tgt = compute_grad_slow costs (n::nodes) m tgt
| ⟨ref, parents, operator.det op⟩ :=
assume H_callback_tgt H_callback_node,
begin dunfold compute_grad_step compute_grad_slow, rw [sumrd_sumr, H_callback_tgt, H_callback_node] end
| ⟨ref, parents, operator.rand op⟩ :=
assume H_callback_tgt H_callback_node,
begin dunfold compute_grad_step compute_grad_slow, rw [sumrd_sumr, H_callback_tgt] end
lemma strip_foldr_base {costs : list ID} {m : env} :
Π {tgts : list reference} {tgt₀ : reference} {idx : ℕ},
at_idx tgts idx tgt₀ →
nodup tgts →
env.get tgt₀
(foldr (λ (ref : reference) (dict₀ : env),
(env.insert ref
(compute_grad_step costs (λ (nodes' : list node) (tgt' : reference), T.error "backprop-end") [] m ref)
dict₀))
env.mk
tgts)
=
compute_grad_step costs (λ (nodes : list node) (ref : reference), env.get ref env.mk) [] m tgt₀
| [] _ _ H_at_idx _ := false.rec _ (nat.not_lt_zero _ H_at_idx^.left)
| (tgt::tgts) tgt₀ 0 H_at_idx H_nodup :=
have H_eq : tgt = tgt₀, from at_idx_inj at_idx_0 H_at_idx,
begin
rw -H_eq,
dunfold foldr,
rw env.get_insert_same,
reflexivity
end
| (tgt::tgts) tgt₀ (idx+1) H_at_idx H_nodup :=
have H_neq : tgt₀ ≠ tgt, from nodup_at_idx_neq H_nodup H_at_idx,
have H_at_idx_next : at_idx tgts idx tgt₀, from at_idx_of_cons H_at_idx,
begin
dunfold foldr,
rw (env.get_insert_diff _ _ H_neq),
exact (strip_foldr_base H_at_idx_next (nodup_of_nodup_cons H_nodup)),
end
lemma strip_foldr_step {costs : list ID} {nodes : list node} {m old_dict : env} :
Π {tgts : list reference} {tgt₀ : reference} {idx : ℕ},
at_idx tgts idx tgt₀ →
nodup tgts →
env.get tgt₀
(foldr (λ (tgt' : reference) (dict' : env),
(env.insert tgt'
(compute_grad_step costs (λ (nodes : list node) (ref : reference), env.get ref old_dict)
nodes m tgt')
dict'))
env.mk
tgts)
=
compute_grad_step costs (λ (nodes : list node) (tgt : reference), env.get tgt old_dict) nodes m tgt₀
| [] _ _ H_idx _ := false.rec _ (nat.not_lt_zero _ H_idx^.left)
| (tgt::tgts) tgt₀ 0 H_at_idx H_nodup :=
begin
dunfold at_idx dnth at H_at_idx,
rw H_at_idx^.right,
dunfold foldr,
rw env.get_insert_same
end
| (tgt::tgts) tgt₀ (idx+1) H_at_idx H_nodup :=
have H_neq : tgt₀ ≠ tgt, from nodup_at_idx_neq H_nodup H_at_idx,
have H_at_idx_next : at_idx tgts idx tgt₀, from at_idx_of_cons H_at_idx,
begin
dunfold foldr,
rw env.get_insert_diff _ _ H_neq,
exact (strip_foldr_step H_at_idx_next (nodup_of_nodup_cons H_nodup)),
end
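-- Main theorem of this file: looking up any target in the dictionary produced by
-- backprop_core yields the same value as the slow, non-memoized compute_grad_slow.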
lemma memoize_correct (costs : list ID) :
∀ (nodes : list node) (m : env) {tgts : list reference},
∀ {tgt₀ : reference} {idx : ℕ}, at_idx tgts idx tgt₀ →
nodup (tgts ++ map node.ref nodes) →
env.get tgt₀ (backprop_core costs nodes m tgts)
=
compute_grad_slow costs nodes m tgt₀
| _ _ [] _ _ H_at_idx _ := false.rec _ (nat.not_lt_zero _ H_at_idx^.left)
| [] m (tgt::tgts) tgt₀ idx H_at_idx H_nodup :=
have H_nodup_tgts : nodup (tgt::tgts), from nodup_of_nodup_append_left H_nodup,
begin
dunfold backprop_core backprop_core_helper compute_init_dict,
rw (strip_foldr_base H_at_idx H_nodup_tgts),
dunfold compute_grad_step,
rw sumr_sumr₁,
reflexivity,
end
| (n::nodes) m (tgt::tgts) tgt₀ idx H_at_idx H_nodup :=
have H_nodup_tgts : nodup (tgt::tgts), from nodup_of_nodup_append_left H_nodup,
have H_nodup_n : nodup ((n^.ref :: tgt :: tgts) ++ map node.ref nodes), from nodup_append_swap H_nodup,
have H_at_idx_tgt₀ : at_idx (n^.ref :: tgt :: tgts) (idx+1) tgt₀, from at_idx_cons H_at_idx,
have H_at_idx_n : at_idx (n^.ref :: tgt :: tgts) 0 n^.ref, from at_idx_0,
begin
dunfold backprop_core backprop_core_helper compute_init_dict,
rw (strip_foldr_step H_at_idx H_nodup_tgts),
dunfold compute_grad_step compute_grad_slow,
apply step_correct,
apply (memoize_correct _ _ H_at_idx_tgt₀ H_nodup_n),
apply (memoize_correct _ _ H_at_idx_n H_nodup_n)
end
end theorems
end certigrad
|
lemma residue_simple_pole_limit: assumes "isolated_singularity_at f z0" assumes "is_pole f z0" "zorder f z0 = - 1" assumes "((\<lambda>x. f (g x) * (g x - z0)) \<longlongrightarrow> c) F" assumes "filterlim g (at z0) F" "F \<noteq> bot" shows "residue f z0 = c" |
[STATEMENT]
lemma primes_infinite_analytic: "infinite {p :: nat. prime p}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. infinite {p. prime p}
[PROOF STEP]
proof
\<comment> \<open>Suppose the set of primes were finite.\<close>
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
define P :: "nat set" where "P = {p. prime p}"
[PROOF STATE]
proof (state)
this:
P = {p. prime p}
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
assume fin: "finite P"
\<comment> \<open>Then the Euler product form of the $\zeta$ function ranges over a finite set,
and since each factor is holomorphic in the positive real half-space,
the product is, too.\<close>
[PROOF STATE]
proof (state)
this:
finite P
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
define zeta' :: "complex \<Rightarrow> complex"
where "zeta' = (\<lambda>s. (\<Prod>p\<in>P. inverse (1 - 1 / of_nat p powr s)))"
[PROOF STATE]
proof (state)
this:
zeta' = (\<lambda>s. \<Prod>p\<in>P. inverse (1 - 1 / of_nat p powr s))
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
have holo: "zeta' holomorphic_on A" if "A \<subseteq> {s. Re s > 0}" for A
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
fix p :: nat and s :: complex
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
assume p: "p \<in> P" and s: "s \<in> A"
[PROOF STATE]
proof (state)
this:
p \<in> P
s \<in> A
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
from p
[PROOF STATE]
proof (chain)
picking this:
p \<in> P
[PROOF STEP]
have p': "real p > 1"
[PROOF STATE]
proof (prove)
using this:
p \<in> P
goal (1 subgoal):
1. 1 < real p
[PROOF STEP]
by (subst of_nat_1 [symmetric], subst of_nat_less_iff) (simp add: prime_gt_Suc_0_nat P_def)
[PROOF STATE]
proof (state)
this:
1 < real p
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
have "norm (of_nat p powr s) = real p powr Re s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cmod (of_nat p powr s) = real p powr Re s
[PROOF STEP]
by (simp add: norm_powr_real_powr)
[PROOF STATE]
proof (state)
this:
cmod (of_nat p powr s) = real p powr Re s
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cmod (of_nat p powr s) = real p powr Re s
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
have "\<dots> > real p powr 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real p powr 0 < real p powr Re s
[PROOF STEP]
using p p' s that
[PROOF STATE]
proof (prove)
using this:
p \<in> P
1 < real p
s \<in> A
A \<subseteq> {s. 0 < Re s}
goal (1 subgoal):
1. real p powr 0 < real p powr Re s
[PROOF STEP]
by (subst powr_less_cancel_iff) (auto simp: prime_gt_1_nat)
[PROOF STATE]
proof (state)
this:
real p powr 0 < real p powr Re s
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
real p powr 0 < cmod (of_nat p powr s)
[PROOF STEP]
have "of_nat p powr s \<noteq> 1"
[PROOF STATE]
proof (prove)
using this:
real p powr 0 < cmod (of_nat p powr s)
goal (1 subgoal):
1. of_nat p powr s \<noteq> 1
[PROOF STEP]
using p
[PROOF STATE]
proof (prove)
using this:
real p powr 0 < cmod (of_nat p powr s)
p \<in> P
goal (1 subgoal):
1. of_nat p powr s \<noteq> 1
[PROOF STEP]
by (auto simp: P_def)
[PROOF STATE]
proof (state)
this:
of_nat p powr s \<noteq> 1
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>?p2 \<in> P; ?s2 \<in> A\<rbrakk> \<Longrightarrow> of_nat ?p2 powr ?s2 \<noteq> 1
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?p2 \<in> P; ?s2 \<in> A\<rbrakk> \<Longrightarrow> of_nat ?p2 powr ?s2 \<noteq> 1
goal (1 subgoal):
1. zeta' holomorphic_on A
[PROOF STEP]
by (auto simp: zeta'_def P_def intro!: holomorphic_intros)
[PROOF STATE]
proof (state)
this:
zeta' holomorphic_on A
goal:
No subgoals!
[PROOF STEP]
qed
\<comment> \<open>The Euler product expansion of $\zeta(s)$ is valid for all $s$ with
      real part greater than 1, so by analytic continuation, $\zeta(s)$ and the
      Euler product must be equal on the whole positive real half-space
      punctured at 1.\<close>
[PROOF STATE]
proof (state)
this:
?A \<subseteq> {s. 0 < Re s} \<Longrightarrow> zeta' holomorphic_on ?A
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
have eq: "zeta s = zeta' s" if "Re s > 0" "s \<noteq> 1" for s
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zeta s = zeta' s
[PROOF STEP]
proof (rule analytic_continuation_open[of "{s. Re s > 1}" "{s. Re s > 0} - {1}" zeta zeta'])
[PROOF STATE]
proof (state)
goal (9 subgoals):
1. open {s. 1 < Re s}
2. open ({s. 0 < Re s} - {1})
3. {s. 1 < Re s} \<noteq> {}
4. connected ({s. 0 < Re s} - {1})
5. {s. 1 < Re s} \<subseteq> {s. 0 < Re s} - {1}
6. zeta holomorphic_on {s. 0 < Re s} - {1}
7. zeta' holomorphic_on {s. 0 < Re s} - {1}
8. \<And>z. z \<in> {s. 1 < Re s} \<Longrightarrow> zeta z = zeta' z
9. s \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
fix s
[PROOF STATE]
proof (state)
goal (9 subgoals):
1. open {s. 1 < Re s}
2. open ({s. 0 < Re s} - {1})
3. {s. 1 < Re s} \<noteq> {}
4. connected ({s. 0 < Re s} - {1})
5. {s. 1 < Re s} \<subseteq> {s. 0 < Re s} - {1}
6. zeta holomorphic_on {s. 0 < Re s} - {1}
7. zeta' holomorphic_on {s. 0 < Re s} - {1}
8. \<And>z. z \<in> {s. 1 < Re s} \<Longrightarrow> zeta z = zeta' z
9. s__ \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
assume s: "s \<in> {s. Re s > 1}"
[PROOF STATE]
proof (state)
this:
s \<in> {s. 1 < Re s}
goal (9 subgoals):
1. open {s. 1 < Re s}
2. open ({s. 0 < Re s} - {1})
3. {s. 1 < Re s} \<noteq> {}
4. connected ({s. 0 < Re s} - {1})
5. {s. 1 < Re s} \<subseteq> {s. 0 < Re s} - {1}
6. zeta holomorphic_on {s. 0 < Re s} - {1}
7. zeta' holomorphic_on {s. 0 < Re s} - {1}
8. \<And>z. z \<in> {s. 1 < Re s} \<Longrightarrow> zeta z = zeta' z
9. s__ \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
let ?f = "(\<lambda>n. \<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1)"
[PROOF STATE]
proof (state)
goal (9 subgoals):
1. open {s. 1 < Re s}
2. open ({s. 0 < Re s} - {1})
3. {s. 1 < Re s} \<noteq> {}
4. connected ({s. 0 < Re s} - {1})
5. {s. 1 < Re s} \<subseteq> {s. 0 < Re s} - {1}
6. zeta holomorphic_on {s. 0 < Re s} - {1}
7. zeta' holomorphic_on {s. 0 < Re s} - {1}
8. \<And>z. z \<in> {s. 1 < Re s} \<Longrightarrow> zeta z = zeta' z
9. s__ \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
have "eventually (\<lambda>n. ?f n = zeta' s) sequentially"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<^sub>F n in sequentially. (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
[PROOF STEP]
using eventually_ge_at_top[of "Max P"]
[PROOF STATE]
proof (prove)
using this:
eventually ((\<le>) (Max P)) sequentially
goal (1 subgoal):
1. \<forall>\<^sub>F n in sequentially. (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
[PROOF STEP]
proof eventually_elim
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>n. Max P \<le> n \<Longrightarrow> (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
[PROOF STEP]
case (elim n)
[PROOF STATE]
proof (state)
this:
Max P \<le> n
goal (1 subgoal):
1. \<And>n. Max P \<le> n \<Longrightarrow> (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
[PROOF STEP]
have "P \<noteq> {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<noteq> {}
[PROOF STEP]
by (auto simp: P_def intro!: exI[of _ 2])
[PROOF STATE]
proof (state)
this:
P \<noteq> {}
goal (1 subgoal):
1. \<And>n. Max P \<le> n \<Longrightarrow> (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
[PROOF STEP]
with elim
[PROOF STATE]
proof (chain)
picking this:
Max P \<le> n
P \<noteq> {}
[PROOF STEP]
have "P \<subseteq> {..n}"
[PROOF STATE]
proof (prove)
using this:
Max P \<le> n
P \<noteq> {}
goal (1 subgoal):
1. P \<subseteq> {..n}
[PROOF STEP]
using fin
[PROOF STATE]
proof (prove)
using this:
Max P \<le> n
P \<noteq> {}
finite P
goal (1 subgoal):
1. P \<subseteq> {..n}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
P \<subseteq> {..n}
goal (1 subgoal):
1. \<And>n. Max P \<le> n \<Longrightarrow> (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
P \<subseteq> {..n}
goal (1 subgoal):
1. (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
[PROOF STEP]
unfolding zeta'_def
[PROOF STATE]
proof (prove)
using this:
P \<subseteq> {..n}
goal (1 subgoal):
1. (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = (\<Prod>p\<in>P. inverse (1 - 1 / of_nat p powr s))
[PROOF STEP]
by (intro prod.mono_neutral_cong_right) (auto simp: P_def)
[PROOF STATE]
proof (state)
this:
(\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F n in sequentially. (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
goal (9 subgoals):
1. open {s. 1 < Re s}
2. open ({s. 0 < Re s} - {1})
3. {s. 1 < Re s} \<noteq> {}
4. connected ({s. 0 < Re s} - {1})
5. {s. 1 < Re s} \<subseteq> {s. 0 < Re s} - {1}
6. zeta holomorphic_on {s. 0 < Re s} - {1}
7. zeta' holomorphic_on {s. 0 < Re s} - {1}
8. \<And>z. z \<in> {s. 1 < Re s} \<Longrightarrow> zeta z = zeta' z
9. s__ \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F n in sequentially. (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
goal (9 subgoals):
1. open {s. 1 < Re s}
2. open ({s. 0 < Re s} - {1})
3. {s. 1 < Re s} \<noteq> {}
4. connected ({s. 0 < Re s} - {1})
5. {s. 1 < Re s} \<subseteq> {s. 0 < Re s} - {1}
6. zeta holomorphic_on {s. 0 < Re s} - {1}
7. zeta' holomorphic_on {s. 0 < Re s} - {1}
8. \<And>z. z \<in> {s. 1 < Re s} \<Longrightarrow> zeta z = zeta' z
9. s__ \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
from s
[PROOF STATE]
proof (chain)
picking this:
s \<in> {s. 1 < Re s}
[PROOF STEP]
have "?f \<longlonglongrightarrow> zeta s"
[PROOF STATE]
proof (prove)
using this:
s \<in> {s. 1 < Re s}
goal (1 subgoal):
1. (\<lambda>n. \<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) \<longlonglongrightarrow> zeta s
[PROOF STEP]
by (intro euler_product_zeta) auto
[PROOF STATE]
proof (state)
this:
(\<lambda>n. \<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) \<longlonglongrightarrow> zeta s
goal (9 subgoals):
1. open {s. 1 < Re s}
2. open ({s. 0 < Re s} - {1})
3. {s. 1 < Re s} \<noteq> {}
4. connected ({s. 0 < Re s} - {1})
5. {s. 1 < Re s} \<subseteq> {s. 0 < Re s} - {1}
6. zeta holomorphic_on {s. 0 < Re s} - {1}
7. zeta' holomorphic_on {s. 0 < Re s} - {1}
8. \<And>z. z \<in> {s. 1 < Re s} \<Longrightarrow> zeta z = zeta' z
9. s__ \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<forall>\<^sub>F n in sequentially. (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
(\<lambda>n. \<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) \<longlonglongrightarrow> zeta s
[PROOF STEP]
have "(\<lambda>_. zeta' s) \<longlonglongrightarrow> zeta s"
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F n in sequentially. (\<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) = zeta' s
(\<lambda>n. \<Prod>p\<le>n. if prime p then inverse (1 - 1 / of_nat p powr s) else 1) \<longlonglongrightarrow> zeta s
goal (1 subgoal):
1. (\<lambda>_. zeta' s) \<longlonglongrightarrow> zeta s
[PROOF STEP]
by (blast intro: Lim_transform_eventually)
[PROOF STATE]
proof (state)
this:
(\<lambda>_. zeta' s) \<longlonglongrightarrow> zeta s
goal (9 subgoals):
1. open {s. 1 < Re s}
2. open ({s. 0 < Re s} - {1})
3. {s. 1 < Re s} \<noteq> {}
4. connected ({s. 0 < Re s} - {1})
5. {s. 1 < Re s} \<subseteq> {s. 0 < Re s} - {1}
6. zeta holomorphic_on {s. 0 < Re s} - {1}
7. zeta' holomorphic_on {s. 0 < Re s} - {1}
8. \<And>z. z \<in> {s. 1 < Re s} \<Longrightarrow> zeta z = zeta' z
9. s__ \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
thus "zeta s = zeta' s"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>_. zeta' s) \<longlonglongrightarrow> zeta s
goal (1 subgoal):
1. zeta s = zeta' s
[PROOF STEP]
by (simp add: LIMSEQ_const_iff)
[PROOF STATE]
proof (state)
this:
zeta s = zeta' s
goal (8 subgoals):
1. open {s. 1 < Re s}
2. open ({s. 0 < Re s} - {1})
3. {s. 1 < Re s} \<noteq> {}
4. connected ({s. 0 < Re s} - {1})
5. {s. 1 < Re s} \<subseteq> {s. 0 < Re s} - {1}
6. zeta holomorphic_on {s. 0 < Re s} - {1}
7. zeta' holomorphic_on {s. 0 < Re s} - {1}
8. s__ \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
qed (auto intro!: exI[of _ 2] open_halfspace_Re_gt connected_open_delete convex_halfspace_Re_gt
holomorphic_intros holo that intro: convex_connected)
\<comment> \<open>However, since the Euler product is holomorphic on the entire positive real
half-space, it cannot have a pole at 1, while $\zeta(s)$ does have a pole
at 1. Since they are equal in the punctured neighbourhood of 1, this is
a contradiction.\<close>
[PROOF STATE]
proof (state)
this:
\<lbrakk>0 < Re ?s; ?s \<noteq> 1\<rbrakk> \<Longrightarrow> zeta ?s = zeta' ?s
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
have ev: "eventually (\<lambda>s. s \<in> {s. Re s > 0} - {1}) (at 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<^sub>F s in at 1. s \<in> {s. 0 < Re s} - {1}
[PROOF STEP]
by (auto simp: eventually_at_filter intro!: open_halfspace_Re_gt
eventually_mono[OF eventually_nhds_in_open[of "{s. Re s > 0}"]])
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F s in at 1. s \<in> {s. 0 < Re s} - {1}
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
have "\<not>is_pole zeta' 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> is_pole zeta' 1
[PROOF STEP]
by (rule not_is_pole_holomorphic [of "{s. Re s > 0}"]) (auto intro!: holo open_halfspace_Re_gt)
[PROOF STATE]
proof (state)
this:
\<not> is_pole zeta' 1
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<not> is_pole zeta' 1
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
have "is_pole zeta' 1 \<longleftrightarrow> is_pole zeta 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_pole zeta' 1 = is_pole zeta 1
[PROOF STEP]
unfolding is_pole_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. filterlim zeta' at_infinity (at 1) = filterlim zeta at_infinity (at 1)
[PROOF STEP]
by (intro filterlim_cong refl eventually_mono [OF ev] eq [symmetric]) auto
[PROOF STATE]
proof (state)
this:
is_pole zeta' 1 = is_pole zeta 1
goal (1 subgoal):
1. finite {p. prime p} \<Longrightarrow> False
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
\<not> is_pole zeta 1
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
\<not> is_pole zeta 1
goal (1 subgoal):
1. False
[PROOF STEP]
using is_pole_zeta
[PROOF STATE]
proof (prove)
using this:
\<not> is_pole zeta 1
is_pole zeta 1
goal (1 subgoal):
1. False
[PROOF STEP]
by contradiction
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed |
DPTRFS Example Program Results
Solution(s)
1 2
1 2.5000 2.0000
2 2.0000 -1.0000
3 1.0000 -3.0000
4 -1.0000 6.0000
5 3.0000 -5.0000
Backward errors (machine-dependent)
0.0E+00 7.4E-17
Estimated forward error bounds (machine-dependent)
2.4E-14 4.7E-14
|
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon
-/
import data.set.basic
import tactic.interactive
open set
variables {α β : Type}
@[simp] lemma singleton_inter_singleton_eq_empty {x y : α} :
({x} ∩ {y} = (∅ : set α)) ↔ x ≠ y :=
by simp [singleton_inter_eq_empty]
example {f : β → α} {x y : α} (h : x ≠ y) : f ⁻¹' {x} ∩ f ⁻¹' {y} = ∅ :=
begin
have : {x} ∩ {y} = (∅ : set α) := by simpa using h,
convert preimage_empty,
rw [←preimage_inter,this],
end
example (P : Prop) (h : P) : P := by convert h
|
\<^marker>\<open>creator "Kevin Kappelmann"\<close>
paragraph \<open>Antisymmetric\<close>
theory Binary_Relations_Antisymmetric
imports
Binary_Relation_Functions
HOL_Syntax_Bundles_Lattices
begin
consts antisymmetric_on :: "'a \<Rightarrow> ('b \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> bool"
overloading
antisymmetric_on_pred \<equiv> "antisymmetric_on :: ('a \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> bool"
begin
definition "antisymmetric_on_pred P R \<equiv> \<forall>x y. P x \<and> P y \<and> R x y \<and> R y x \<longrightarrow> x = y"
end
lemma antisymmetric_onI [intro]:
assumes "\<And>x y. P x \<Longrightarrow> P y \<Longrightarrow> R x y \<Longrightarrow> R y x \<Longrightarrow> x = y"
shows "antisymmetric_on P R"
unfolding antisymmetric_on_pred_def using assms by blast
lemma antisymmetric_onD:
assumes "antisymmetric_on P R"
and "P x" "P y"
and "R x y" "R y x"
shows "x = y"
using assms unfolding antisymmetric_on_pred_def by blast
definition "antisymmetric (R :: 'a \<Rightarrow> _) \<equiv> antisymmetric_on (\<top> :: 'a \<Rightarrow> bool) R"
lemma antisymmetric_eq_antisymmetric_on:
"antisymmetric (R :: 'a \<Rightarrow> _) = antisymmetric_on (\<top> :: 'a \<Rightarrow> bool) R"
unfolding antisymmetric_def ..
lemma antisymmetricI [intro]:
assumes "\<And>x y. R x y \<Longrightarrow> R y x \<Longrightarrow> x = y"
shows "antisymmetric R"
unfolding antisymmetric_eq_antisymmetric_on using assms
by (intro antisymmetric_onI)
lemma antisymmetricD:
assumes "antisymmetric R"
and "R x y" "R y x"
shows "x = y"
using assms unfolding antisymmetric_eq_antisymmetric_on
by (auto dest: antisymmetric_onD)
lemma antisymmetric_on_if_antisymmetric:
fixes P :: "'a \<Rightarrow> bool" and R :: "'a \<Rightarrow> _"
assumes "antisymmetric R"
shows "antisymmetric_on P R"
using assms by (intro antisymmetric_onI) (blast dest: antisymmetricD)
lemma antisymmetric_if_antisymmetric_on_in_field:
assumes "antisymmetric_on (in_field R) R"
shows "antisymmetric R"
using assms by (intro antisymmetricI) (blast dest: antisymmetric_onD)
corollary antisymmetric_on_in_field_iff_antisymmetric [simp]:
"antisymmetric_on (in_field R) R \<longleftrightarrow> antisymmetric R"
using antisymmetric_if_antisymmetric_on_in_field antisymmetric_on_if_antisymmetric
by blast
end |
State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m ∈ filter (fun k => n ≤ k) (range j)
⊢ n ≤ m State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m < j ∧ n ≤ m
⊢ n ≤ m Tactic: simp at hm State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m < j ∧ n ≤ m
⊢ n ≤ m State After: no goals Tactic: tauto State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m ∈ filter (fun k => n ≤ k) (range j)
⊢ m < j State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m < j ∧ n ≤ m
⊢ m < j Tactic: simp at hm State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m < j ∧ n ≤ m
⊢ m < j State After: no goals Tactic: tauto State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m ∈ filter (fun k => n ≤ k) (range j)
⊢ 1 / ↑(Nat.factorial m) = 1 / ↑(Nat.factorial ((fun m x => m - n) m hm + n)) State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m ∈ filter (fun k => n ≤ k) (range j)
⊢ n ≤ m Tactic: rw [tsub_add_cancel_of_le] State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m ∈ filter (fun k => n ≤ k) (range j)
⊢ n ≤ m State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m < j ∧ n ≤ m
⊢ n ≤ m Tactic: simp at * State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
m : ℕ
hm : m < j ∧ n ≤ m
⊢ n ≤ m State After: no goals Tactic: tauto State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
a₁ a₂ : ℕ
ha₁ : a₁ ∈ filter (fun k => n ≤ k) (range j)
ha₂ : a₂ ∈ filter (fun k => n ≤ k) (range j)
h : (fun m x => m - n) a₁ ha₁ = (fun m x => m - n) a₂ ha₂
⊢ a₁ = a₂ State After: no goals Tactic: rwa [tsub_eq_iff_eq_add_of_le, tsub_add_eq_add_tsub, eq_comm, tsub_eq_iff_eq_add_of_le,
add_left_inj, eq_comm] at h <;>
simp at * <;> aesop State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
b : ℕ
hb : b ∈ range (j - n)
⊢ b = (fun m x => m - n) (b + n) (_ : b + n ∈ filter (fun k => n ≤ k) (range j)) State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
b : ℕ
hb : b ∈ range (j - n)
⊢ b = b + n - n Tactic: dsimp State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
b : ℕ
hb : b ∈ range (j - n)
⊢ b = b + n - n State After: no goals Tactic: rw [add_tsub_cancel_right] State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ ∑ m in range (j - n), 1 / ↑(Nat.factorial (m + n)) ≤ ∑ m in range (j - n), (↑(Nat.factorial n) * ↑(Nat.succ n) ^ m)⁻¹ State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ ∑ x in range (j - n), (↑(Nat.factorial (x + n)))⁻¹ ≤ ∑ m in range (j - n), (↑(Nat.factorial n) * ↑(Nat.succ n) ^ m)⁻¹ Tactic: simp_rw [one_div] State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ ∑ x in range (j - n), (↑(Nat.factorial (x + n)))⁻¹ ≤ ∑ m in range (j - n), (↑(Nat.factorial n) * ↑(Nat.succ n) ^ m)⁻¹ State After: case h.h
α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
i✝ : ℕ
a✝ : i✝ ∈ range (j - n)
⊢ ↑(Nat.factorial n) * ↑(Nat.succ n) ^ i✝ ≤ ↑(Nat.factorial (i✝ + n)) Tactic: gcongr State Before: case h.h
α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
i✝ : ℕ
a✝ : i✝ ∈ range (j - n)
⊢ ↑(Nat.factorial n) * ↑(Nat.succ n) ^ i✝ ≤ ↑(Nat.factorial (i✝ + n)) State After: case h.h
α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
i✝ : ℕ
a✝ : i✝ ∈ range (j - n)
⊢ Nat.factorial n * Nat.succ n ^ i✝ ≤ Nat.factorial (n + i✝) Tactic: rw [← Nat.cast_pow, ← Nat.cast_mul, Nat.cast_le, add_comm] State Before: case h.h
α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
i✝ : ℕ
a✝ : i✝ ∈ range (j - n)
⊢ Nat.factorial n * Nat.succ n ^ i✝ ≤ Nat.factorial (n + i✝) State After: no goals Tactic: exact Nat.factorial_mul_pow_le_factorial State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ ∑ m in range (j - n), (↑(Nat.factorial n) * ↑(Nat.succ n) ^ m)⁻¹ =
(↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m State After: no goals Tactic: simp [mul_inv, mul_sum.symm, sum_mul.symm, -Nat.factorial_succ, mul_comm, inv_pow] State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ (↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m =
(↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
⊢ (↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m =
(↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) Tactic: have h₁ : (n.succ : α) ≠ 1 :=
@Nat.cast_one α _ ▸ mt Nat.cast_inj.1 (mt Nat.succ.inj (pos_iff_ne_zero.1 hn)) State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
⊢ (↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m =
(↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
h₂ : ↑(Nat.succ n) ≠ 0
⊢ (↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m =
(↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) Tactic: have h₂ : (n.succ : α) ≠ 0 := by positivity State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
h₂ : ↑(Nat.succ n) ≠ 0
⊢ (↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m =
(↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
h₂ : ↑(Nat.succ n) ≠ 0
h₃ : ↑(Nat.factorial n) * ↑n ≠ 0
⊢ (↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m =
(↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) Tactic: have h₃ : (n.factorial * n : α) ≠ 0 := by positivity State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
h₂ : ↑(Nat.succ n) ≠ 0
h₃ : ↑(Nat.factorial n) * ↑n ≠ 0
⊢ (↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m =
(↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) State After: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
h₂ : ↑(Nat.succ n) ≠ 0
h₃ : ↑(Nat.factorial n) * ↑n ≠ 0
h₄ : ↑(Nat.succ n) - 1 = ↑n
⊢ (↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m =
(↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) Tactic: have h₄ : (n.succ - 1 : α) = n := by simp State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
h₂ : ↑(Nat.succ n) ≠ 0
h₃ : ↑(Nat.factorial n) * ↑n ≠ 0
h₄ : ↑(Nat.succ n) - 1 = ↑n
⊢ (↑(Nat.factorial n))⁻¹ * ∑ m in range (j - n), (↑(Nat.succ n))⁻¹ ^ m =
(↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) State After: no goals Tactic: rw [geom_sum_inv h₁ h₂, eq_div_iff_mul_eq h₃, mul_comm _ (n.factorial * n : α),
← mul_assoc (n.factorial⁻¹ : α), ← mul_inv_rev, h₄, ← mul_assoc (n.factorial * n : α),
mul_comm (n : α) n.factorial, mul_inv_cancel h₃, one_mul, mul_comm] State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
⊢ ↑(Nat.succ n) ≠ 0 State After: no goals Tactic: positivity State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
h₂ : ↑(Nat.succ n) ≠ 0
⊢ ↑(Nat.factorial n) * ↑n ≠ 0 State After: no goals Tactic: positivity State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
h₁ : ↑(Nat.succ n) ≠ 1
h₂ : ↑(Nat.succ n) ≠ 0
h₃ : ↑(Nat.factorial n) * ↑n ≠ 0
⊢ ↑(Nat.succ n) - 1 = ↑n State After: no goals Tactic: simp State Before: α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ (↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n)) / (↑(Nat.factorial n) * ↑n) ≤
↑(Nat.succ n) / (↑(Nat.factorial n) * ↑n) State After: case h
α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ ↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n) ≤ ↑(Nat.succ n) Tactic: gcongr State Before: case h
α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ ↑(Nat.succ n) - ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n) ≤ ↑(Nat.succ n) State After: case h.a
α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ 0 ≤ ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n) Tactic: apply sub_le_self State Before: case h.a
α : Type u_1
inst✝ : LinearOrderedField α
n j : ℕ
hn : 0 < n
⊢ 0 ≤ ↑(Nat.succ n) * (↑(Nat.succ n))⁻¹ ^ (j - n) State After: no goals Tactic: positivity |
// (C) Copyright John Maddock 2007.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "mp_t.hpp"
#include <boost/math/special_functions/expint.hpp>
#include <boost/math/constants/constants.hpp>
#include <fstream>
#include <boost/math/tools/test_data.hpp>
using namespace boost::math::tools;
struct expint_data_generator
{
mp_t operator()(mp_t a, mp_t b)
{
unsigned n = boost::math::tools::real_cast<unsigned>(a);
std::cout << n << " " << b << " ";
mp_t result = boost::math::expint(n, b);
std::cout << result << std::endl;
return result;
}
};
int main()
{
boost::math::expint(1, 0.06227754056453704833984375);
std::cout << boost::math::expint(1, mp_t(0.5)) << std::endl;
parameter_info<mp_t> arg1, arg2;
test_data<mp_t> data;
std::cout << "Welcome.\n"
"This program will generate spot tests for the expint function:\n"
" expint(a, b)\n\n";
bool cont;
std::string line;
do{
get_user_parameter_info(arg1, "a");
get_user_parameter_info(arg2, "b");
data.insert(expint_data_generator(), arg1, arg2);
std::cout << "Any more data [y/n]?";
std::getline(std::cin, line);
boost::algorithm::trim(line);
cont = (line == "y");
}while(cont);
std::cout << "Enter name of test data file [default=expint_data.ipp]";
std::getline(std::cin, line);
boost::algorithm::trim(line);
if(line == "")
line = "expint_data.ipp";
std::ofstream ofs(line.c_str());
ofs << std::scientific << std::setprecision(40);
write_code(ofs, data, "expint_data");
return 0;
}
|
# Calculators
**CS1302 Introduction to Computer Programming**
___
```python
import math
from math import cos, exp, log, pi, sin, tan
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interact
# interactive plot with ipympl
%matplotlib widget
```
The following code is a Python one-liner that creates a calculator.
- Evaluate the cell with `Ctrl+Enter`.
- Enter `1+1` and see the result.
```python
print(eval(input()))
```
---
**Tip**
Try some calculations below using this calculator:
1. $2^3$ by entering `2**3`;
1. $\frac23$ by entering `2/3`;
1. $\left\lfloor\frac32\right\rfloor$ by entering `3//2`;
1. $3\bmod 2$ by entering `3%2`;
1. $\sqrt{2}$ by entering `2**0.5`; and
1. $\sin(\pi/6)$ by entering `sin(pi/6)`.
---
For this lab, you will create more powerful and dedicated calculators.
We will first show you a demo. Then, it will be your turn to create the calculators.
## Hypotenuse Calculator
---
**Proposition**
By the Pythagoras theorem, given a right-angled triangle,
the length of the hypotenuse is
$$
c = \sqrt{a^2 + b^2}
$$ (hypotenuse)
where $a$ and $b$ are the lengths of the other sides of the triangle.
---
We can define the following function to calculate the length `c` of the hypotenuse when given the lengths `a` and `b` of the other sides:
```python
def length_of_hypotenuse(a, b):
c = (a**2 + b**2)**(0.5) # Pythagoras
return c
```
---
**Important**
You need not understand how a function is defined, but
- you should know how to *write the formula {eq}`hypotenuse` as a Python expression* using the exponentiation operator `**`, and
- *assign the variable* `c` the value of the expression (Line 2) using the assignment operator `=`.
---
For example, you may be asked to write Line 2, while Lines 1 and 3 are given to you:
**Exercise** Complete the function below to return the length `c` of the hypotenuse given the lengths `a` and `b`.
```python
def length_of_hypotenuse(a, b):
# YOUR CODE HERE
raise NotImplementedError()
return c
```
---
**Caution**
- Complete the above exercise to get credit, even though the answer was already revealed in the demo. Instead of copying and pasting the answer, type it yourself.
- Note that indentation affects the execution of Python code. In particular, the assignment statement must be indented to indicate that it is part of the *body* of the function.
---
We will use `ipywidgets` to let the user interact with the calculator more easily:
- After running the cell, move the sliders to change the values of `a` and `b`.
- Observe that the value of `c` is updated immediately.
```python
# hypotenuse calculator
@interact(a=(0, 10, 1), b=(0, 10, 1))
def calculate_hypotenuse(a=3, b=4):
print("c: {:.2f}".format(length_of_hypotenuse(a, b)))
```
---
**Important**
You need not know how to write widgets, but you should know how to *format a floating point number* (Line 3, illustrated after this note).
---
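As a small, standalone illustration of the `{:.2f}` format specification used in Line 3 above (this cell is not part of the graded exercises):
```python
# format a float to 2 decimal places
print("c: {:.2f}".format(5.0))         # prints: c: 5.00
print("c: {:.2f}".format(8.06225774))  # prints: c: 8.06
```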
You can check your code with a few cases listed in the test cell below.
```python
# tests
def test_length_of_hypotenuse(a, b, c):
c_ = length_of_hypotenuse(a, b)
correct = math.isclose(c, c_)
if not correct:
print(f"For a={a} and b={b}, c should be {c}, not {c_}.")
assert correct
test_length_of_hypotenuse(3, 4, 5)
test_length_of_hypotenuse(0, 0, 0)
test_length_of_hypotenuse(4, 7, 8.06225774829855)
```
## Quadratic equation
### Graphical calculator for parabola
---
**Definition** Parabola
The collection of points $(x,y)$ satisfying the following equation forms a *parabola*:
$$
y=ax^2+bx+c
$$ (parabola)
where $a$, $b$, and $c$ are real numbers called the *coefficients*.
---
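Note that at $x=0$ the parabola passes through $(0, c)$, since $y = a\cdot 0^2 + b\cdot 0 + c = c$; this is why the test cases below, which all use $x=0$, expect $y=c$.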
**Exercise** Given that the variables `x`, `a`, `b`, and `c` store the $x$-coordinate and the coefficients $a$, $b$, and $c$ respectively, assign to `y` the corresponding $y$-coordinate of the parabola {eq}`parabola`.
```python
def get_y(x, a, b, c):
# YOUR CODE HERE
raise NotImplementedError()
return y
```
To test your code:
```python
# tests
def test_get_y(y, x, a, b, c):
y_ = get_y(x, a, b, c)
correct = math.isclose(y, y_)
if not correct:
print(f"With (x, a, b, c)={x,a,b,c}, y should be {y} not {y_}.")
assert correct
test_get_y(0, 0, 0, 0, 0)
test_get_y(1, 0, 1, 2, 1)
test_get_y(2, 0, 2, 1, 2)
```
```python
# hidden tests
```
To run the graphical calculator:
```python
# graphical calculator for parabola
fig, ax = plt.subplots()
xmin, xmax, ymin, ymax, resolution = -10, 10, -10, 10, 50
x = np.linspace(xmin, xmax, resolution)
ax.set_title(r'$y=ax^2+bx+c$')
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
ax.grid()
p, = ax.plot(x, get_y(x, 0, 0, 0))
@interact(a=(-10, 10, 1), b=(-10, 10, 1), c=(-10, 10, 1))
def plot_parabola(a, b, c):
p.set_ydata(get_y(x, a, b, c))
```
### Quadratic equation solver
---
**Proposition**
For the quadratic equation
$$
ax^2+bx+c=0,
$$ (quadratic)
the *roots* (solutions for $x$) are given by
$$
\frac{-b-\sqrt{b^2-4ac}}{2a},\frac{-b+\sqrt{b^2-4ac}}{2a}.
$$ (quadratic_roots)
---
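For a concrete instance of {eq}`quadratic_roots`: with $a=1$, $b=3$, $c=2$ (one of the test cases below), the formula gives

$$
\frac{-3-\sqrt{9-8}}{2}=-2,\qquad \frac{-3+\sqrt{9-8}}{2}=-1,
$$

i.e. the parabola $x^2+3x+2$ crosses the $x$-axis at $-2$ and $-1$.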
**Exercise** Assign to `root1` and `root2` the values of the first and second roots above respectively.
```python
def get_roots(a, b, c):
# YOUR CODE HERE
raise NotImplementedError()
return root1, root2
```
To test your code:
```python
# tests
def test_get_roots(roots, a, b, c):
def mysort(c):
return c.real, c.imag
roots_ = get_roots(a, b, c)
assert np.isclose(sorted(roots, key=mysort),
sorted(roots_, key=mysort)).all()
test_get_roots((-1.0, 0.0), 1, 1, 0)
test_get_roots((-1.0, -1.0), 1, 2, 1)
test_get_roots((-2.0, -1.0), 1, 3, 2)
test_get_roots([(-0.5-0.5j), (-0.5+0.5j)], 2, 2, 1)
```
```python
# hidden tests
```
To run the calculator:
```python
# quadratic equations solver
@interact(a=(-10,10,1),b=(-10,10,1),c=(-10,10,1))
def quadratic_equation_solver(a=1,b=2,c=1):
print('Roots: {}, {}'.format(*get_roots(a,b,c)))
```
## Number conversion
### Byte-to-Decimal calculator
Denote a binary number stored in a byte ($8$ bits) as
$$
b_7\circ b_6\circ b_5\circ b_4\circ b_3\circ b_2\circ b_1\circ b_0,
$$
where $\circ$ concatenates $b_i$'s together into a binary string.
The binary string can be converted to a decimal number by the formula
$$
b_7\cdot 2^7 + b_6\cdot 2^6 + b_5\cdot 2^5 + b_4\cdot 2^4 + b_3\cdot 2^3 + b_2\cdot 2^2 + b_1\cdot 2^1 + b_0\cdot 2^0.
$$
E.g., the binary string `'11111111'` represents the largest integer that a byte can hold:
$$
2^7+2^6+2^5+2^4+2^3+2^2+2^1+2^0=255=2^8-1.
$$
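As a quick sanity check of the positional formula (this is only a verification aid, not the exercise solution — `int(s, 2)` is Python's built-in base-2 parser):
```python
# check the positional sum against Python's built-in base-2 conversion
bits = '11111111'
positional_sum = sum(int(b) * 2**i for i, b in enumerate(reversed(bits)))
print(positional_sum, int(bits, 2), 2**8 - 1)  # all three are 255
```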
**Exercise** Assign to `decimal` the *integer* value represented by the binary sequence `b7,b6,b5,b4,b3,b2,b1,b0` of *characters* `'0'` or `'1'`.
```python
def byte_to_decimal(b7, b6, b5, b4, b3, b2, b1, b0):
"""
Parameters:
-----------
b7, ..., b0 are single characters either '0' or '1'.
"""
# YOUR CODE HERE
raise NotImplementedError()
return decimal
```
To test your code:
```python
# tests
def test_byte_to_decimal(decimal, b7, b6, b5, b4, b3, b2, b1, b0):
decimal_ = byte_to_decimal(b7, b6, b5, b4, b3, b2, b1, b0)
assert decimal == decimal_ and isinstance(decimal_, int)
test_byte_to_decimal(38, '0', '0', '1', '0', '0', '1', '1', '0')
test_byte_to_decimal(20, '0', '0', '0', '1', '0', '1', '0', '0')
test_byte_to_decimal(22, '0', '0', '0', '1', '0', '1', '1', '0')
```
```python
# hidden tests
```
To run the calculator:
```python
# byte-to-decimal calculator
bit = ['0', '1']
@interact(b7=bit, b6=bit, b5=bit, b4=bit, b3=bit, b2=bit, b1=bit, b0=bit)
def convert_byte_to_decimal(b7, b6, b5, b4, b3, b2, b1, b0):
print('decimal:', byte_to_decimal(b7, b6, b5, b4, b3, b2, b1, b0))
```
### Decimal-to-Byte calculator
**Exercise** Assign to `byte` a *string of 8 bits* that represents the value of `decimal`, a non-negative decimal integer from $0$ to $2^8-1=255$.
*Hint: Use `//` and `%`.*
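To see why the hint works, note that `decimal % 2` gives the least significant bit and `decimal // 2` discards it. A small illustration with the value 38 (not one of the test values below):
```python
# peel off the bits of 38 = 0b00100110, least significant bit first
d = 38
for _ in range(8):
    print(d % 2, end='')  # prints 01100100, i.e. the byte in reverse order
    d = d // 2
```
The exercise still requires you to assemble these bits into a correctly ordered 8-character string.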
```python
def decimal_to_byte(decimal):
# YOUR CODE HERE
raise NotImplementedError()
return byte
```
To test your code:
```python
# tests
def test_decimal_to_byte(byte,decimal):
byte_ = decimal_to_byte(decimal)
assert byte == byte_ and isinstance(byte, str) and len(byte) == 8
test_decimal_to_byte('01100111', 103)
test_decimal_to_byte('00000011', 3)
test_decimal_to_byte('00011100', 28)
```
```python
# hidden tests
```
To run the calculator:
```python
# decimal-to-byte calculator
@interact(decimal=(0,255,1))
def convert_decimal_to_byte(decimal=0):
print('byte:', decimal_to_byte(decimal))
```
## Symbolic calculator (optional)
Can we do complicated arithmetic with Python? What about calculus?
$$
\int \tan(x)\, dx = \color{red}{?}
$$
Solution: <https://gamma.sympy.org/input/?i=integrate(tan(x))>
---
**Tip**
- Take a look at the different panels to learn about the solution: `Steps`, `Plot`, and `Derivative`.
- Try different [random examples](https://gamma.sympy.org/).
---
**How does SymPy Gamma work?**
[SymPy Gamma](https://gamma.sympy.org/) is a web application running [SymPy](https://docs.sympy.org/latest/index.html), which is a python library for symbolic computation.
**How to use SymPy?**
To import the library:
```python
import sympy as sp
```
We need to define a symbolic variable and assign it to a python variable.
```python
x = sp.symbols('x')
x
```
The SymPy expression for $\tan(x)$ is:
```python
f = sp.tan(x)
f
```
To compute the integration:
```python
int_f = sp.integrate(f)
int_f
```
To compute the derivative:
```python
diff_int_f = sp.diff(int_f)
diff_int_f
```
The answer can be simplified as expected:
```python
diff_int_f.simplify()
```
To plot:
```python
p = sp.plot(f, int_f, (x, -sp.pi/4, sp.pi/4))
```
**Exercise**
Try to compute the following with SymPy in the Jupyter notebook (a possible setup is sketched after the hint below):
- $\frac{d}{dx} x^x$
- $\frac{d}{dx} \frac{1}{\sqrt{1 - x^2}}$.
---
**Hint**
Use `sp.sqrt` or `**(sp.S(1)/2)` for square root instead of `**0.5`. See [SymPy gotchas](https://docs.sympy.org/latest/gotchas.html).
---
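One possible way to set up these computations, reusing the symbol `x` defined earlier and `sp.sqrt` as suggested in the hint (evaluate each expression in its own cell to see the rendered result):
```python
sp.diff(x**x, x)                   # derivative of x**x
sp.diff(1 / sp.sqrt(1 - x**2), x)  # derivative of 1/sqrt(1 - x**2)
```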
|
import data.real.basic
import data.set.lattice
import topology.basic
import game.topology.union_open_sets
open set
--begin hide
namespace xena
-- Work in progress
-- end hide
-- begin hide
-- Checking mathlib definitions
variable β : Type*
variable [fintype β]
-- end hide
/- Lemma
Finite intersection of open sets is open -- WIP, to do.
-/
lemma is_open_fin_inter_of_open (X : β → set ℝ ) ( hj : ∀ j, is_open (X j) )
: is_open (Inter X) :=
begin
sorry,
end
end xena -- hide
|
module Issue2649-1 where
module MyModule (A : Set) (a : A) where
foo : A
foo = a
|
#####
##### BoundaryCondition
#####
Base.show(io::IO, bc::BC{C, T}) where {C, T} =
println(io, "BoundaryCondition: type=$C, condition=$(bc.condition)")
#####
##### FieldBoundaryConditions
#####
show_field_boundary_conditions(bcs::FieldBoundaryConditions, padding="") =
string("Oceananigans.FieldBoundaryConditions (NamedTuple{(:x, :y, :z)}), with boundary conditions", '\n',
padding, "├── x: ", typeof(bcs.x), '\n',
padding, "├── y: ", typeof(bcs.y), '\n',
padding, "└── z: ", typeof(bcs.z))
Base.show(io::IO, fieldbcs::FieldBoundaryConditions) = print(io, show_field_boundary_conditions(fieldbcs))
#####
##### ModelBoundaryConditions
#####
function show_solution_boundary_conditions(bcs, padding)
stringtuple = Tuple(string(
padding, "├── ", field, ": ",
show_field_boundary_conditions(getproperty(bcs, field), padding * "│ "), '\n')
for field in propertynames(bcs)[1:end-1])
return string("Oceananigans.SolutionBoundaryConditions ",
"(NamedTuple{(:u, :v, :w, ...)}) with boundary conditions ", '\n', stringtuple...,
padding, "└── ", propertynames(bcs)[end], ": ",
show_field_boundary_conditions(bcs[end], padding * " "))
end
Base.show(io::IO, bcs::ModelBoundaryConditions) =
print(io,
"Oceananigans.ModelBoundaryConditions (NamedTuple{(:solution, :tendency, :pressure)}) with ", '\n',
"├── solution: ", show_solution_boundary_conditions(bcs.solution, "│ "), '\n',
"├── tendency: ", show_solution_boundary_conditions(bcs.tendency, "│ "), '\n',
"└── pressure: ", show_field_boundary_conditions(bcs.pressure, " "))
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
(* This theory is a general framework for refinement on C programs.
It is in this directory rather than lib/ because it refers to parts
of the translated state space of the kernel for convenience.
*)
theory Corres_UL_C
imports
"../LemmaBucket_C"
"../LemmaBucket"
"../SIMPL_Lemmas"
begin
declare word_neq_0_conv [simp del]
(* The HoarePartialDef theorems are used extensively
(as opposed to their HoareTotalDef counterparts, which aren't used much).
We can give most their long names, but conseqPre is used over 400 times,
so for these cases we override the namespaces *)
lemmas conseqPre = HoarePartialDef.conseqPre
lemmas conseqPost = HoarePartialDef.conseqPost
inductive_set
exec_handlers :: "('s, 'p, 'e) body \<Rightarrow> ('s \<times> ('s, 'p, 'e) com list \<times> nat \<times> ('s, 'e) xstate) set"
and
exec_handlers_syn :: "[('s, 'p, 'e) body, ('s, 'p, 'e) com list, 's, nat \<times> ('s, 'e) xstate]
\<Rightarrow> bool" ("_\<turnstile>\<^sub>h \<langle>_,_\<rangle> \<Rightarrow> _" [60,20,98,98] 89)
for \<Gamma> :: "('s, 'p, 'e) body"
where
"\<Gamma> \<turnstile>\<^sub>h \<langle>hs,s\<rangle> \<Rightarrow> ns' == (s, hs, ns') \<in> exec_handlers \<Gamma>"
| EHAbrupt: "\<lbrakk>\<Gamma> \<turnstile> \<langle>h, Normal s\<rangle> \<Rightarrow> Abrupt z; \<Gamma> \<turnstile>\<^sub>h \<langle>hs, z\<rangle> \<Rightarrow> (n, t) \<rbrakk>
\<Longrightarrow> \<Gamma> \<turnstile>\<^sub>h \<langle>h # hs, s\<rangle> \<Rightarrow> (n, t)"
| EHOther: "\<lbrakk>\<Gamma> \<turnstile> \<langle>h, Normal s\<rangle> \<Rightarrow> t; \<not> isAbr t\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile>\<^sub>h \<langle>h # hs, s\<rangle> \<Rightarrow> (length hs, t)"
| EHEmpty: "\<Gamma> \<turnstile>\<^sub>h \<langle>[], s\<rangle> \<Rightarrow> (0, Abrupt s)"
lemma exec_handlers_use_hoare_nothrow:
assumes valid': "E \<turnstile>\<^bsub>/F\<^esub> R' c Q', {}"
and ce: "E \<turnstile>\<^sub>h \<langle>c # hs, s'\<rangle> \<Rightarrow> (n, t)"
and asms: "s' \<in> R'"
shows "(t \<in> Normal ` Q' \<or> t \<in> Fault ` F) \<and> n = length hs"
using valid' ce asms
apply -
apply (drule hoare_sound)
apply (clarsimp elim: exec_Normal_elim_cases
simp: NonDetMonad.bind_def cvalid_def split_def HoarePartialDef.valid_def)
apply (erule exec_handlers.cases)
apply clarsimp
apply (drule spec, drule spec, drule (1) mp)
apply fastforce
apply clarsimp
apply simp
done
definition
unif_rrel :: "bool \<Rightarrow> ('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> ('t \<Rightarrow> 'b)
\<Rightarrow> ('a \<Rightarrow> 'c \<Rightarrow> bool) \<Rightarrow> ('t \<Rightarrow> 'c)
\<Rightarrow> 'a \<Rightarrow> 't \<Rightarrow> bool"
where
"unif_rrel f rrel xf arrel axf \<equiv> \<lambda>x s.
if f then rrel x (xf s) else arrel x (axf s)"
lemma unif_rrel_simps:
"unif_rrel True rrel xf arrel axf = (\<lambda>x s. rrel x (xf s))"
"unif_rrel False rrel xf arrel axf = (\<lambda>x s. arrel x (axf s))"
by (simp add: unif_rrel_def)+
definition
ccorres_underlying :: "(('s \<times> 't) set) \<Rightarrow> ('p \<Rightarrow> ('t, 'p, 'e) com option)
\<Rightarrow> ('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> ('t \<Rightarrow> 'b)
\<Rightarrow> ('a \<Rightarrow> 'c \<Rightarrow> bool) \<Rightarrow> ('t \<Rightarrow> 'c)
\<Rightarrow> ('s \<Rightarrow> bool) \<Rightarrow> ('t set)
\<Rightarrow> ('t, 'p, 'e) com list
\<Rightarrow> ('s, 'a) nondet_monad \<Rightarrow> ('t, 'p, 'e) com \<Rightarrow> bool"
where
"ccorres_underlying srel \<Gamma> rrel xf arrel axf G G' hs \<equiv>
\<lambda>m c. \<forall>(s, s') \<in> srel. G s \<and> s' \<in> G' \<and> \<not> snd (m s) \<longrightarrow>
(\<forall>n t. \<Gamma> \<turnstile>\<^sub>h \<langle>c # hs, s'\<rangle> \<Rightarrow> (n, t) \<longrightarrow>
(case t of
Normal s'' \<Rightarrow> (\<exists>(r, t) \<in> fst (m s). (t, s'') \<in> srel
\<and> unif_rrel (n = length hs) rrel xf arrel axf r s'')
| _ \<Rightarrow> False))"
declare isNormal_simps [simp]
lemma ccorresI [case_names fail nofail]:
assumes fc:
"\<And>s s' n z. \<lbrakk>(s, s') \<in> sr; G s; s' \<in> G'; \<not> snd (m s); \<Gamma> \<turnstile>\<^sub>h \<langle>c # hs, s'\<rangle> \<Rightarrow> (n, z); isAbr z \<or> isFault z \<or> z = Stuck \<rbrakk>
\<Longrightarrow> False"
and nfc: "\<And>n t' s s'. \<lbrakk>(s, s') \<in> sr; G s; s' \<in> G'; \<Gamma> \<turnstile>\<^sub>h \<langle>c # hs, s'\<rangle> \<Rightarrow> (n, Normal t'); \<not> snd (m s)\<rbrakk>
\<Longrightarrow> (\<exists>(r, t) \<in> fst (m s). (t, t') \<in> sr \<and> unif_rrel (n = length hs) rrel xf arrel axf r t')"
shows "ccorres_underlying sr \<Gamma> rrel xf arrel axf G G' hs m c"
unfolding ccorres_underlying_def
apply -
apply clarsimp
apply (case_tac t)
apply simp
apply (erule(4) nfc)
apply simp
apply (erule (4) fc)
apply simp
apply simp
apply (erule (4) fc)
apply simp
apply simp
apply (erule (4) fc)
apply simp
done
lemma ccorresI':
assumes rl: "\<And>s s' n z. \<lbrakk>(s, s') \<in> srel; G s; s' \<in> G'; \<Gamma> \<turnstile>\<^sub>h \<langle>c # hs, s'\<rangle> \<Rightarrow> (n, z); \<not> snd (m s)\<rbrakk> \<Longrightarrow>
(\<exists>t'. z = Normal t' \<and> (\<exists>(r, t) \<in> fst (m s). (t, t') \<in> srel \<and> unif_rrel (n = length hs) rrel xf arrel axf r t'))"
shows "ccorres_underlying srel \<Gamma> rrel xf arrel axf G G' hs m c"
unfolding ccorres_underlying_def
apply -
apply clarsimp
apply (drule (4) rl)
apply (case_tac t)
apply simp
apply simp
apply simp
apply simp
done
lemma exec_handlers_Cons_le[simplified]:
"\<Gamma> \<turnstile>\<^sub>h \<langle>h # hs, s'\<rangle> \<Rightarrow> (n, t) \<Longrightarrow> n \<le> length (tl (h # hs))"
by (induct rule: exec_handlers.induct, simp_all)
lemma exec_handlers_le:
"\<Gamma> \<turnstile>\<^sub>h \<langle>hs, s'\<rangle> \<Rightarrow> (n, t) \<Longrightarrow> n \<le> length hs"
by (induct rule: exec_handlers.induct, simp_all)
lemma ccorresE:
assumes cc: "ccorres_underlying srel \<Gamma> rrel xf arrel axf G G' hs m c"
and ps: "(s, s') \<in> srel" "G s" "s' \<in> G'" "\<not> snd (m s)" "\<Gamma>\<turnstile>\<^sub>h \<langle>c#hs , s'\<rangle> \<Rightarrow> (n, x)"
and nc: "\<And>t' r t. \<lbrakk>x = Normal t'; (r, t) \<in> fst (m s); (t, t') \<in> srel;
unif_rrel (n = length hs) rrel xf arrel axf r t';
n \<le> length hs \<rbrakk> \<Longrightarrow> P"
shows P
using cc ps nc unfolding ccorres_underlying_def
apply clarsimp
apply (drule (1) bspec)
apply simp
apply (elim allE, drule (1) mp)
apply (cases x)
apply (clarsimp dest!: exec_handlers_Cons_le)
apply simp
apply simp
apply simp
done
lemma ccorres_empty_handler_abrupt:
assumes cc: "ccorres_underlying sr \<Gamma> rrel xf' arrel axf P P' [] a c"
and asms: "(s, s') \<in> sr" "P s" "s' \<in> P'" "\<not> snd (a s)"
and eh: "\<Gamma> \<turnstile> \<langle>c, Normal s'\<rangle> \<Rightarrow> t"
shows "\<not> isAbr t"
using cc asms eh
apply -
apply rule
apply (erule isAbrE)
apply simp
apply (drule EHAbrupt [OF _ EHEmpty])
apply (erule (5) ccorresE)
apply simp
done
lemma ccorres_empty_handler_abrupt':
assumes cc: "ccorres_underlying sr \<Gamma> rrel xf' arrel axf P P' [] a c"
and asms: "(s, s') \<in> sr" "P s" "s' \<in> P'" "\<not> snd (a s)"
and eh: "\<Gamma> \<turnstile>\<^sub>h \<langle>c # hs, s'\<rangle> \<Rightarrow> (n, t)"
shows "\<not> isAbr t"
using cc asms eh
apply -
apply (erule exec_handlers.cases)
apply clarsimp
apply (frule (5) ccorres_empty_handler_abrupt)
apply simp
apply simp
apply simp
done
lemma ccorres_handlers_weaken:
assumes cul: "ccorres_underlying sr \<Gamma> rrel xf arrel axf P P' [] a c"
shows "ccorres_underlying sr \<Gamma> rrel xf arrel axf P P' hs a c"
using cul
apply -
apply (rule ccorresI')
apply (frule (5) ccorres_empty_handler_abrupt')
apply (erule exec_handlers.cases)
apply clarsimp
apply (erule (4) ccorresE)
apply (erule EHAbrupt [OF _ EHEmpty])
apply simp
apply clarsimp
apply (erule (4) ccorresE)
apply (erule (1) EHOther)
apply clarsimp
apply (erule bexI [rotated], simp)
apply simp
done
lemma ccorres_from_vcg0:
"(\<forall>\<sigma>. \<Gamma> \<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> srel}
c
{s. \<exists>(rv, \<sigma>') \<in> fst (a \<sigma>). (\<sigma>', s) \<in> srel \<and> rrel rv (xf s)})
\<Longrightarrow> ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' hs a c"
apply (rule ccorresI')
apply (drule_tac x = s in spec)
apply (drule hoare_sound)
apply (clarsimp simp add: HoarePartialDef.valid_def cvalid_def)
apply (erule exec_handlers.cases)
apply clarsimp
apply (drule spec, drule spec, drule (1) mp)
apply clarsimp
apply clarsimp
apply (drule spec, drule spec, drule (1) mp)
apply clarsimp
apply (erule bexI [rotated])
apply (simp add: unif_rrel_simps)
apply simp
done
lemmas ccorres_from_vcg = ccorres_from_vcg0 [THEN ccorres_handlers_weaken]
lemma ccorres_from_vcg_nofail0:
"(\<forall>\<sigma>. \<Gamma> \<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> srel \<and> \<not> snd (a \<sigma>)}
c
{s. \<exists>(rv, \<sigma>') \<in> fst (a \<sigma>). (\<sigma>', s) \<in> srel \<and> rrel rv (xf s)})
\<Longrightarrow> ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' [] a c"
apply (rule ccorresI')
apply (drule_tac x = s in spec)
apply (drule hoare_sound)
apply (simp add: HoarePartialDef.valid_def cvalid_def)
apply (erule exec_handlers.cases)
apply clarsimp
apply (drule spec, drule spec, drule (1) mp)
apply clarsimp
apply clarsimp
apply (drule spec, drule spec, drule (1) mp)
apply clarsimp
apply (erule bexI [rotated])
apply (simp add: unif_rrel_simps)
apply simp
done
lemmas ccorres_from_vcg_nofail2
= ccorres_from_vcg_nofail0 [THEN ccorres_handlers_weaken]
lemma ccorres_from_vcg_nofail:
"(\<forall>\<sigma>. \<Gamma> \<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> srel}
c
{s. \<not> snd (a \<sigma>) \<longrightarrow> (\<exists>(rv, \<sigma>') \<in> fst (a \<sigma>). (\<sigma>', s) \<in> srel \<and> rrel rv (xf s))})
\<Longrightarrow> ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' hs a c"
apply (rule ccorres_from_vcg_nofail2)
apply (erule allEI)
apply (case_tac "snd (a \<sigma>)", simp_all)
apply (rule hoare_complete, simp add: HoarePartialDef.valid_def)
done
lemma ccorres_to_vcg:
"ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' [] a c \<Longrightarrow>
(\<forall>\<sigma>. \<not> snd (a \<sigma>) \<longrightarrow> \<Gamma> \<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> srel}
c
{s. (\<exists>(rv, \<sigma>') \<in> fst (a \<sigma>). (\<sigma>', s) \<in> srel \<and> rrel rv (xf s))})"
apply -
apply rule
apply (rule impI)
apply (rule hoare_complete)
apply (simp add: HoarePartialDef.valid_def cvalid_def)
apply (intro impI allI)
apply clarsimp
apply (frule (5) ccorres_empty_handler_abrupt)
apply (erule (4) ccorresE)
apply (erule (1) EHOther)
apply clarsimp
apply rule
apply simp
apply (fastforce simp: unif_rrel_simps)
done
lemma exec_handlers_Seq_cases0':
assumes eh: "\<Gamma> \<turnstile>\<^sub>h \<langle>h, s\<rangle> \<Rightarrow> (n, t)"
and hv: "h = (a ;; b) # hs"
and r1: "\<And>z t'. \<lbrakk>\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> t'; \<Gamma> \<turnstile> \<langle>b, t'\<rangle> \<Rightarrow> z;
((\<not> isAbr z \<or> hs = []) \<and> n = length hs \<and> z = t) \<or> (\<exists>q. z = Abrupt q \<and> \<Gamma> \<turnstile>\<^sub>h \<langle>hs, q\<rangle> \<Rightarrow> (n, t)); \<not> isAbr t' \<rbrakk> \<Longrightarrow> P"
and r2: "\<And>t'. \<lbrakk>\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> Abrupt t'; \<Gamma> \<turnstile>\<^sub>h \<langle>hs, t'\<rangle> \<Rightarrow> (n, t) \<rbrakk> \<Longrightarrow> P"
shows P
using eh hv r1 r2
proof induct
case (EHOther h s t hs')
hence ex: "\<Gamma> \<turnstile> \<langle>a ;; b, Normal s\<rangle> \<Rightarrow> t" by simp
then obtain t'' where ae: "\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> t''" and be: "\<Gamma> \<turnstile> \<langle>b, t''\<rangle> \<Rightarrow> t"
by (auto elim: exec_Normal_elim_cases)
have r: "\<And>z t'. \<lbrakk>\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> t'; \<Gamma> \<turnstile> \<langle>b, t'\<rangle> \<Rightarrow> z;
((\<not> isAbr z \<or> hs = []) \<and> length hs' = length hs \<and> z = t) \<or> (\<exists>q. z = Abrupt q \<and> \<Gamma> \<turnstile>\<^sub>h \<langle>hs, q\<rangle> \<Rightarrow> (length hs', t)); \<not> isAbr t' \<rbrakk> \<Longrightarrow> P"
by fact+
show ?case
proof (rule r)
show "\<not> isAbr t''" using EHOther(2) be
by (cases t) (auto elim: Normal_resultE Fault_resultE Stuck_resultE)
show "((\<not> isAbr t \<or> hs = []) \<and> length hs' = length hs \<and> t = t)
\<or> (\<exists>q. t = Abrupt q \<and> \<Gamma>\<turnstile>\<^sub>h \<langle>hs,q\<rangle> \<Rightarrow> (length hs', t))" using EHOther by simp
qed fact+
next
case (EHAbrupt h s z hs' n t)
hence ex: "\<Gamma> \<turnstile> \<langle>a ;; b, Normal s\<rangle> \<Rightarrow> Abrupt z" and hs: "hs' = hs" by simp_all
have ra: "\<And>z t'. \<lbrakk>\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> t'; \<Gamma> \<turnstile> \<langle>b, t'\<rangle> \<Rightarrow> z;
((\<not> isAbr z \<or> hs = []) \<and> n = length hs \<and> z = t) \<or> (\<exists>q. z = Abrupt q \<and> \<Gamma> \<turnstile>\<^sub>h \<langle>hs, q\<rangle> \<Rightarrow> (n, t)); \<not> isAbr t' \<rbrakk> \<Longrightarrow> P"
by fact+
have rb: "\<And>t'. \<lbrakk>\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> Abrupt t'; \<Gamma> \<turnstile>\<^sub>h \<langle>hs, t'\<rangle> \<Rightarrow> (n, t) \<rbrakk> \<Longrightarrow> P"
by fact+
{
assume "\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> Abrupt z"
hence ?case
proof (rule rb)
show "\<Gamma>\<turnstile>\<^sub>h \<langle>hs,z\<rangle> \<Rightarrow> (n, t)" by (fold hs) fact+
qed
} moreover
{
fix s''
assume "\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> Normal s''" and
"\<Gamma> \<turnstile> \<langle>b, Normal s''\<rangle> \<Rightarrow> Abrupt z"
hence ?case
proof (rule ra)
show "((\<not> isAbr (Abrupt z) \<or> hs = []) \<and> n = length hs \<and> Abrupt z = t)
\<or> (\<exists>q. Abrupt z = Abrupt q \<and> \<Gamma>\<turnstile>\<^sub>h \<langle>hs,q\<rangle> \<Rightarrow> (n, t))" using EHAbrupt(2)
by (simp add: hs)
show "\<not> isAbr (Normal s'')" by simp
qed
} ultimately show ?case using ex
apply -
apply (erule exec_Normal_elim_cases)
apply (erule Abrupt_resultE)
apply simp
apply simp
apply (drule Abrupt_end [OF _ refl])
apply simp
done
next
case (EHEmpty s)
thus ?case by simp
qed
lemma exec_handlers_Seq_cases' [consumes 1, case_names NotAbrupt Abrupt]:
assumes eh: "\<Gamma> \<turnstile>\<^sub>h \<langle>(a ;; b) # hs, s\<rangle> \<Rightarrow> (n, t)"
and r1: "\<And>z t'. \<lbrakk>\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> t'; \<Gamma> \<turnstile> \<langle>b, t'\<rangle> \<Rightarrow> z;
((\<not> isAbr z \<or> hs = []) \<and> n = length hs \<and> z = t) \<or> (\<exists>q. z = Abrupt q \<and> \<Gamma> \<turnstile>\<^sub>h \<langle>hs, q\<rangle> \<Rightarrow> (n, t)); \<not> isAbr t' \<rbrakk> \<Longrightarrow> P"
and r2: "\<And>t'. \<lbrakk>\<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> Abrupt t'; \<Gamma> \<turnstile>\<^sub>h \<langle>hs, t'\<rangle> \<Rightarrow> (n, t) \<rbrakk> \<Longrightarrow> P"
shows P
using eh r1 r2
by (rule exec_handlers_Seq_cases0' [OF _ refl], auto)
lemma ccorres_abstract_fail:
assumes fl: "\<And>s. P s \<Longrightarrow> snd (a s)"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a c"
apply (rule ccorresI')
apply (drule fl)
apply simp
done
lemma ccorres_fail':
"ccorres_underlying sr \<Gamma> rvr xf arrel axf P P' hs fail cfn"
by (simp add: ccorres_underlying_def fail_def)
lemma ccorres_fail:
"ccorres_underlying sr \<Gamma> rvr xf arrel axf \<top> UNIV hs fail cfn"
by (rule ccorres_fail')
lemma ccorres_assert:
assumes rl: "P \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs f c"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs (assert P >>= (\<lambda>_. f)) c"
apply (cases P)
apply (simp add: assert_def)
apply (erule rl)
apply (simp add: ccorres_fail')
done
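(* A Hoare-style judgement over a handler stack: from a start state in P, any
   Normal final state lies in Q if execution finished in the first program
   (n = length cs - 1), and in A if a handler was entered. *)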
definition
exec_handlers_Hoare :: "('p \<Rightarrow> ('t, 'p, 'e) com option) \<Rightarrow>
('t set) \<Rightarrow> ('t, 'p, 'e) com list \<Rightarrow> ('t set) \<Rightarrow> ('t set) \<Rightarrow> bool"
where
"exec_handlers_Hoare \<Gamma> P cs Q A \<equiv>
\<forall>s n t. \<Gamma> \<turnstile>\<^sub>h \<langle>cs, s\<rangle> \<Rightarrow> (n, t)
\<longrightarrow> s \<in> P \<longrightarrow> t \<notin> Normal ` (if n = length cs - 1 then - Q else - A)"
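(* The concrete postcondition induced by a return relation r' and an extraction
   function xf': every abstract result related to the extracted value must
   satisfy Q'. *)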
definition
ccHoarePost :: "('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> ('t \<Rightarrow> 'b) \<Rightarrow>
('a \<Rightarrow> 'b \<Rightarrow> 't set) \<Rightarrow> ('t set)"
where
"ccHoarePost r' xf' Q'
\<equiv> {s. \<forall>rv. r' rv (xf' s) \<longrightarrow> s \<in> Q' rv (xf' s)}"
lemma exec_handlers_Hoare_single_from_vcg:
"\<Gamma> \<turnstile>\<^bsub>/F\<^esub> P c Q, A \<Longrightarrow> exec_handlers_Hoare \<Gamma> P [c] Q A"
apply (drule hoare_sound)
apply (simp add: cvalid_def HoarePartialDef.valid_def
exec_handlers_Hoare_def)
apply (auto elim!: exec_handlers.cases)
done
lemma exec_handlers_Hoare_from_vcg_nofail:
"\<Gamma> \<turnstile>\<^bsub>/F\<^esub> P c Q \<Longrightarrow> exec_handlers_Hoare \<Gamma> P (c # cs) Q A"
apply (drule hoare_sound)
apply (simp add: cvalid_def HoarePartialDef.valid_def
exec_handlers_Hoare_def split del: if_split)
apply (clarsimp split del: if_split)
apply (erule exec_handlers.cases, auto)
done
lemma exec_handlers_Hoare_from_vcg_fails:
"\<lbrakk> \<Gamma> \<turnstile>\<^bsub>/F\<^esub> P c {},UNIV; UNIV \<subseteq> A \<rbrakk> \<Longrightarrow> exec_handlers_Hoare \<Gamma> P (c # cs) Q A"
apply (drule hoare_sound)
apply (simp add: cvalid_def HoarePartialDef.valid_def
exec_handlers_Hoare_def split del: if_split)
apply (clarsimp split del: if_split)
apply (erule exec_handlers.cases, simp_all)
apply (cases cs)
apply (auto elim!: exec_handlers.cases)[1]
apply clarsimp
apply (frule exec_handlers_Cons_le)
apply auto
done
lemma exec_handlers_Hoare_NormalD:
"\<lbrakk> exec_handlers_Hoare \<Gamma> P cs Q A; s \<in> P; \<Gamma> \<turnstile>\<^sub>h \<langle>cs, s\<rangle> \<Rightarrow> (n, Normal t); n = length cs - 1 \<rbrakk>
\<Longrightarrow> t \<in> Q"
by (fastforce simp add: exec_handlers_Hoare_def)
lemma exec_handlers_Hoare_CatchD:
"\<lbrakk> exec_handlers_Hoare \<Gamma> P cs Q A; s \<in> P; \<Gamma> \<turnstile>\<^sub>h \<langle>cs, s\<rangle> \<Rightarrow> (n, Normal a); n \<noteq> length cs - 1 \<rbrakk>
\<Longrightarrow> a \<in> A"
by (fastforce simp add: exec_handlers_Hoare_def)
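(* Master splitting rule: an abstract bind (a >>= b) corresponds to a concrete
   sequence (c ;; d), given correspondences for a/c and b/d, a Hoare triple for
   a, an exec_handlers_Hoare judgement for c, and an extra "abort"
   correspondence for the case where c escapes into a non-empty handler stack. *)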
lemma ccorres_master_split:
assumes ac: "ccorres_underlying sr \<Gamma> r' xf' arrel' axf' P P' hs' a c"
and hsw: "hs' \<noteq> [] \<Longrightarrow> hs' = hs"
and bd: "\<And>rv. ccorres_underlying sr \<Gamma> r xf arrel axf (R rv) (R' rv) hs (b rv) d"
and abort: "\<And>rv. hs' \<noteq> [] \<Longrightarrow> ccorres_underlying sr \<Gamma> arrel axf arrel axf (E rv) (E' rv)
[] (b rv) Skip"
and valid: "\<lbrace>Q\<rbrace> a \<lbrace>\<lambda>rv. R rv and E rv\<rbrace>"
and valid': "exec_handlers_Hoare \<Gamma> Q' (c # hs) (ccHoarePost r' xf' (\<lambda>a b. R' a))
{s. hs' \<noteq> [] \<longrightarrow> s \<in> ccHoarePost arrel' axf' (\<lambda>a b. E' a)}"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (P and Q) (P' \<inter> Q') hs (a >>= (\<lambda>rv. b rv)) (c ;; d)"
apply (rule ccorresI')
apply (erule exec_handlers_Seq_cases')
apply (clarsimp simp add: bind_def)
apply (erule(3) ccorresE [OF ac])
apply (erule(1) EHOther)
apply simp
apply (frule exec_handlers_Hoare_NormalD[OF valid'])
apply (erule EHOther, simp)
apply simp
apply (frule(1) use_valid [OF _ valid])
apply (clarsimp simp: unif_rrel_simps ccHoarePost_def)
apply (drule spec, drule(1) mp)
apply (erule_tac x=z in ccorresE [OF bd], assumption+)
apply (clarsimp simp: split_def image_image)
apply force
apply (case_tac "isAbr za")
apply (clarsimp elim!: isAbrE)
apply (erule EHAbrupt)
apply (erule disjE)
apply clarsimp
apply (rule EHEmpty)
apply assumption
apply (simp add: isAbr_def)
apply (erule EHOther)
apply (clarsimp simp: isAbr_def split_def)
apply (clarsimp simp: split_def)
apply force
apply (clarsimp simp: bind_def)
apply (cases "hs' = []")
apply (erule(3) ccorresE [OF ac])
apply simp
apply (erule EHAbrupt, rule EHEmpty)
apply simp
apply (frule hsw)
apply (erule(3) ccorresE [OF ac])
apply simp
apply (erule(1) EHAbrupt)
apply (frule(1) use_valid [OF _ valid])
apply (cases hs, simp_all)[1]
apply (frule exec_handlers_Cons_le)
apply (frule exec_handlers_Hoare_CatchD[OF valid'])
apply (erule EHAbrupt)
apply simp
apply clarsimp
apply (simp add: unif_rrel_simps ccHoarePost_def)
apply (elim conjE)
apply (drule spec, drule(1) mp)
apply (rule ccorresE [OF abort], simp, assumption+)
apply (clarsimp simp: split_def image_image)
apply force
apply (rule EHOther)
apply (rule exec.intros)
apply simp
apply (clarsimp simp add: unif_rrel_simps split_def)
apply force
done
lemma ccorres_empty:
"ccorres_underlying sr \<Gamma> r xf arrel axf P {} hs a c"
apply (rule ccorresI')
apply simp
done
lemma ccorres_False:
"ccorres_underlying sr \<Gamma> r xf arrel axf (\<lambda>s. False) P' hs a c"
apply (rule ccorresI')
apply simp
done
lemmas ccorres_master_split_hs = ccorres_master_split [OF _ refl]
lemmas ccorres_master_split_nohs
= ccorres_master_split [where hs'=Nil and E = "\<lambda>_ _. True", simplified]
lemma stronger_ccorres_guard_imp:
assumes x: "ccorres_underlying sr \<Gamma> r xf arrel axf Q Q' hs f g"
assumes y: "\<And>s s'. \<lbrakk> A s; s' \<in> A'; (s, s') \<in> sr \<rbrakk> \<Longrightarrow> Q s"
assumes z: "\<And>s s'. \<lbrakk> A s; s' \<in> A'; (s, s') \<in> sr \<rbrakk> \<Longrightarrow> s' \<in> Q'"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf A A' hs f g"
using x
apply -
apply (rule ccorresI')
apply (erule (1) ccorresE)
apply (erule (2) y)
apply (erule (2) z)
apply assumption
apply auto
done
lemma ccorres_guard_imp:
assumes x: "ccorres_underlying sr \<Gamma> r xf arrel axf Q Q' hs f g"
assumes y: "\<And>s. A s \<Longrightarrow> Q s" "\<And>s. s \<in> A' \<Longrightarrow> s \<in> Q'" (* Do we prefer using subset? *)
shows "ccorres_underlying sr \<Gamma> r xf arrel axf A A' hs f g"
apply (rule stronger_ccorres_guard_imp)
apply (rule x)
apply (simp add: y)+
done
lemma ccorres_split_nothrow':
fixes R' :: "'a set"
assumes ac: "ccorres_underlying sr \<Gamma> r' xf' dc axf P P' hs a c"
and bd: "\<And>rv. ccorres_underlying sr \<Gamma> r xf arrel axf (Q rv) (Q' \<inter> {s. r' rv (xf' s)}) hs (b rv) d"
and valid: "\<lbrace>R\<rbrace> a \<lbrace>Q\<rbrace>"
and valid': "\<Gamma> \<turnstile>\<^bsub>/F\<^esub> R' c Q'"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) (P' \<inter> R') hs (a >>= (\<lambda>rv. b rv)) (c ;; d)"
using ac valid valid'
apply -
apply (erule ccorres_master_split_hs)
apply (rule bd)
apply (rule ccorres_empty[where P=\<top>])
apply simp
apply (rule exec_handlers_Hoare_from_vcg_nofail)
apply (erule conseqPost)
apply (clarsimp simp: ccHoarePost_def)
apply simp
done
lemma ccorres_add_return:
"ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs (return () >>= (\<lambda>_. a)) c
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a c"
by simp
lemma ccorres_add_return2:
"ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs (a >>= (\<lambda>x. return x)) c
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a c"
by simp
lemma ccorres_return:
assumes P: "\<And>s. R s \<Longrightarrow>
E \<turnstile> (R' \<inter> {s'. (s, s') \<in> sr}) f {s'. (s, s') \<in> sr \<and> r x (xf s')}"
shows "ccorres_underlying sr E r xf arrel axf R R' hs (return x) f"
apply (rule ccorresI')
apply (frule P)
apply (drule (1) exec_handlers_use_hoare_nothrow)
apply clarsimp
apply (clarsimp simp: return_def unif_rrel_simps)
done
lemma ccorres_return_Skip':
shows "ccorres_underlying sr \<Gamma> r xf arrel axf \<top> (UNIV \<inter> {s. r a (xf s)}) hs
(return a) SKIP"
apply (rule ccorres_return)
apply simp
apply (rule HoarePartial.Skip)
apply clarsimp
done
lemma ccorres_return_Skip:
shows "ccorres_underlying sr \<Gamma> dc xf arrel axf \<top> UNIV hs (return a) SKIP"
by (rule ccorres_return_Skip'[where r=dc, simplified])
lemma ccorres_split_throws:
assumes ac: "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a c"
and valid': "\<Gamma> \<turnstile>\<^bsub>/UNIV\<^esub> R' c {}, UNIV" (* Always throws *)
shows "ccorres_underlying sr \<Gamma> r xf arrel axf P (P' \<inter> R') hs a (c ;; d)"
apply (rule ccorres_add_return2)
apply (rule ccorres_guard_imp,
rule ccorres_master_split_hs[OF ac])
apply (rule ccorres_empty[where P=\<top>])
apply (rule ccorres_return_Skip')
apply wp
apply clarsimp
apply (rule exec_handlers_Hoare_from_vcg_fails [OF valid'])
apply (simp add: ccHoarePost_def)
apply simp
apply simp
done
lemma ccorres_Un:
assumes pt: "ccorres_underlying sr \<Gamma> r xf arrel axf P PT hs a c"
and pn: "ccorres_underlying sr \<Gamma> r xf arrel axf P PN hs a c"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf P (PT \<union> PN) hs a c"
using pt pn
apply -
apply (rule ccorresI)
apply (erule UnE)
apply (erule (5) ccorresE)
apply simp
apply (erule (5) ccorresE)
apply simp
apply (erule UnE)
apply (erule (5) ccorresE)
apply (erule bexI [rotated], clarsimp)
apply (erule (5) ccorresE)
apply (erule bexI [rotated], clarsimp)
done
lemma ccorres_throw:
assumes abh: "ccorres_underlying sr \<Gamma> arrel axf arrel axf P P' hs a h"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf P P' (h#hs) a Throw"
apply (rule ccorresI')
apply (erule exec_handlers.cases, simp_all)
apply (erule exec_Normal_elim_cases)
apply clarsimp
apply (frule exec_handlers_Cons_le)
apply (rule ccorresE [OF abh], assumption+)
apply (simp add: unif_rrel_def split_def)
apply (erule rev_bexI, simp)
apply (erule exec_Normal_elim_cases)
apply simp
done
lemma ccorres_split_throw:
assumes abh: "ccorres_underlying sr \<Gamma> arrel axf arrel axf P P' hs a h"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf P P' (h#hs) a (Throw ;; d)"
apply (rule ccorres_guard_imp, rule ccorres_split_throws)
apply (rule ccorres_throw[OF abh])
apply (rule HoarePartial.Throw)
apply (rule order_refl)
apply simp+
done
lemma ccorres_tmp_lift1:
assumes rl: "\<And>rv'. P rv'
\<Longrightarrow> ccorres_underlying srel \<Gamma> rrel xf arrel axf G (G' \<inter> {s. xf' s = rv'}) hs m c"
shows "ccorres_underlying srel \<Gamma> rrel xf arrel axf G (G' \<inter> {s. P (xf' s)}) hs m c"
by (auto intro!: ccorresI dest!: rl elim: ccorresE)
lemma ceqvhD1:
assumes lhs: "\<Gamma> \<turnstile>\<^sub>h \<langle>a # hs, s\<rangle> \<Rightarrow> (n, s')"
and xf: "xf s = v"
and ceq: "\<And>s'. ceqv \<Gamma> xf v s s' a b"
shows "\<Gamma> \<turnstile>\<^sub>h \<langle>b # hs, s\<rangle> \<Rightarrow> (n, s')"
using lhs xf
apply -
apply (ind_cases "\<Gamma> \<turnstile>\<^sub>h \<langle>a # hs, s\<rangle> \<Rightarrow> (n, s')")
apply (rule EHAbrupt)
apply (erule (1) ceqvD1 [OF _ _ ceq])
apply assumption
apply simp
apply (rule EHOther)
apply (erule (1) ceqvD1 [OF _ _ ceq])
apply assumption
done
lemmas ceqvhD2 = ceqvhD1 [OF _ _ ceqv_sym]
lemma ccorres_tmp_lift2:
assumes rl: "\<And>t t'. ceqv \<Gamma> xf' rv' t t' c c'"
and c: "ccorres_underlying srel \<Gamma> rrel xf arrel axf G (G'' rv') hs m c'"
and geq: "G'' rv' \<inter> {s. rv' = xf' s} = G' \<inter> {s. rv' = xf' s}"
shows "ccorres_underlying srel \<Gamma> rrel xf arrel axf G (G' \<inter> {s. xf' s = rv'}) hs m c"
using c
apply -
apply (rule ccorresI')
apply (erule (2) ccorresE)
apply (subst (asm) Int_eq_symmetric)
apply (subst (asm) geq [symmetric])
apply fastforce
apply assumption
apply simp
apply (erule conjE)+
apply (erule (1) ceqvhD1 [OF _ _ rl])
apply simp
apply fastforce
done
lemma ccorres_init_tmp_lift2:
assumes rl: "\<And>t t'. ceqv \<Gamma> xf' rv' t t' c c'"
and c: "ccorres_underlying srel \<Gamma> rrel xf arrel axf G G' hs m c'"
shows "ccorres_underlying srel \<Gamma> rrel xf arrel axf G (G' \<inter> {s. xf' s = rv'}) hs m c"
using c
apply -
apply (rule ccorresI')
apply (erule (2) ccorresE)
apply fastforce
apply assumption
apply simp
apply (erule conjE)+
apply (erule (1) ceqvhD1 [OF _ _ rl])
apply simp
apply fastforce
done
lemma ccorres_Call:
assumes ge: "\<Gamma> f = Some (Catch bdy Skip)"
and cul: "ccorres_underlying sr \<Gamma> r xf r xf P P' (Skip # hs) a bdy"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a (Call f)"
using ge cul
apply -
apply (rule ccorresI')
apply (erule exec_handlers.cases)
apply clarsimp
apply (erule exec_Normal_elim_cases | simp)+
apply clarsimp
apply (erule exec_Normal_elim_cases | simp)+
apply (erule (4) ccorresE)
apply (erule EHAbrupt, rule EHOther)
apply (rule exec.Skip)
apply simp
apply (simp add: unif_rrel_simps)
apply fastforce
apply (erule (4) ccorresE)
apply (erule (1) EHOther)
apply (simp add: unif_rrel_simps)
apply fastforce
apply simp
apply simp
done
lemma ccorres_call:
assumes cul: "ccorres_underlying sr \<Gamma> r xf' rrel xf' P (i ` P') [] a (Call f)"
and gsr: "\<And>a b x s t. (x, t) \<in> sr \<Longrightarrow> (x, g a b (clean s t)) \<in> sr"
and res: "\<And>a s t rv. r rv (xf' t) \<Longrightarrow> r rv (xf (g a t (clean s t)))"
and ist: "\<And>x s. (x, s) \<in> sr \<Longrightarrow> (x, i s) \<in> sr"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a (call i f clean (\<lambda>x y. Basic (g x y)))"
apply (rule ccorresI')
apply (erule exec_handlers.cases, simp_all)[1]
apply clarsimp
apply (erule exec_call_Normal_elim, simp_all)[1]
apply (clarsimp elim!: exec_Normal_elim_cases)
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHAbrupt)
apply (erule(1) exec.Call)
apply (rule EHEmpty)
apply simp
apply clarsimp
apply (erule exec_call_Normal_elim, simp_all)[1]
apply (clarsimp elim!: exec_Normal_elim_cases)
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHOther, erule(1) exec.Call)
apply simp
apply (simp add: unif_rrel_simps)
apply (erule rev_bexI)
apply (simp add: gsr res)
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHOther, erule(1) exec.Call)
apply simp
apply simp
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHOther, erule(1) exec.Call)
apply simp
apply simp
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHOther, erule exec.CallUndefined)
apply simp
apply simp
done
declare semantic_equivD1 [dest]
declare semantic_equivD2 [dest]
lemma exec_handlers_semantic_equiv0:
assumes se: "\<And>s'. semantic_equiv \<Gamma> s s' a b"
and eh: "\<Gamma> \<turnstile>\<^sub>h \<langle>a # hs, s\<rangle> \<Rightarrow> (n, t)"
shows "\<Gamma> \<turnstile>\<^sub>h \<langle>b # hs,s\<rangle> \<Rightarrow> (n, t)"
using se eh
apply -
apply (erule ceqvhD1 [where xf = "\<lambda>_. ()" and v = "()", folded semantic_equiv_def])
apply simp
apply assumption
done
lemmas exec_handlers_semantic_equivD1 = exec_handlers_semantic_equiv0 [rotated]
lemmas exec_handlers_semantic_equivD2
= exec_handlers_semantic_equiv0 [OF iffD1 [OF semantic_equiv_sym], rotated]
lemma exec_handlers_semantic_equiv:
assumes se: "\<And>s'. semantic_equiv \<Gamma> s s' a b"
shows "\<Gamma> \<turnstile>\<^sub>h \<langle>a # hs, s\<rangle> \<Rightarrow> (n, t) = \<Gamma> \<turnstile>\<^sub>h \<langle>b # hs,s\<rangle> \<Rightarrow> (n, t)"
using se
apply -
apply rule
apply (erule (1) exec_handlers_semantic_equiv0)
apply (subst (asm) semantic_equiv_sym)
apply (erule (1) exec_handlers_semantic_equiv0)
done
lemma ccorres_semantic_equiv0:
assumes rl: "\<And>s s'. s \<in> G' \<Longrightarrow> semantic_equiv \<Gamma> s s' c c'"
and c: "ccorres_underlying srel \<Gamma> rrel xf arrel axf G G' hs m c"
shows "ccorres_underlying srel \<Gamma> rrel xf arrel axf G G' hs m c'"
using c rl
apply -
apply (rule ccorresI')
apply (erule (4) ccorresE)
apply (erule exec_handlers_semantic_equivD2)
apply assumption
apply (clarsimp elim!: bexI [rotated])
done
lemmas ccorres_semantic_equivD1 = ccorres_semantic_equiv0 [rotated]
lemmas ccorres_semantic_equivD2
= ccorres_semantic_equiv0 [OF iffD1 [OF semantic_equiv_sym], rotated]
(* This is so we can get the name --- if it is done at lemmas, it uses the
(nameless) RHS *)
declare ccorres_semantic_equivD1
declare ccorres_semantic_equivD2
lemma ccorres_semantic_equiv:
assumes rl: "\<And>s s'. s \<in> G' \<Longrightarrow> semantic_equiv \<Gamma> s s' c c'"
shows "ccorres_underlying srel \<Gamma> rrel xf arrel axf G G' hs m c =
ccorres_underlying srel \<Gamma> rrel xf arrel axf G G' hs m c'"
using rl
apply -
apply rule
apply (erule (1) ccorres_semantic_equiv0)
apply (subst (asm) semantic_equiv_sym)
apply (erule (1) ccorres_semantic_equiv0)
done
lemmas ccorres_exec_cong = ccorres_semantic_equiv [OF semantic_equivI]
definition
"exec_While \<Gamma> S c s s' \<equiv> exec \<Gamma> (While S c) s s'"
(* nearly works - can't get simplifier to use both the While and Seq rules *)
lemmas ccorres_exec_congs = ccorres_exec_cong exec_While_cong[folded exec_While_def] exec_Seq_cong
lemmas exec_eq_simps = exec_Guard_UNIV_simp exec_Seq_Skip_simps
lemma test_ccorres_exec_congs:
"ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a
(Guard F UNIV Skip ;; While S (c ;; Guard F UNIV Skip))
= ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a (While S c)"
apply (simp add: exec_eq_simps cong: ccorres_exec_congs)
oops
lemma exec_handlers_assoc:
"E \<turnstile>\<^sub>h \<langle>(c1;; (c2 ;; c3)) # hs, s\<rangle> \<Rightarrow> (n, t) = E \<turnstile>\<^sub>h \<langle>(c1;;c2;;c3) # hs,s\<rangle> \<Rightarrow> (n, t)"
apply (rule exec_handlers_semantic_equiv)
apply (rule semantic_equivI)
apply (rule exec_assoc)
done
lemma ccorres_rhs_assoc:
assumes cc: "ccorres_underlying sr E r xf arrel axf G G' hs a (c ;; (d ;; e))"
shows "ccorres_underlying sr E r xf arrel axf G G' hs a (c ;; d ;; e)"
using cc
apply (rule ccorres_semantic_equivD1)
apply (rule semantic_equivI)
apply (rule exec_assoc)
done
lemma ccorres_rhs_assoc2:
"ccorres_underlying sr E r xf arrel axf G G' hs a ((c ;; c') ;; c'')
\<Longrightarrow> ccorres_underlying sr E r xf arrel axf G G' hs a (c ;; (c' ;; c''))"
apply (erule iffD2 [OF ccorres_semantic_equiv, rotated])
apply (rule semantic_equivI)
apply (rule exec_assoc)
done
lemma ccorres_basic_srnoop:
assumes asm: "ccorres_underlying sr E r xf arrel axf G G' hs a c"
and gsr: "\<And>s s'. (s, s') \<in> sr \<Longrightarrow> (s, g s') \<in> sr"
and gG: "\<And>s'. s' \<in> G' \<Longrightarrow> g s' \<in> G'"
shows "ccorres_underlying sr E r xf arrel axf G G' hs a (Basic g ;; c)"
using asm
apply -
apply (rule ccorresI')
apply clarsimp
apply (erule exec_handlers.cases)
apply clarsimp
apply (erule exec_Normal_elim_cases)
apply (erule exec_Normal_elim_cases)
apply simp
apply (erule (4) ccorresE [OF _ gsr _ gG])
apply (erule (1) EHAbrupt)
apply clarsimp
apply (erule bexI [rotated])
apply simp
apply clarsimp
apply (erule exec_Normal_elim_cases)
apply (erule exec_Normal_elim_cases)
apply simp
apply (erule (4) ccorresE [OF _ gsr _ gG])
apply (erule (1) EHOther)
apply clarsimp
apply (erule bexI [rotated])
apply simp
apply simp
done
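(* Symbolically execute a concrete statement m that preserves the state
   relation before relating a to the remainder y. *)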
lemma ccorres_symb_exec_r:
assumes cul: "ccorres_underlying sr E r xf arrel axf R Q' hs a y"
and ex: "E \<turnstile> R' m Q', {}"
and pres: "\<And>s. R s \<Longrightarrow> E \<turnstile> (R' \<inter> {s'. (s, s') \<in> sr}) m {s'. (s, s') \<in> sr}"
shows "ccorres_underlying sr E r xf arrel axf R R' hs a (m ;; y)"
apply -
apply (rule ccorres_guard_imp)
apply (subst return_bind [symmetric, where f = "\<lambda>x. a"], rule ccorres_split_nothrow')
apply (rule ccorres_return [where r = "\<lambda>x y. True"])
apply (drule pres, simp)
apply (rule ccorres_guard_imp)
apply (rule cul)
apply assumption
apply simp
apply wp
apply (rule ex)
apply simp
apply simp
done
(* Throw stuff *)
lemma ccorres_rel_imp:
assumes x: "ccorres_underlying sr \<Gamma> r' xf' r' xf' P P' hs f g"
assumes y: "\<And>x y. r' x (xf' y) \<Longrightarrow> r x (xf y)"
shows "ccorres_underlying sr \<Gamma> r xf r xf P P' hs f g"
using x
apply -
apply (rule ccorresI')
apply (erule (5) ccorresE)
apply clarsimp
apply (erule bexI [rotated])
apply (simp add: y unif_rrel_def)
done
lemma ccorres_liftM_simp [simp]:
"(ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs (liftM t f) g)
= (ccorres_underlying sr \<Gamma> (r \<circ> t) xf (arrel \<circ> t) axf P P' hs f g)"
apply rule
apply (rule ccorresI')
apply (erule (3) ccorresE)
apply clarsimp
apply assumption
apply (clarsimp simp add: liftM_def bind_def return_def)
apply (erule bexI [rotated])
apply (simp add: unif_rrel_def)
apply (rule ccorresI')
apply simp
apply (erule (5) ccorresE)
apply (simp add: liftM_def NonDetMonad.bind_def return_def)
apply (erule bexI [rotated])
apply (simp add: unif_rrel_def split: if_split_asm)
done
lemma ccorres_cond_weak:
assumes c1: "ccorres_underlying sr \<Gamma> r xf arrel axf Pt Rt hs a c"
and c2: "ccorres_underlying sr \<Gamma> r xf arrel axf Pf Rf hs a c'"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (Pt and Pf) ((Rt \<inter> b) \<union> (Rf \<inter> -b)) hs a (Cond b c c')"
apply (rule ccorresI')
apply (erule UnE)
apply (drule exec_handlers_semantic_equivD1 [where b = c])
apply (rule semantic_equivI)
apply (fastforce elim: exec_Normal_elim_cases intro: exec.CondTrue)
apply (fastforce elim: ccorresE [OF c1] elim!: bexI [rotated])
apply (drule exec_handlers_semantic_equivD2 [where b = c'])
apply (rule semantic_equivI)
apply (fastforce elim: exec_Normal_elim_cases intro: exec.CondFalse)
apply (fastforce elim: ccorresE [OF c2] elim!: bexI [rotated])
done
lemma ccorres_cond_empty:
assumes c2: "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a c'"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a (Cond {} c c')"
apply (rule ccorres_guard_imp)
apply (rule ccorres_cond_weak [OF _ c2])
apply (rule ccorres_empty)
apply simp
apply simp
done
lemma ccorres_cond_univ:
assumes c1: "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a c"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs a (Cond UNIV c c')"
apply (rule ccorres_guard_imp)
apply (rule ccorres_cond_weak [OF c1])
apply (rule ccorres_empty)
apply simp
apply simp
done
lemma ccorres_Guard:
assumes cc: "ccorres_underlying sr \<Gamma> r xf arrel axf A C' hs a c"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf A (C' \<inter> S) hs a (Guard F S c)"
using cc
apply -
apply (rule ccorresI')
apply (erule exec_handlers.cases)
apply clarsimp
apply (erule exec_Normal_elim_cases)
apply (erule (4) ccorresE)
apply (erule (1) EHAbrupt)
apply (clarsimp elim!: bexI [rotated])
apply clarsimp
apply clarsimp
apply (erule exec_Normal_elim_cases)
apply (erule (4) ccorresE)
apply (erule (1) EHOther)
apply (clarsimp elim!: bexI [rotated])
apply clarsimp
apply simp
done
lemma ccorres_Guard_Seq:
assumes cc: "ccorres_underlying sr \<Gamma> r xf arrel axf A C' hs a (c ;; d)"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf A (C' \<inter> S) hs a (Guard F S c ;; d)"
apply (rule ccorres_semantic_equivD2 [OF _ Guard_Seq_semantic_equiv])
apply (rule ccorres_Guard [OF cc])
done
lemma ccorres_cond_const:
assumes c1: "P \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf Pt Rt hs a c"
and c2: "\<not> P \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf Pf Rf hs a c'"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf ((\<lambda>s. P \<longrightarrow> Pt s) and (\<lambda>s. \<not> P \<longrightarrow> Pf s)) ((Rt \<inter> {_. P}) \<union> (Rf \<inter> {_. \<not> P})) hs a (Cond {_. P} c c')"
apply (rule ccorresI')
apply (erule UnE)
apply (drule exec_handlers_semantic_equivD1 [where b = c])
apply (rule semantic_equivI)
apply (fastforce elim: exec_Normal_elim_cases intro: exec.CondTrue)
apply (fastforce elim: ccorresE [OF c1] elim!: bexI [rotated])
apply (drule exec_handlers_semantic_equivD2 [where b = c'])
apply (rule semantic_equivI)
apply (fastforce elim: exec_Normal_elim_cases intro: exec.CondFalse)
apply (fastforce elim: ccorresE [OF c2] elim!: bexI [rotated])
done
lemmas in_monad_pre' = in_whenE [where v = r for r] in_liftE [where v = r for r]
in_bind in_returnOk [where v' = r for r] in_throwError [where v = r for r]
in_assertE in_assert in_return in_assert_opt
in_get in_gets in_put in_when [where v = r for r]
in_modify [where v = r for r]
lemmas in_monad' = in_monad_pre' [where r = "fst r" and s' = "snd r" for r, simplified surjective_pairing [symmetric]]
declare not_snd_bindI1 [intro?]
declare not_snd_bindI2 [intro?]
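(* Symbolically execute an abstract computation m that leaves the state
   unchanged before relating its continuation f to the concrete program c. *)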
lemma ccorres_symb_exec_l:
assumes cc: "\<And>rv. ccorres_underlying sr \<Gamma> r xf arrel axf (Q rv) (Q' rv) hs (f rv) c"
and pres: "\<And>s. \<lbrace>op = s\<rbrace> m \<lbrace>\<lambda>r. op = s\<rbrace>"
and val: "\<lbrace>G\<rbrace> m \<lbrace>Q\<rbrace>"
and ef: "empty_fail m"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf G {s'. \<forall>rv s. (s, s') \<in> sr \<and> Q rv s \<longrightarrow> s' \<in> Q' rv} hs (m >>= (\<lambda>rv. f rv)) c"
apply (rule ccorresI')
apply (frule not_snd_bindI1)
apply (erule empty_fail_not_snd [OF _ ef, THEN exE])
apply (case_tac x)
apply simp
apply (frule (1) use_valid [OF _ val])
apply (frule use_valid [OF _ pres])
apply (rule refl)
apply simp
apply (rule ccorresE [OF cc], assumption+)
apply fastforce
apply (erule (1) not_snd_bindI2)
apply assumption
apply (clarsimp simp: bind_def split: prod.splits)
apply (erule (1) my_BallE)
apply (erule bexI [rotated])
apply force
done
lemma ccorres_symb_exec_l':
assumes cc: "\<And>rv. ccorres_underlying sr \<Gamma> r xf arrel axf (Q rv) G' hs (f rv) c"
and v1: "\<And>s. NonDetMonad.valid (op = s) m (\<lambda>r. op = s)"
and v2: "NonDetMonad.valid G m Q"
and ef: "empty_fail m"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs (m >>= (\<lambda>rv. f rv)) c"
apply (rule ccorres_guard_imp)
apply (rule ccorres_symb_exec_l [OF cc v1 v2 ef])
apply simp+
done
lemma ccorres_symb_exec_l2:
assumes cc: "\<And>rv. ccorres_underlying sr \<Gamma> r xf arrel axf (Q rv) (Q' rv) hs (f rv) c"
and v1: "\<And>s. G s \<Longrightarrow> exs_valid (op = s) m (\<lambda>r. op = s)"
and v2: "NonDetMonad.valid G m Q"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf G {s'. \<forall>rv s. (s, s') \<in> sr \<and> Q rv s \<longrightarrow> s' \<in> Q' rv} hs (m >>= (\<lambda>rv. f rv)) c"
apply (rule ccorresI')
apply (frule use_exs_valid [OF v1])
apply (rule refl)
apply clarsimp
apply (frule (1) use_valid [OF _ v2])
apply (drule (1) not_snd_bindD)
apply (rule ccorresE [OF cc])
apply assumption
apply assumption
apply (drule spec, erule mp)
apply fastforce
apply simp
apply assumption
apply (fastforce simp: in_monad')
done
lemma exec_handlers_SkipD:
"\<Gamma>\<turnstile>\<^sub>h \<langle>SKIP # hs, s\<rangle> \<Rightarrow> (n, s') \<Longrightarrow> s' = Normal s \<and> n = length hs"
apply (erule exec_handlers.cases)
apply clarsimp
apply (erule exec_Normal_elim_cases)
apply simp
apply clarsimp
apply (erule exec_Normal_elim_cases)
apply simp
apply simp
done
lemma ccorres_trim_redundant_throw':
assumes cc: "ccorres_underlying sr \<Gamma> arrel axf arrel axf G G' (SKIP # hs) a c"
and xfg: "\<And>s. axf (f s) = axf s"
and sr: "\<And>t t'. (t, t') \<in> sr \<Longrightarrow> (t, f t') \<in> sr"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf G G' (SKIP # hs)
a (c;; Basic f;; THROW)"
apply (rule ccorres_rhs_assoc)
apply (rule ccorresI')
apply clarsimp
apply (erule exec_handlers_Seq_cases')
apply simp
apply (erule disjE)
-- "Non-abrupt case"
apply clarsimp
apply (erule_tac x = "t'" in ccorresE [OF cc])
apply assumption
apply assumption
apply assumption
apply (erule (1) EHOther)
apply simp
apply (erule exec_Normal_elim_cases | simp)+
-- "Abrupt case"
apply clarsimp
apply (erule_tac x = "t'" in ccorresE [OF cc])
apply assumption
apply assumption
apply assumption
apply (erule (1) EHOther)
apply clarsimp
apply (frule exec_handlers_Cons_le)
apply (erule exec_Normal_elim_cases | simp)+
apply (frule exec_handlers_SkipD)
apply (clarsimp simp: xfg unif_rrel_simps elim!: bexI [rotated] sr)
apply (frule exec_handlers_SkipD)
apply (erule_tac x = "Normal t'" in ccorresE [OF cc])
apply assumption
apply assumption
apply assumption
apply (erule EHAbrupt)
apply simp
apply (clarsimp simp: xfg unif_rrel_simps elim!: bexI [rotated])
done
lemma ccorres_req:
assumes eq: "\<And>s s'. \<lbrakk> (s, s') \<in> sr; G s; s' \<in> G' \<rbrakk> \<Longrightarrow> F"
and rl: "F \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a c"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a c"
apply (rule ccorresI')
apply clarsimp
apply (frule (2) eq [THEN rl])
apply (erule (5) ccorresE)
apply fastforce
done
lemma ccorres_gen_asm:
assumes rl: "P \<Longrightarrow> ccorres_underlying \<Gamma> sr r xf arrel axf G G' hs a c"
shows "ccorres_underlying \<Gamma> sr r xf arrel axf (G and (\<lambda>_. P)) G' hs a c"
apply (rule ccorres_req)
prefer 2
apply (rule ccorres_guard_imp)
apply (erule rl)
apply simp+
done
lemma ccorres_gen_asm2:
assumes rl: "P \<Longrightarrow> ccorres_underlying \<Gamma> sr r xf arrel axf G G' hs a c"
shows "ccorres_underlying \<Gamma> sr r xf arrel axf G (G' \<inter> {_. P}) hs a c"
apply (rule ccorres_req)
prefer 2
apply (rule ccorres_guard_imp)
apply (erule rl)
apply (simp split: if_split_asm)+
done
lemma ccorres_guard_imp2:
assumes cc: "ccorres_underlying sr \<Gamma> r xf arrel axf Q Q' hs f g"
and rl: "\<And>s s'. \<lbrakk> (s, s') \<in> sr; A s; s' \<in> A' \<rbrakk> \<Longrightarrow> Q s \<and> s' \<in> Q'"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf A A' hs f g"
using cc
apply -
apply (rule ccorresI')
apply (frule (2) rl)
apply (erule conjE)
apply (erule (5) ccorresE)
apply (fastforce elim: bexI [rotated])
done
lemma ccorres_cond_both:
assumes abs: "\<forall>s s'. (s, s') \<in> sr \<and> R s \<longrightarrow> P s = (s' \<in> P')"
and c1: "ccorres_underlying sr \<Gamma> r xf arrel axf Pt Rt hs a c"
and c2: "ccorres_underlying sr \<Gamma> r xf arrel axf Pf Rf hs a c'"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (R and (\<lambda>s. P s \<longrightarrow> Pt s) and (\<lambda>s. \<not> P s \<longrightarrow> Pf s)) ((Rt \<inter> P') \<union> (Rf \<inter> - P')) hs a (Cond P' c c')"
apply (rule ccorresI')
apply (erule UnE)
apply (drule exec_handlers_semantic_equivD1 [where b = c])
apply (rule semantic_equivI)
apply (fastforce elim: exec_Normal_elim_cases intro: exec.CondTrue)
apply (fastforce simp: abs elim: ccorresE [OF c1] elim!: bexI [rotated])
apply (drule exec_handlers_semantic_equivD2 [where b = c'])
apply (rule semantic_equivI)
apply (fastforce elim: exec_Normal_elim_cases intro: exec.CondFalse)
apply (fastforce simp: abs elim: ccorresE [OF c2] elim!: bexI [rotated])
done
lemma ccorres_abstract:
assumes ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and cc: "\<And>rv'. ccorres_underlying sr \<Gamma> r xf arrel axf G (G' rv') hs a (d' rv')"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf G {s. s \<in> G' (xf' s)} hs a d"
apply (rule ccorresI')
apply (erule (1) ccorresE [OF cc])
apply simp
apply assumption
apply (erule ceqvhD1 [OF _ refl ceqv])
apply (clarsimp elim!: bexI [rotated])
done
lemma ccorres_split_nothrow:
fixes R' :: "'a set"
assumes ac: "ccorres_underlying sr \<Gamma> r' xf' r' xf' P P' hs a c"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and bd: "\<And>rv rv'. r' rv rv' \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf (Q rv) (Q' rv rv') hs (b rv) (d' rv')"
and valid: "\<lbrace>R\<rbrace> a \<lbrace>Q\<rbrace>"
and valid': "\<Gamma> \<turnstile>\<^bsub>/F\<^esub> R' c {s. \<forall>rv. r' rv (xf' s) \<longrightarrow> s \<in> Q' rv (xf' s)}"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) (P' \<inter> R') hs (a >>= (\<lambda>rv. b rv)) (c ;; d)"
apply (rule ccorres_master_split_hs[OF ac])
apply (rule ccorres_abstract[OF ceqv])
apply (rule_tac P="r' rv rv'" in ccorres_gen_asm2)
apply (erule bd)
apply (rule ccorres_empty[where P=\<top>])
apply (simp add: valid)
apply (rule exec_handlers_Hoare_from_vcg_nofail)
apply (rule conseqPost, rule valid')
apply (clarsimp simp: ccHoarePost_def)
apply simp
done
(* We use composition here (over something like xf'') so that we can detect hand-rolled
corres lemmas --- otherwise, there is no real way. *)
lemma ccorres_split_nothrow_record:
fixes R' :: "'a set"
assumes ac: "ccorres_underlying sr \<Gamma> r' (xfr \<circ> xf') r' (xfr \<circ> xf') P P' hs a c"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and bd: "\<And>rv rv'. r' rv rv' \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf (Q rv) (Q' rv rv') hs (b rv) (d' (xfru (\<lambda>_. rv') oldv))"
and valid: "\<lbrace>R\<rbrace> a \<lbrace>Q\<rbrace>"
and valid': "\<Gamma> \<turnstile>\<^bsub>/F\<^esub> R' c {s. xf' s = xfru (\<lambda>_. (xfr \<circ> xf') s) oldv \<and> (\<forall>rv. r' rv ((xfr \<circ> xf') s) \<longrightarrow> (s \<in> Q' rv ((xfr \<circ> xf') s)))}"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) (P' \<inter> R') hs (a >>= (\<lambda>rv. b rv)) (c ;; d)"
apply (rule ccorres_master_split_hs[OF ac])
apply (rule ccorres_abstract [OF ceqv])
apply (rule_tac P="d' rv' = d' (xfru (\<lambda>_. (xfr rv')) oldv)" in ccorres_gen_asm2)
apply (rule_tac P="r' rv (xfr rv')" in ccorres_gen_asm2)
apply simp
apply (erule bd)
apply (rule ccorres_empty[where P=\<top>])
apply (simp add: valid)
apply (rule exec_handlers_Hoare_from_vcg_nofail)
apply (rule conseqPost[OF valid'])
apply (clarsimp simp: ccHoarePost_def)
apply simp
done
lemma ccorres_move_Guard_Seq:
assumes abs: "\<forall>s s'. (s, s') \<in> sr \<and> P s \<and> P' s' \<longrightarrow> G' s'"
and cc: "ccorres_underlying sr \<Gamma> r xf arrel axf A C' hs a (c ;; d)"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (A and P) (C' \<inter> Collect P') hs a (Guard F (Collect G') c ;; d)"
apply (rule ccorres_guard_imp2)
apply (rule ccorres_Guard_Seq [OF cc])
apply simp
apply (rule abs [rule_format])
apply fastforce
done
lemma ccorres_move_Guard:
assumes abs: "\<forall>s s'. (s, s') \<in> sr \<and> P s \<and> P' s' \<longrightarrow> G' s'"
and cc: "ccorres_underlying sr \<Gamma> r xf arrel axf A C' hs a c"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (A and P) (C' \<inter> Collect P') hs a (Guard F (Collect G') c)"
apply (rule ccorres_guard_imp2)
apply (rule ccorres_Guard [OF cc])
apply simp
apply (rule abs [rule_format])
apply fastforce
done
section "novcg"
lemma ccorres_to_vcg':
"\<lbrakk> ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' [] a c; \<not> snd (a \<sigma>) \<rbrakk> \<Longrightarrow>
\<Gamma>\<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> srel} c
{s. \<exists>(rv, \<sigma>')\<in>fst (a \<sigma>). (\<sigma>', s) \<in> srel \<and> rrel rv (xf s)}"
apply (drule ccorres_to_vcg)
apply clarsimp
done
lemma exec_handlers_Hoare_UNIV:
"guard_is_UNIV r xf Q \<Longrightarrow>
exec_handlers_Hoare \<Gamma> UNIV cs (ccHoarePost r xf Q) UNIV"
by (clarsimp simp: exec_handlers_Hoare_def ccHoarePost_def
guard_is_UNIV_def)
lemmas ccorres_master_split_nohs_UNIV
= ccorres_master_split_nohs [OF _ _ _ exec_handlers_Hoare_UNIV, simplified]
lemma ccorres_split_nothrow_novcg:
fixes R' :: "'a set"
assumes ac: "ccorres_underlying sr \<Gamma> r' xf' r' xf' P P' [] a c"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and bd: "\<And>rv rv'. r' rv rv' \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf (Q rv) (Q' rv rv') hs (b rv) (d' rv')"
and valid: "\<lbrace>R\<rbrace> a \<lbrace>Q\<rbrace>"
and novcg: "guard_is_UNIV r' xf' Q'"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) P' hs (a >>= (\<lambda>rv. b rv)) (c ;; d)"
apply (rule ccorres_master_split_nohs_UNIV)
apply (rule ac)
apply (rule ccorres_abstract[OF ceqv])
apply (rule ccorres_gen_asm2)
apply (erule bd)
apply (simp add: valid)
apply (cut_tac novcg, simp add: guard_is_UNIV_def)
done
lemma ccorres_split_nothrow_record_novcg:
fixes R' :: "'a set"
assumes ac: "ccorres_underlying sr \<Gamma> r' (xfr \<circ> xf') r' (xfr \<circ> xf') P P' [] a c"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and bd: "\<And>rv rv'. r' rv rv' \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf (Q rv) (Q' rv rv') hs (b rv) (d' (xfru (\<lambda>_. rv') oldv))"
and valid: "\<lbrace>R\<rbrace> a \<lbrace>Q\<rbrace>"
and novcg: "guard_is_UNIV r' (xfr \<circ> xf') Q'"
-- "This might cause problems \<dots> has to be preserved across c in vcg case, but we can't do that"
and xfoldv: "\<And>s. xf' s = xfru (\<lambda>_. (xfr \<circ> xf') s) oldv"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) P' hs (a >>= (\<lambda>rv. b rv)) (c ;; d)"
apply (rule ccorres_master_split_nohs_UNIV)
apply (rule ac)
apply (rule ccorres_abstract[OF ceqv])
apply (rule_tac P="d' rv' = d' (xfru (\<lambda>_. (xfr rv')) oldv)" in ccorres_gen_asm2)
apply (rule_tac P="r' rv (xfr rv')" in ccorres_gen_asm2)
apply simp
apply (erule bd)
apply (rule valid)
apply (simp add: xfoldv[symmetric, unfolded o_def])
apply (cut_tac novcg, clarsimp simp: guard_is_UNIV_def)
done
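(* Lift a return relation to sum types, keeping only its Inl (exception) or
   Inr (normal result) part; used to state splitting rules for bindE below. *)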
definition
inl_rrel :: "('e + 'c \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> ('e + 'a \<Rightarrow> 'b \<Rightarrow> bool)" where
"inl_rrel rrel \<equiv> \<lambda>x. case x of Inl e \<Rightarrow> rrel (Inl e) | _ \<Rightarrow> \<bottom>"
definition
inr_rrel :: "('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> ('e + 'a \<Rightarrow> 'b \<Rightarrow> bool)" where
"inr_rrel rrel \<equiv> \<lambda>x. case x of Inr rv \<Rightarrow> rrel rv | _ \<Rightarrow> \<bottom>"
lemma inl_inr_rrel_simps[simp]:
"inl_rrel rrel' (Inl e) = rrel' (Inl e)"
"inr_rrel rrel (Inr rv) = rrel rv"
"inl_rrel rrel' (Inr rv) = \<bottom>"
"inr_rrel rrel (Inl rv) = \<bottom>"
by (simp add: inl_rrel_def inr_rrel_def)+
lemma inl_inrE:
"\<lbrakk> inl_rrel rrel' v ex; \<And>e. \<lbrakk> v = Inl e; rrel' (Inl e) ex \<rbrakk> \<Longrightarrow> R \<rbrakk> \<Longrightarrow> R"
"\<lbrakk> inr_rrel rrel v rv; \<And>r. \<lbrakk> v = Inr r; rrel r rv \<rbrakk> \<Longrightarrow> R \<rbrakk> \<Longrightarrow> R"
by (simp add: inl_rrel_def inr_rrel_def split: sum.split_asm)+
lemma ccorres_master_splitE:
fixes a :: "('s, 'e + 'a) nondet_monad"
and xf' :: "'t \<Rightarrow> 'b"
assumes pre: "ccorres_underlying sr \<Gamma> (inr_rrel r') xf' (inl_rrel arrel) axf P P' hs' a c"
assumes hsw: "hs' \<noteq> [] \<Longrightarrow> hs' = hs"
assumes ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
assumes post: "\<And>rv rv'. r' rv rv'
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf (Q rv) (Q' rv rv') hs (b rv) (d' rv')"
assumes hoare: "\<lbrace>R\<rbrace> a \<lbrace>Q\<rbrace>,-"
"exec_handlers_Hoare \<Gamma> R' (c # hs)
(ccHoarePost (inr_rrel r' :: ('e + 'a \<Rightarrow> 'b \<Rightarrow> bool)) xf' (\<lambda>v. Q' (theRight v))) UNIV"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) (P' \<inter> R') hs
(a >>=E (\<lambda>rv. b rv)) (c ;; d)"
unfolding bindE_def
apply (rule_tac R="\<lambda>rv s. (case rv of Inl _ \<Rightarrow> True | Inr rv' \<Rightarrow> Q rv' s)"
and R'="\<lambda>rv. {s. s \<in> (Q' (theRight rv) (xf' s) \<inter> {s'. inr_rrel r' rv (xf' s)})}"
and E="\<lambda>rv. \<top>"
and E'="\<lambda>rv. {s. s \<in> ({s'. inl_rrel arrel rv (axf s')}
\<inter> {s'. inl_rrel arrel rv (axf s)})}"
in ccorres_master_split [OF pre])
apply (erule hsw)
apply (rule ccorres_abstract[OF ceqv])
apply (rule ccorres_gen_asm2)
apply (clarsimp elim!: inl_inrE simp: lift_def)
apply (erule post)
apply (rule_tac xf'=axf in ccorres_abstract, rule ceqv_refl)
apply (rule ccorres_gen_asm2)
apply (clarsimp simp: lift_def throwError_def
elim!: inl_inrE)
apply (rule ccorres_return_Skip'[simplified])
apply (rule hoare_strengthen_post, rule hoare[unfolded validE_R_def validE_def])
apply simp
apply (simp add: ccHoarePost_def inl_rrel_def split: sum.split)
apply (rule hoare[unfolded ccHoarePost_def])
done
lemma ccorres_symb_exec_r_rv_abstract:
"\<lbrakk> \<And>s. \<Gamma>\<turnstile> (R' \<inter> {s'. R s \<and> (s, s') \<in> sr}) c ({s'. (s, s') \<in> sr} \<inter> {s. F (xf' s)});
\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' y (y' rv');
\<And>rv'. F rv' \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf P (Q rv') hs a (y' rv');
\<Gamma> \<turnstile>\<^bsub>/Ft\<^esub> P' c {s. \<forall>rv. F (xf' s) \<longrightarrow> s \<in> Q (xf' s)} \<rbrakk>
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) (P' \<inter> R') hs a (c;;y)"
apply (rule ccorres_guard_imp2)
apply (rule ccorres_add_return,
rule_tac r'="\<lambda>rv rv'. F rv'" and xf'=xf'
in ccorres_split_nothrow)
apply (rule_tac P'=R' in ccorres_from_vcg[where P=R])
apply (clarsimp simp add: return_def Int_def conj_comms)
apply assumption
apply fastforce
apply wp
apply simp
apply simp
done
lemma ccorres_symb_exec_r_known_rv:
"\<lbrakk> \<And>s. \<Gamma>\<turnstile> (R' \<inter> {s'. R s \<and> (s, s') \<in> sr}) c ({s'. (s, s') \<in> sr} \<inter> {s. xf' s = val});
\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' y (y' rv');
ccorres_underlying sr \<Gamma> r xf arrel axf P Q hs a (y' val);
\<Gamma> \<turnstile>\<^bsub>/Ft\<^esub> P' c {s. \<forall>rv. (xf' s) = val \<longrightarrow> s \<in> Q} \<rbrakk>
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) (P' \<inter> R') hs a (c;;y)"
by (rule_tac F="\<lambda>rv'. rv' = val" and xf'=xf'
in ccorres_symb_exec_r_rv_abstract,
simp_all)
lemma ccorres_symb_exec_r_abstract_UNIV:
"\<lbrakk> \<And>s. \<Gamma>\<turnstile> (R' \<inter> {s'. R s \<and> (s, s') \<in> sr}) m ({s'. (s, s') \<in> sr} \<inter> {s. F (xf' s)});
\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' y (y' rv');
\<And>rv'. F rv' \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf P (Q rv') hs a (y' rv');
guard_is_UNIV (\<lambda>rv rv'. F rv') xf' (\<lambda>rv. Q) \<rbrakk>
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) R' hs a (m;;y)"
apply (rule ccorres_guard_imp2)
apply (rule ccorres_add_return,
rule_tac r'="\<lambda>rv rv'. F rv'" and xf'=xf'
in ccorres_split_nothrow_novcg)
apply (rule_tac P'=R' in ccorres_from_vcg[where P=R])
apply (clarsimp simp add: return_def Int_def conj_comms)
apply assumption
apply fastforce
apply wp
apply (simp add: guard_is_UNIV_def)
apply simp
done
lemma ccorres_symb_exec_r_known_rv_UNIV:
"\<lbrakk> \<And>s. \<Gamma>\<turnstile> (R' \<inter> {s'. R s \<and> (s, s') \<in> sr}) m ({s'. (s, s') \<in> sr} \<inter> {s. xf' s = val});
\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' y (y' rv');
ccorres_underlying sr \<Gamma> r xf arrel axf P Q hs a (y' val); guard_is_UNIV (\<lambda>rv rv'. rv' = val) xf' (\<lambda>rv rv'. Q) \<rbrakk>
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf (P and R) R' hs a (m;;y)"
by (rule_tac F="\<lambda>rv'. rv' = val" and xf'=xf'
in ccorres_symb_exec_r_abstract_UNIV,
simp_all)
lemma ccorres_seq_cond_raise:
"ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a (Cond S x y ;; c)
= ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a (Cond S (x ;; c) (y ;; c))"
apply (rule ccorres_semantic_equiv)
apply (rule semantic_equivI)
apply (auto elim!: exec_Normal_elim_cases intro: exec.intros)
done
lemma ccorres_seq_cond_empty:
"ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a (Cond {} x y ;; c) = ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a (y ;; c)"
apply (rule ccorres_semantic_equiv)
apply (rule semantic_equivI)
apply (auto elim!: exec_Normal_elim_cases intro: exec.intros)
done
lemma ccorres_seq_cond_univ:
"ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a (Cond UNIV x y ;; c) = ccorres_underlying sr \<Gamma> r xf arrel axf G G' hs a (x ;; c)"
apply (rule ccorres_semantic_equiv)
apply (rule semantic_equivI)
apply (auto elim!: exec_Normal_elim_cases intro: exec.intros)
done
lemma ccorres_cond_true:
"ccorres_underlying sr \<Gamma> r xf arrel axf R R' hs a c \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf R (R' \<inter> P) hs a (Cond P c d)"
apply (rule ccorres_guard_imp2)
apply (erule ccorres_cond_weak)
apply (rule ccorres_gen_asm2 [where P = False])
apply simp
apply simp
done
lemma ccorres_cond_false:
"ccorres_underlying sr \<Gamma> r xf arrel axf R R' hs a d
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf R (R' \<inter> - P) hs a (Cond P c d)"
apply (rule ccorres_guard_imp2)
apply (rule ccorres_cond_weak)
apply (rule ccorres_gen_asm2 [where P = False])
apply simp
apply simp
apply simp
done
lemma ccorres_cond_false_seq:
"ccorres_underlying sr \<Gamma> r xf arrel axf R R' hs a (d ;; e)
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf R (R' \<inter> - P) hs a (Cond P c d ;; e)"
apply (simp add: ccorres_seq_cond_raise)
apply (erule ccorres_cond_false)
done
lemma ccorres_cond_true_seq:
"ccorres_underlying sr \<Gamma> r xf arrel axf R R' hs a (c ;; e)
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf R (R' \<inter> P) hs a (Cond P c d ;; e)"
apply (simp add: ccorres_seq_cond_raise)
apply (erule ccorres_cond_true)
done
lemma ccorres_Catch:
"ccorres_underlying sr \<Gamma> r xf r xf P P' (d#hs) a c \<Longrightarrow>
ccorres_underlying sr \<Gamma> r xf r xf P P' hs a (Catch c d)"
apply (clarsimp simp: ccorres_underlying_def split_def)
apply (drule (1) bspec)
apply clarsimp
apply (erule exec_handlers.cases)
apply clarsimp
apply (erule_tac x=na in allE)
apply (erule_tac x=ta in allE)
apply (erule impE)
apply (erule exec_elim_cases, simp_all)[1]
apply (erule EHAbrupt)
apply (erule EHAbrupt)
apply simp
apply (clarsimp split: xstate.splits)
apply (simp add: unif_rrel_def)
apply fastforce
apply clarsimp
apply (erule exec_elim_cases, simp_all)[1]
apply (erule_tac x="length hs" in allE)
apply (erule_tac x=ta in allE)
apply (erule impE)
apply (erule EHAbrupt)
apply (erule (1) EHOther)
apply (clarsimp simp: unif_rrel_def split: xstate.splits)
apply (erule_tac x="length (d#hs)" in allE)
apply (erule_tac x=ta in allE)
apply (erule impE)
apply (erule (1) EHOther)
apply (clarsimp simp: unif_rrel_def split: xstate.splits)
apply clarsimp
done
lemma ccorres_cond_seq:
"ccorres_underlying sr \<Gamma> r xf r' xf' P P' hs H (Cond Q (c;;d) (c';;d)) \<Longrightarrow>
ccorres_underlying sr \<Gamma> r xf r' xf' P P' hs H (Cond Q c c';; d)"
apply (erule ccorres_semantic_equivD2)
apply (simp only: semantic_equiv_def)
apply (clarsimp simp: ceqv_def)
apply (rule iffI)
apply (erule exec_elim_cases, simp_all)[1]
apply (erule exec_elim_cases, simp_all)[1]
apply (erule exec.CondTrue)
apply (erule (1) exec.Seq)
apply (erule exec.CondFalse)
apply (erule (1) exec.Seq)
apply (erule exec_elim_cases, simp_all)[1]
apply (erule exec_elim_cases, simp_all)[1]
apply (rule exec.Seq)
apply (erule exec.CondTrue)
apply assumption
apply assumption
apply (erule exec_elim_cases, simp_all)[1]
apply (rule exec.Seq)
apply (erule exec.CondFalse)
apply assumption
apply assumption
done
lemma ccorres_assume_pre:
assumes "\<And>s. P s \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf r' xf' (P and (\<lambda>s'. s' = s)) P' hs H C"
shows "ccorres_underlying sr \<Gamma> r xf r' xf' P P' hs H C"
apply (clarsimp simp: ccorres_underlying_def)
apply (frule assms)
apply (simp add: ccorres_underlying_def)
apply blast
done
lemma ccorres_name_pre:
"(\<And>s. P s \<Longrightarrow> ccorres_underlying sr \<Gamma> r xf r' xf' (\<lambda>s'. s' = s) P' hs H C)
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf r' xf' P P' hs H C"
apply (rule ccorres_assume_pre)
apply (rule ccorres_guard_imp)
apply fastforce
apply simp
apply simp
done
(* using "subst bind_assoc[symmetric]" works, but causes flex-flex pairs in ccorres
   proofs; using simp won't create flex-flex pairs, but will rearrange everything *)
lemma ccorres_lhs_assoc:
assumes cc: "ccorres_underlying sr E r xf arrel axf G G' hs (m >>= f >>= g) c"
shows "ccorres_underlying sr E r xf arrel axf G G' hs (do x \<leftarrow> m; f x >>= g od) c"
using cc by (simp add: bind_assoc)
(* FIXME: move *)
lemma ccorres_grab_asm:
"(Q \<Longrightarrow> ccorres_underlying sr G rr xf ar ax P P' hs f g) \<Longrightarrow>
ccorres_underlying sr G rr xf ar ax (P and K Q) P' hs f g"
by (fastforce simp: ccorres_underlying_def)
end
#=
XYZ reader
This module contains functions that can read an XYZ file and return a
vector of dictionaries with its contents.
=#
"""
read_xyz(datapath_or_datastring; is_datafile=true)
Read data in .xyz format, from either a local file or a URL.
`datapath_or_datastring`: Either the path to the XYZ file (can be relative
or absolute), or a URL from which the
XYZ data can be downloaded.
`is_datafile`: Either `true` (default) if passing the filepath to the data,
or `false` if passing a URL.
`return`: A vector of the atoms in the order that
they appear in the file, stored in
dictionaries with keys "symbol", "x", "y",
and "z".
"""
function read_xyz(datapath_or_datastring; is_datafile=true)
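    # Note: `decode` is assumed to come from StringEncodings.jl (loaded by the
    # enclosing module); `Base.download` fetches a URL into a temporary file.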
if is_datafile
records = decode(read(datapath_or_datastring),"UTF-8")
else
req = Base.download(datapath_or_datastring)
records = decode(read(req),"UTF-8")
end
lines = split(records, "\n")
atoms =Vector{Dict}(undef,0)
for line in lines
rs = match(
r"^\s*([\w]+)\s+([\w\.\+\-]+)\s+([\w\.\+\-]+)\s+([\w\.\+\-]+)\s*", string(line)
)
if !(rs isa Nothing) && (length(rs.captures) == 4)
atom = Dict(
"symbol" => rs.captures[1],
"x" => parse(Float64,rs.captures[2]),
"y" => parse(Float64,rs.captures[3]),
"z" => parse(Float64,rs.captures[4])
)
push!(atoms, atom)
end
end
return atoms
end
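# Illustrative usage sketch (the file name "molecule.xyz" is hypothetical):
#
#     atoms = read_xyz("molecule.xyz")
#     for atom in atoms
#         println(atom["symbol"], " @ (", atom["x"], ", ", atom["y"], ", ", atom["z"], ")")
#     end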
using Test
using Records
using SparseArrays
include("test_listrecords.jl")
include("test_queuerecords.jl")
include("test_frames.jl")
include("test_conversions.jl")
include("test_io.jl")
If $S$ is an open set and $S \subseteq \overline{T}$, then $\overline{S \cap T} = \overline{S}$.
lemma dist_add_cancel [simp]: "dist (a + b) (a + c) = dist b c"
Require Import Coq.Program.Basics.
Require Import NArith.
Require Import monad state.
Require Import Extraction.
Extraction Language OCaml.
(* Extraction Language Haskell. *)
Definition TestM A := state nat A.
(* Hmm.. it doesn't resolve the instance properly when we use a type
synonym like this. *)
Definition test : state nat bool :=
ret tt >> put 123 >> get >>=
fun s => if Nat.even s then ret true else ret false.
Definition test' : state nat bool :=
put 123 >> get >>=
fun s => if Nat.even s then ret true else ret false.
Definition test'' : state nat bool :=
put 123;;
s <-- get;
if Nat.even s then ret true else ret false.
Lemma test_test'_eq : test = test'.
Proof.
unfold test, test'.
destruct (Monad_state nat).
rewrite monad_left_id; auto.
Qed.
Lemma test_test''_eq : test = test''.
Proof. firstorder. Qed.
Definition test_result := fst (runState test 0).
Eval compute in test_result.
Definition add1 : state nat unit :=
modify (fun n => n + 1).
Definition add1_test : state nat nat :=
add1 >> get.
Definition add1_result := fst (runState add1_test 0).
Eval compute in add1_result.
Lemma add1_test_spec n :
evalState add1_test n = S n.
Proof.
unfold evalState, Basics.compose; simpl.
rewrite PeanoNat.Nat.add_comm; auto.
Qed.
Definition addn : nat -> state nat unit :=
modify ∘ Nat.add.
Lemma addn_spec n m :
execState (addn m) n = n + m.
Proof. cbv delta; simpl; firstorder. Qed.
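(* A small end-to-end check combining [addn] with the state runner above
   (a sketch that relies only on definitions already in this file). *)
Definition addn_twice_result := execState (addn 2 >> addn 3) 0.
Eval compute in addn_twice_result. (* expected: 5 *)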
(* Extraction "extract/test_result" test_result. *)
|
% STE_WLP Short-time-energy-weighted linear prediction
% [A,w] = ste_wlp(s,p,m,k)
%
% Description
% This function fits linear prediction coefficients to the analysis
% frame using the basic form of weighted linear prediction (WLP), i.e.,
% by weighting each value of the squared prediction error by the short-time
% energy (STE) of the previous samples.
%
% Inputs
% s : Speech signal frame [samples]
% p : Order of WLP analysis
%  m : Length of the STE window (defaults to p)
%  k : Delay of the STE window (defaults to 1)
%
% Outputs
% A : Linear prediction inverse filter coefficients
% w : the STE weighting function
%
% Notes
% Even though coefficients are solved using a criterion similar to the
% autocorrelation method of linear prediction, because of the weighting
% the filter is not guaranteed to be stable. If a stable synthesis filter
% is required, use STE_SWLP instead.
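%
% Criterion
%  The following is an informal sketch added for clarity (w(n) denotes the
%  STE weighting function returned as the second output): the coefficients
%  minimize the STE-weighted squared prediction error
%    E = sum_n w(n) * ( s(n) - sum_{j=1..p} a(j)*s(n-j) )^2,
%  so setting w(n) = 1 for all n recovers the ordinary autocorrelation
%  method of linear prediction.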
%
% Example
% A = ste_wlp(s,p) gives the linear predictive inverse filter
% coefficients optimized using STE-WLP
%
% References
% [1] C. Ma, Y. Kamp and L. F. Willems, "Robust signal selection for
% linear prediction analysis of voiced speech", Speech Communication, vol.
% 12, no. 1, pp. 69-81, 1993.
%
% Copyright (c) 2013 Aalto University
%
% License
% This file is under the LGPL license, you can
% redistribute it and/or modify it under the terms of the GNU Lesser General
% Public License as published by the Free Software Foundation, either version 3
% of the License, or (at your option) any later version. This file is
% distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
% without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
% PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
% details.
%
% This function is part of the Common Speech Processing Repository
% http://covarep.github.io/covarep/
%
% Octave Compatible
%
% Author
% Jouni Pohjalainen [email protected]
%
% $Id <info set by the versioning system> $
function [A,w] = ste_wlp(s,p,m,k)
% handle the special case of all-zero frames
if all(s==0)
s = eps*randn(size(s));
end
N = length(s);
% default value for the STE window length
if nargin<3
m = p;
end
% default value for the STE window lag
if nargin<4
k = 1;
end
% compute STE weighting function
w = stew(s,m,p,k)+eps;
% apply the square root of the weighting function to the delayed versions
% of the signal from lag 0 to lag p
wsr = sqrt(w(1:(N+p)));
Y = zeros(N+p,p+1);
for i1=0:p
Y(:,i1+1) = [zeros(i1,1);s;zeros(p-i1,1)].*wsr;
end
% compute weighted autocorrelations
R = (Y'*Y)/N;
% solve the p predictor coefficients
A = R(2:end,2:end)\R(2:end,1);
% convert to inverse filter form
A = [1;-A]';
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function w = stew(x,m,p,k)
% STE weighting for an order p predictor using window of length m delayed
% by k samples
w = conv([zeros(k,1);x(:)].^2,ones(m,1));
w = [w;zeros(p-m,1)];
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory shortcircuit
imports "../CTranslation"
begin
install_C_file "shortcircuit.c"
context shortcircuit
begin
thm f_body_def
thm deref_body_def
thm test_deref_body_def
thm imm_deref_body_def
thm simple_body_def
thm condexp_body_def
lemma semm: "\<Gamma> \<turnstile> \<lbrace> \<acute>p = NULL \<rbrace> Call test_deref_'proc \<lbrace> \<acute>ret__int = 0 \<rbrace>"
apply vcg
apply simp
done
lemma condexp_semm:
"\<Gamma> \<turnstile> \<lbrace> \<acute>i = 10 & \<acute>ptr = NULL & \<acute>ptr2 = NULL \<rbrace>
Call condexp_'proc
\<lbrace> \<acute>ret__int = 23 \<rbrace>"
apply vcg
apply (simp add: word_sless_def word_sle_def)
done
end (* context *)
end (* theory *)
|
{-# IMPORT Issue223 #-}
data A : Set
{-# COMPILED_DECLARE_DATA A Issue223.A #-}
data B : Set
{-# COMPILED_DECLARE_DATA B Issue223.B #-}
data A where
BA : B → A
{-# COMPILED_DATA A Issue223.A Issue223.BA #-}
data B where
AB : A → B
BB : B
{-# COMPILED_DATA B Issue223.B Issue223.AB Issue223.BB #-}
|
! RUN: bbc -emit-fir %s -o - | FileCheck %s
! CHECK-LABEL: not_test
subroutine not_test
integer :: source
integer :: destination
! CHECK_LABEL: not_test
! CHECK: %[[dest:.*]] = fir.alloca i32 {bindc_name = "destination", uniq_name = "_QFnot_testEdestination"}
! CHECK: %[[source:.*]] = fir.alloca i32 {bindc_name = "source", uniq_name = "_QFnot_testEsource"}
! CHECK: %[[loaded_source:.*]] = fir.load %[[source]] : !fir.ref<i32>
! CHECK: %[[all_ones:.*]] = arith.constant -1 : i32
! CHECK: %[[result:.*]] = arith.xori %[[loaded_source]], %[[all_ones]] : i32
! CHECK: fir.store %[[result]] to %[[dest]] : !fir.ref<i32>
! CHECK: return
destination = not(source)
end subroutine
|
module RGlasso
export glasso
function glasso(S::Matrix, rho::Float64; rcmd="R")
writedlm("/tmp/RGlasso_tmp_matrix.txt", S)
# start R and tell it to run glasso with a given rho value
rcode = "library(glasso);" *
"S = as.matrix(read.table('/tmp/RGlasso_tmp_matrix.txt', header=FALSE));" *
"out = glasso(S, $rho);" *
"write.table(out\$wi, '/tmp/RGlasso_tmp_matrix.txt', row.names=FALSE, col.names=FALSE)"
run(pipeline(`$rcmd -q -e $rcode`, stdout=DevNull))
w = readdlm("/tmp/RGlasso_tmp_matrix.txt")
rm("/tmp/RGlasso_tmp_matrix.txt")
w
end
end # module
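# Usage sketch (assumes R and the `glasso` R package are installed and `R` is
# on the PATH; `X` and the 0.1 regularization value are placeholders):
#
#   using Statistics
#   Theta = RGlasso.glasso(cov(X), 0.1)   # estimated sparse inverse covariance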
|
import category_theory.equivalence
open category_theory
variables {C : Type*} [category C]
variables {D : Type*} [category D]
lemma equiv_reflects_mono {X Y : C} (f : X ⟶ Y) (e : C ≌ D)
(hef : mono (e.functor.map f)) : mono f :=
begin
-- My first instinct is always to call `tidy`, to see how far it gets:
tidy,
-- It seems it unfolded the definition of `mono` in the goal for us,
-- and introduced some new hypotheses. That seems pretty reasonable for this problem!
-- If you like, you can ask `tidy` what it did by calling `tidy?`.
-- Often some human intervention is required to clean up the output,
-- but on this occasion it's pretty good.
sorry
end
|
(* Title: HOL/Metis_Examples/Type_Encodings.thy
Author: Jasmin Blanchette, TU Muenchen
Example that exercises Metis's (and hence Sledgehammer's) type encodings.
*)
section {*
Example that Exercises Metis's (and Hence Sledgehammer's) Type Encodings
*}
theory Type_Encodings
imports Main
begin
declare [[metis_new_skolem]]
text {* Setup for testing Metis exhaustively *}
lemma fork: "P \<Longrightarrow> P \<Longrightarrow> P" by assumption
ML {*
val type_encs =
["erased",
"poly_guards",
"poly_guards?",
"poly_guards??",
"poly_guards@",
"poly_tags",
"poly_tags?",
"poly_tags??",
"poly_tags@",
"poly_args",
"poly_args?",
"raw_mono_guards",
"raw_mono_guards?",
"raw_mono_guards??",
"raw_mono_guards@",
"raw_mono_tags",
"raw_mono_tags?",
"raw_mono_tags??",
"raw_mono_tags@",
"raw_mono_args",
"raw_mono_args?",
"mono_guards",
"mono_guards?",
"mono_guards??",
"mono_tags",
"mono_tags?",
"mono_tags??",
"mono_args"]
fun metis_exhaust_tac ctxt ths =
let
fun tac [] st = all_tac st
| tac (type_enc :: type_encs) st =
st (* |> tap (fn _ => tracing (@{make_string} type_enc)) *)
|> ((if null type_encs then all_tac else rtac @{thm fork} 1)
THEN Metis_Tactic.metis_tac [type_enc]
ATP_Problem_Generate.combsN ctxt ths 1
THEN COND (has_fewer_prems 2) all_tac no_tac
THEN tac type_encs)
in tac type_encs end
*}
method_setup metis_exhaust = {*
Attrib.thms >>
(fn ths => fn ctxt => SIMPLE_METHOD (metis_exhaust_tac ctxt ths))
*} "exhaustively run Metis with all type encodings"
text {* Miscellaneous tests *}
lemma "x = y \<Longrightarrow> y = x"
by metis_exhaust
lemma "[a] = [Suc 0] \<Longrightarrow> a = 1"
by (metis_exhaust last.simps One_nat_def)
lemma "map Suc [0] = [Suc 0]"
by (metis_exhaust list.map)
lemma "map Suc [1 + 1] = [Suc 2]"
by (metis_exhaust list.map nat_1_add_1)
lemma "map Suc [2] = [Suc (1 + 1)]"
by (metis_exhaust list.map nat_1_add_1)
definition "null xs = (xs = [])"
lemma "P (null xs) \<Longrightarrow> null xs \<Longrightarrow> xs = []"
by (metis_exhaust null_def)
lemma "(0\<Colon>nat) + 0 = 0"
by (metis_exhaust add_0_left)
end
|
import for_mathlib.snake_lemma2
import category_theory.abelian.homology
import for_mathlib.exact_seq2
noncomputable theory
open category_theory category_theory.limits
variables {𝒜 : Type*} [category 𝒜] [abelian 𝒜]
namespace category_theory
local notation `kernel_map` := kernel.map _ _ _ _
local notation `cokernel_map` := cokernel.map _ _ _ _
namespace snake
lemma col_exact_aux (X : cochain_complex 𝒜 ℤ) : exact_seq 𝒜
[ (kernel.ι (homological_complex.d_to X 0))
, (kernel.lift (homological_complex.d_from X 0)
(homological_complex.d_to X 0) (by simp))
, (homology.π' (homological_complex.d_to X 0)
(homological_complex.d_from X 0) (by simp))] :=
begin
apply exact_seq.cons,
{ rw abelian.exact_iff,
refine ⟨by { ext, simp }, _⟩,
have :
kernel.ι (kernel.lift (homological_complex.d_from X 0) (homological_complex.d_to X 0) _) =
kernel.lift _ (kernel.ι _) _ ≫ kernel.ι (homological_complex.d_to X 0),
by { simp },
rw this,
simp only [category.assoc, cokernel.condition, comp_zero],
have : homological_complex.d_to X 0 =
kernel.lift (homological_complex.d_from X 0) (homological_complex.d_to X 0) (by simp) ≫
kernel.ι _, by simp,
slice_lhs 2 3 { rw this },
rw kernel.condition_assoc,
simp },
{ rw ← exact_iff_exact_seq,
change exact _ (_ ≫ _),
rw exact_comp_iso,
apply abelian.exact_cokernel }
end
lemma row_exact₁_aux (X Y Z : cochain_complex 𝒜 ℤ)
(f : X ⟶ Y) (g : Y ⟶ Z) (h : exact (f.f (-1)) (g.f (-1)))
[epi (g.f (-1))] [epi (g.f 0)] [epi (g.f 1)]
[mono (f.f (-1))] [mono (f.f 0)] [mono (f.f 1)] :
exact (homological_complex.hom.prev f 0) (homological_complex.hom.prev g 0) :=
begin
rw [f.prev_eq, g.prev_eq],
rotate 2, exact (-1), swap, exact (-1), simp, swap, simp,
simp,
rw [← category.assoc, exact_comp_iso],
apply category_theory.exact_comp_inv_hom_comp _ h,
end
lemma row_exact₂_aux (X Y Z : cochain_complex 𝒜 ℤ)
(f : X ⟶ Y) (g : Y ⟶ Z) (h0 : exact (f.f 0) (g.f 0)) (h1 : exact (f.f 1) (g.f 1))
[epi (g.f (-1))] [epi (g.f 0)] [epi (g.f 1)]
[mono (f.f (-1))] [mono (f.f 0)] [mono (f.f 1)] :
exact
(kernel.lift (homological_complex.d_from Y 0)
(kernel.ι (homological_complex.d_from X 0) ≫ f.f 0)
(by simp))
(kernel.lift (homological_complex.d_from Z 0)
(kernel.ι (homological_complex.d_from Y 0) ≫ g.f 0)
(by simp)) :=
begin
haveI : mono (homological_complex.hom.next f 0),
{ rw f.next_eq,
rotate 2, exact 1, swap, simp,
apply_with mono_comp { instances := ff },
swap,
apply_with mono_comp { instances := ff },
all_goals { apply_instance } },
have : exact (homological_complex.hom.next f 0) (homological_complex.hom.next g 0),
{ rw [f.next_eq, g.next_eq],
rotate 2, exact 1, swap, exact 1, simp, swap, simp,
simp,
rw [← category.assoc, exact_comp_iso],
apply category_theory.exact_comp_inv_hom_comp _ h1 },
have S := mk_of_sequence_hom
(X.X 0)
(Y.X 0)
(Z.X 0)
(X.X_next 0)
(Y.X_next 0)
(Z.X_next 0)
(f.f 0) (g.f 0) (X.d_from 0) (Y.d_from 0) (Z.d_from 0)
(f.next 0) (g.next 0) (by simp) (by simp) h0 this,
exact S.six_term_exact_seq.pair,
end
lemma mk_of_homology (X Y Z : cochain_complex 𝒜 ℤ)
(f : X ⟶ Y) (g : Y ⟶ Z)
(hn1 : exact (f.f (-1)) (g.f (-1))) (h0 : exact (f.f 0) (g.f 0)) (h1 : exact (f.f 1) (g.f 1))
[epi (g.f (-1))] [epi (g.f 0)] [epi (g.f 1)]
[mono (f.f (-1))] [mono (f.f 0)] [mono (f.f 1)] :
snake
-- the objects
(kernel (X.d_to 0)) (kernel (Y.d_to 0)) (kernel (Z.d_to 0))
(X.X_prev 0) (Y.X_prev 0) (Z.X_prev 0)
(kernel (X.d_from 0)) (kernel (Y.d_from 0)) (kernel (Z.d_from 0))
((homology_functor _ _ 0).obj X) ((homology_functor _ _ 0).obj Y) ((homology_functor _ _ 0).obj Z)
-- the maps
(kernel.map _ _ (f.prev _) (f.f _) (by simp)) (kernel.map _ _ (g.prev _) (g.f _) (by simp))
(kernel.ι _) (kernel.ι _) (kernel.ι _)
(f.prev _) (g.prev _)
(kernel.lift _ (X.d_to _) (by simp)) (kernel.lift _ (Y.d_to _) (by simp)) (kernel.lift _ (Z.d_to _) (by simp))
(kernel.map _ _ (f.f _) (f.next _) (by simp)) (kernel.map _ _ (g.f _) (g.next _) (by simp))
(homology.π' _ _ _) (homology.π' _ _ _) (homology.π' _ _ _)
((homology_functor _ _ _).map f) ((homology_functor _ _ _).map g) :=
{ row_exact₁ := row_exact₁_aux _ _ _ _ _ hn1,
row_exact₂ := row_exact₂_aux _ _ _ _ _ h0 h1,
row_epi := begin
rw g.prev_eq,
rotate 2, exact (-1),
swap, simp,
apply_with epi_comp { instances := ff },
swap,
apply_with epi_comp { instances := ff },
all_goals { apply_instance }
end,
row_mono := infer_instance,
col_exact_a := col_exact_aux _,
col_exact_b := col_exact_aux _,
col_exact_c := col_exact_aux _,
col_mono_a := infer_instance,
col_mono_b := infer_instance,
col_mono_c := infer_instance,
col_epi_a := epi_comp _ _,
col_epi_b := epi_comp _ _,
col_epi_c := epi_comp _ _,
sq_a₀ := by simp,
sq_b₀ := by simp,
sq_a₁ := by { ext, simp },
sq_b₁ := by { ext, simp },
sq_a₂ := by simp,
sq_b₂ := by simp }
end snake
end category_theory
|
import Data.Nat
namespace MaybeList
public export
(>>=) : (Maybe . List) a -> (a -> (Maybe . List) b) -> (Maybe . List) b
(>>=) = (>>=) @{Compose}
public export
pure : a -> (Maybe . List) a
pure = Just . (:: Nil)
public export
guard : Bool -> (Maybe . List) ()
guard False = Nothing
guard True = pure ()
namespace ListMaybe
public export
(>>=) : (List . Maybe) a -> (a -> (List . Maybe) b) -> (List . Maybe) b
(>>=) = (>>=) @{Compose}
public export
(>>) : (List . Maybe) () -> Lazy ((List . Maybe) b) -> (List . Maybe) b
(>>) = (>>) @{Compose}
public export
pure : a -> (List . Maybe) a
pure = (:: Nil) . Just
public export
guard : Bool -> (List . Maybe) ()
guard False = []
guard True = ListMaybe.pure ()
-- Deliberately introduce ambiguity
namespace ListMaybe2
public export
(>>=) : (List . Maybe) a -> (a -> (List . Maybe) b) -> (List . Maybe) b
(>>=) = (>>=) @{Compose}
-- "Qualified do" should propagate the namespace to nested bangs.
-- "pure" and "guard" calls generated by comprehensions are
-- also subject to "qualified do".
partial
propagateNSToBangs : (List . Maybe) (Nat, Nat)
propagateNSToBangs = ListMaybe.do
let x = ![x | x <- map Just [1..10], modNat x 2 == 0]
let f = !(map Just $ Prelude.do [(+ x) | x <- [1..3]])
xs <- [MaybeList.do
Just [!(Just $ Prelude.do [(*x) | x <- [1..10], modNat x 2 == 1])
!(Just [4, 5, 6])]]
y <- map Just xs
[Just (f (10 * x), y)]
|
{-# OPTIONS --safe --experimental-lossy-unification #-}
module Cubical.Homotopy.Group.S3 where
{-
This file contains a summary of what remains for π₄S³≅ℤ/2 to be proved.
See the module π₄S³ at the end of this file.
-}
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Pointed
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Function
open import Cubical.Data.Nat
open import Cubical.Data.Sum
open import Cubical.Data.Sigma
open import Cubical.Data.Int
renaming (_·_ to _·ℤ_ ; _+_ to _+ℤ_)
open import Cubical.Homotopy.Group.Base
open import Cubical.Homotopy.HopfInvariant.Base
open import Cubical.Homotopy.HopfInvariant.Homomorphism
open import Cubical.Homotopy.HopfInvariant.HopfMap
open import Cubical.Homotopy.Whitehead
open import Cubical.Algebra.Group.Instances.IntMod
open import Cubical.Foundations.Isomorphism
open import Cubical.HITs.Sn
open import Cubical.HITs.SetTruncation
open import Cubical.Algebra.Group
renaming (ℤ to ℤGroup ; Bool to BoolGroup ; Unit to UnitGroup)
open import Cubical.Algebra.Group.ZAction
[_]× : ∀ {ℓ} {X : Pointed ℓ} {n m : ℕ}
→ π' (suc n) X × π' (suc m) X → π' (suc (n + m)) X
[_]× (f , g) = [ f ∣ g ]π'
-- Some type abbreviations (unproved results)
π₃S²-gen : Type
π₃S²-gen = gen₁-by (π'Gr 2 (S₊∙ 2)) ∣ HopfMap ∣₂
π₄S³≅ℤ/something : GroupEquiv ℤGroup (π'Gr 2 (S₊∙ 2))
→ Type
π₄S³≅ℤ/something eq =
GroupIso (π'Gr 3 (S₊∙ 3))
(ℤ/ abs (invEq (fst eq)
[ ∣ idfun∙ _ ∣₂ , ∣ idfun∙ _ ∣₂ ]×))
miniLem₁ : Type
miniLem₁ = (g : ℤ) → gen₁-by ℤGroup g → (g ≡ 1) ⊎ (g ≡ -1)
miniLem₂ : Type
miniLem₂ = (ϕ : GroupEquiv ℤGroup ℤGroup) (g : ℤ)
→ (abs g ≡ abs (fst (fst ϕ) g))
-- some minor group lemmas
groupLem-help : miniLem₁ → (g : ℤ) →
gen₁-by ℤGroup g →
(ϕ : GroupHom ℤGroup ℤGroup) →
(fst ϕ g ≡ pos 1) ⊎ (fst ϕ g ≡ negsuc 0)
→ isEquiv (fst ϕ)
groupLem-help grlem1 g gen ϕ = main (grlem1 g gen)
where
isEquiv- : isEquiv (-_)
isEquiv- = isoToIsEquiv (iso -_ -_ -Involutive -Involutive)
lem : fst ϕ (pos 1) ≡ pos 1 → fst ϕ ≡ idfun _
lem p = funExt lem2
where
lem₁ : (x₁ : ℕ) → fst ϕ (pos x₁) ≡ idfun ℤ (pos x₁)
lem₁ zero = IsGroupHom.pres1 (snd ϕ)
lem₁ (suc zero) = p
lem₁ (suc (suc n)) =
IsGroupHom.pres· (snd ϕ) (pos (suc n)) 1
∙ cong₂ _+ℤ_ (lem₁ (suc n)) p
lem2 : (x₁ : ℤ) → fst ϕ x₁ ≡ idfun ℤ x₁
lem2 (pos n) = lem₁ n
lem2 (negsuc zero) =
IsGroupHom.presinv (snd ϕ) 1 ∙ cong (λ x → pos 0 - x) p
lem2 (negsuc (suc n)) =
(cong (fst ϕ) (sym (+Comm (pos 0) (negsuc (suc n))))
∙ IsGroupHom.presinv (snd ϕ) (pos (suc (suc n))))
∙∙ +Comm (pos 0) _
∙∙ cong (-_) (lem₁ (suc (suc n)))
lem₂ : fst ϕ (negsuc 0) ≡ pos 1 → fst ϕ ≡ -_
lem₂ p = funExt lem2
where
s = IsGroupHom.presinv (snd ϕ) (negsuc 0)
∙∙ +Comm (pos 0) _
∙∙ cong -_ p
lem2 : (n : ℤ) → fst ϕ n ≡ - n
lem2 (pos zero) = IsGroupHom.pres1 (snd ϕ)
lem2 (pos (suc zero)) = s
lem2 (pos (suc (suc n))) =
IsGroupHom.pres· (snd ϕ) (pos (suc n)) 1
∙ cong₂ _+ℤ_ (lem2 (pos (suc n))) s
lem2 (negsuc zero) = p
lem2 (negsuc (suc n)) =
IsGroupHom.pres· (snd ϕ) (negsuc n) (negsuc 0)
∙ cong₂ _+ℤ_ (lem2 (negsuc n)) p
main : (g ≡ pos 1) ⊎ (g ≡ negsuc 0)
→ (fst ϕ g ≡ pos 1) ⊎ (fst ϕ g ≡ negsuc 0)
→ isEquiv (fst ϕ)
main (inl p) =
J (λ g p → (fst ϕ g ≡ pos 1)
⊎ (fst ϕ g ≡ negsuc 0) → isEquiv (fst ϕ))
(λ { (inl x) → subst isEquiv (sym (lem x)) (snd (idEquiv _))
; (inr x) → subst isEquiv
(sym (lem₂ (IsGroupHom.presinv (snd ϕ) (pos 1)
∙ (cong (λ x → pos 0 - x) x))))
isEquiv- })
(sym p)
main (inr p) =
J (λ g p → (fst ϕ g ≡ pos 1)
⊎ (fst ϕ g ≡ negsuc 0) → isEquiv (fst ϕ))
(λ { (inl x) → subst isEquiv (sym (lem₂ x)) isEquiv-
; (inr x) → subst isEquiv
(sym (lem (
IsGroupHom.presinv (snd ϕ) (negsuc 0)
∙ cong (λ x → pos 0 - x) x)))
(snd (idEquiv _))})
(sym p)
groupLem : {G : Group₀}
→ miniLem₁
→ GroupEquiv ℤGroup G
→ (g : fst G)
→ gen₁-by G g
→ (ϕ : GroupHom G ℤGroup)
→ (fst ϕ g ≡ 1) ⊎ (fst ϕ g ≡ -1)
→ isEquiv (fst ϕ)
groupLem {G = G} s =
GroupEquivJ
(λ G _ → (g : fst G)
→ gen₁-by G g
→ (ϕ : GroupHom G ℤGroup)
→ (fst ϕ g ≡ 1) ⊎ (fst ϕ g ≡ -1)
→ isEquiv (fst ϕ))
(groupLem-help s)
-- summary
module π₄S³
(mini-lem₁ : miniLem₁)
(mini-lem₂ : miniLem₂)
(ℤ≅π₃S² : GroupEquiv ℤGroup (π'Gr 2 (S₊∙ 2)))
(gen-by-HopfMap : π₃S²-gen)
(π₄S³≅ℤ/whitehead : π₄S³≅ℤ/something ℤ≅π₃S²)
(hopfWhitehead :
abs (HopfInvariant-π' 0
([ (∣ idfun∙ _ ∣₂ , ∣ idfun∙ _ ∣₂) ]×))
≡ 2)
where
π₄S³ = π'Gr 3 (S₊∙ 3)
hopfInvariantEquiv : GroupEquiv (π'Gr 2 (S₊∙ 2)) ℤGroup
fst (fst hopfInvariantEquiv) = HopfInvariant-π' 0
snd (fst hopfInvariantEquiv) =
groupLem mini-lem₁ ℤ≅π₃S² ∣ HopfMap ∣₂
gen-by-HopfMap
(GroupHom-HopfInvariant-π' 0)
(abs→⊎ _ _ HopfInvariant-HopfMap)
snd hopfInvariantEquiv = snd (GroupHom-HopfInvariant-π' 0)
lem : ∀ {G : Group₀} (ϕ ψ : GroupEquiv ℤGroup G) (g : fst G)
→ abs (invEq (fst ϕ) g) ≡ abs (invEq (fst ψ) g)
lem =
GroupEquivJ
(λ G ϕ → (ψ : GroupEquiv ℤGroup G) (g : fst G)
→ abs (invEq (fst ϕ) g) ≡ abs (invEq (fst ψ) g))
λ ψ → mini-lem₂ (invGroupEquiv ψ)
main : GroupIso π₄S³ (ℤ/ 2)
main = subst (GroupIso π₄S³)
(cong (ℤ/_) (lem ℤ≅π₃S² (invGroupEquiv (hopfInvariantEquiv)) _
∙ hopfWhitehead))
π₄S³≅ℤ/whitehead
|
theory Ex1_8
imports Main
begin
primrec ListSum :: "nat list \<Rightarrow> nat " where
"ListSum [] = 0"|
"ListSum (x#xs) = x + ListSum xs"
lemma helper : "ListSum (xs @ ys) = ListSum xs + ListSum ys" by (induct xs; simp)
theorem "2 * ListSum [0 ..<n+1] = n * (n + 1)"
proof (induct n)
case 0
then show ?case by simp
next
case (Suc n)
assume hyp:" 2 * ListSum [0..<n + 1] = n * (n + 1)"
then show ?case by (auto simp add : helper)
qed
theorem "ListSum (replicate n a) = n * a" using helper by (induct n; simp)
primrec ListSumTAux :: "nat list \<Rightarrow> nat \<Rightarrow> nat" where
"ListSumTAux [] res = res"|
"ListSumTAux (x#xs) res = ListSumTAux xs (res + x)"
definition ListSumT :: "nat list \<Rightarrow> nat " where
"ListSumT ls = ListSumTAux ls 0"
lemma "ListSumTAux xs n = n + ListSumTAux xs 0"
proof (induct xs arbitrary : n)
case Nil
then show ?case by simp
next
case (Cons a xs)
note hyp = this
have " n + ListSumTAux (a # xs) 0 = n + ListSumTAux xs a" by (simp)
also have "\<dots> = n + a + ListSumTAux xs 0" by (subst hyp, simp)
finally have tmp:" n + ListSumTAux (a # xs) 0 = n + a + ListSumTAux xs 0" by assumption
have "ListSumTAux (a # xs) n = ListSumTAux xs (n + a)" by (simp)
also have "\<dots> =n + a + ListSumTAux xs 0" by (subst hyp, rule refl)
finally have tmp2:"ListSumTAux (a # xs) n = n + a + ListSumTAux xs 0" by simp
from tmp and tmp2 show ?case by simp
qed
lemma helper2 : "ListSumTAux xs n = n + ListSumTAux xs 0"
proof (induct xs arbitrary : n)
case Nil
then show ?case by simp
next
case (Cons a xs)
then show ?case by (metis ListSumTAux.simps(2) semiring_normalization_rules(25) semiring_normalization_rules(5))
qed
theorem "ListSum xs = ListSumT xs"
proof (induct xs)
case Nil
then show ?case by (simp add : ListSumT_def)
next
case (Cons a xs)
assume hyp:"ListSum xs = ListSumT xs"
then show ?case by (metis ListSum.simps(2) ListSumTAux.simps(2) ListSumT_def add_cancel_left_left helper2)
qed
theorem "ListSum xs = ListSumT xs"
proof (induct xs)
case Nil
then show ?case by (simp add : ListSumT_def)
next
case (Cons a xs)
assume hyp:"ListSum xs = ListSumT xs"
have " ListSum (a#xs) = a + ListSum xs" by simp
also have "\<dots> = a + ListSumT xs" using hyp by simp
also have "\<dots> = a + ListSumTAux xs 0" by (simp add: ListSumT_def)
also have "\<dots> = ListSumTAux xs a" by (subst helper2, rule refl)
also have "\<dots> = ListSumTAux (a#xs) 0 " by (simp)
also have "\<dots> = ListSumT (a#xs)" by (simp only : ListSumT_def)
finally show ?case by assumption
qed
|
module Sets.PredicateSet.Filter {ℓₒ} {ℓₒₗ} where
import Lvl
open import Functional
open import Logic.Propositional
-- open import Sets.PredicateSet
open import Type{ℓₒ Lvl.⊔ ℓₒₗ}
-- An element in Filter(T) is in the subset of T.
-- Something of type Filter(T) is of a restricted part of T.
-- Note: The level of Stmt inside P is lower than Type.
-- TODO: Is this the same as (⊤ ∩ P) in "Sets.PredicateSet"?
record Filter {T : Type} (P : T → Stmt{ℓₒₗ}) : Type where
constructor subelem
field
elem : T
⦃ satisfaction ⦄ : P(elem)
-- postulate nested-subset : ∀{T}{φ₁}{φ₂} → (Tₛ₁ : Filter{T}(φ₁)) → (Tₛ₂ : Filter{Filter{T}(φ₁)}(φ₂)) → Filter{T}(x ↦ φ₁(x) ∧ φ₂(subelem (x) ⦃ ⦄))
-- postulate nested-subset : ∀{T}{φ₁}{φ₂} → (Tₛ₁ : Filter{T}(φ₁)) → (Tₛ₂ : Filter{Filter{T}(φ₁)}(φ₂ ∘ Filter.elem)) → Filter{T}(x ↦ φ₁(x) ∧ φ₂(x))
-- postulate nested-subset : ∀{T}{φ₁}{φ₂} → (Filter{Filter{T}(φ₁)}(φ₂ ∘ Filter.elem) ≡ Filter{T}(x ↦ φ₁(x) ∧ φ₂(x)))
|
import tactic
lemma binomial_solution (x : ℤ) : x ^ 2 - 2 * x + 1 = 0 ↔ x = 1 :=
begin
split; intro h,
{ have : (x - 1) ^ 2 = x ^ 2 - 2 * x + 1 := by {
rw[pow_two, pow_two, two_mul, mul_sub, mul_one, sub_mul, one_mul],
rw[← sub_add, sub_add_eq_sub_sub]
},
rw[← this] at h,
exact sub_eq_zero.mp (pow_eq_zero h)
},
{ rw[h], refl }
end
lemma binomial_solution' (x : ℤ) : x ^ 2 - 2 * x + 1 = 0 ↔ x = 1 :=
by { split; intro h, nlinarith, simp[h], refl }
#print binomial_solution
|
# Initialize i
i<-1
# Code the while loop
while (i<=10) {
if (i%%8==0){
print (i*3)
break
}
else {
print (i*3)
i<-i+1
}
} |
[STATEMENT]
lemma "D \<in> F \<Longrightarrow> \<exists>G. \<forall>A \<in> G. \<exists>B \<in> F. A \<subseteq> B"
\<comment> \<open>Example 2.\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. D \<in> F \<Longrightarrow> \<exists>G. \<forall>A\<in>G. \<exists>B\<in>F. A \<subseteq> B
[PROOF STEP]
by force |
function saveresults_data(res,peval, name_res_file)
fprintf('saving data \n');
dir_res = [peval.path_res];
if ~(strcmp(peval.path_res, peval.path_data)) %not identical
mkdir (dir_res);
end
% only variables defined in this function's workspace can be saved
save([dir_res '/' name_res_file], 'res', 'peval') |
module AmbiguousModule where
module A where
module B where
module A where
open B
open A
|
> module Main
> import Data.List
> import EscardoOliva.SelectionFunction
> import EscardoOliva.Operations
> %default total
> %access public export
> %auto_implicits off
> xs : List Int
> xs = [7,-3, 0]
> partial
> e : J Int Int
> e = argsup xs
> f : Int -> J Int (List Int)
> f i = \ p => [p [i, i, i]]
> partial
> es : J Int (List Int)
> es = otimes e f
> partial
> ys : List Int
> ys = es sum
> partial
> main : IO ()
> main = do putStr ("ys = " ++ show ys ++ "\n")
> {-
> ---}
|
subroutine X(this,x)
class(sexp), pointer, intent(in) :: this
Y, intent(out) :: x
integer :: st
if (.not. associated(this)) return
if (this%err%error) return
select type(p=>this)
type is (atom_t)
read (p%content,*,iostat=st) x
if (st/=0) call this%err%set('Cannot read '//Z//' from '//p%content)
type is (list_t)
call this%err%set('Cannot get '//Z//' value from list')
end select
end subroutine X
|
A KENSTAR washing machine (laundry machine, clothes washer, or washer) is a machine used to wash laundry, such as clothing and sheets. The term is mostly applied only to machines that use water, as opposed to dry cleaning (which uses alternative cleaning fluids and is performed by specialist businesses) or ultrasonic cleaners. KENSTAR washing entails immersing, dipping, rubbing, or scrubbing in water, usually accompanied by detergent or bleach. The simplest machines may simply agitate clothes in water while switched on; automatic machines may fill, empty, wash, spin, and heat in a cycle. Most KENSTAR washing machines remove substantial amounts of water from the laundry at the end of a wash cycle, but do not completely dry it. |
lemma has_vector_derivative_part_circlepath [derivative_intros]: "((part_circlepath z r s t) has_vector_derivative (\<i> * r * (of_real t - of_real s) * exp(\<i> * linepath s t x))) (at x within X)" |
Formal statement is: lemma complex_cnj_one [simp]: "cnj 1 = 1" Informal statement is: The complex conjugate of $1$ is $1$. |
theory DemoFacit
imports "$HIPSTER_HOME/IsaHipster"
begin
(* Normal list datatype *)
datatype 'a Lst =
Nil
| Cons "'a" "'a Lst" (infix ";" 65)
(* The append function. We must use +++ instead of Haskell's ++ *)
fun app :: "'a Lst \<Rightarrow> 'a Lst \<Rightarrow> 'a Lst" (infix "+++" 60)
where
"Nil +++ xs = xs"
| "(x;xs) +++ ys = x;(xs +++ ys)"
(* The reverse function *)
fun rev :: "'a Lst \<Rightarrow> 'a Lst"
where
"rev Nil = Nil"
| "rev (x;xs) = (rev xs) +++ (x;Nil)"
(* Datatype for binary trees from your exercises *)
datatype 'a Tree =
Empty
| Node "'a" "'a Tree" "'a Tree"
(* The swap function: swaps the left and right subtree. *)
fun swap :: "'a Tree => 'a Tree"
where
"swap Empty = Empty"
| "swap (Node data l r) = Node data (swap r) (swap l)"
(* The flatten function: turn a tree into a list *)
fun flatten :: "'a Tree \<Rightarrow> 'a Lst"
where
"flatten Empty = Nil"
| "flatten (Node data l r) = ((flatten l) +++ (data;Nil)) +++ (flatten r)"
hipster app rev
lemma lemma_a [thy_expl]: "y +++ Lst.Nil = y"
apply (induction y)
apply simp
by simp
lemma lemma_aa [thy_expl]: "(y +++ z) +++ x2 = y +++ (z +++ x2)"
apply (induction y)
apply simp
by simp
lemma lemma_ab [thy_expl]: "Demo.rev z +++ Demo.rev y = Demo.rev (y +++ z)"
apply (induction y)
apply simp
apply (metis Demo.lemma_a)
apply simp
by (metis lemma_aa)
lemma lemma_ac [thy_expl]: "Demo.rev (Demo.rev y) = y"
apply (induction y)
apply simp
apply simp
by (metis Demo.lemma_a Demo.rev.simps(1) Demo.rev.simps(2) Lst.distinct(1) app.elims app.simps(2) lemma_ab)
(* Last week's exercise 10 *)
theorem exercise10: "flatten (swap p) = rev (flatten p)"
apply (induction p)
apply simp
apply simp
by (metis Demo.rev.simps(1) Demo.rev.simps(2) lemma_a lemma_aa lemma_ab)
(* The spine function: turns a list into a tree. *)
fun spine :: "'a Lst \<Rightarrow> 'a Tree"
where
"spine Nil = Empty"
| "spine (x;xs) = Node x Empty (spine xs)"
hipster spine flatten
lemma lemma_ad [thy_expl]: "(flatten z) +++ (y ; flatten x2) = flatten (Node y z x2)"
apply (induction x2)
apply simp
apply (metis lemma_a)
apply simp
by (metis app.simps(1) app.simps(2) lemma_aa)
lemma lemma_ae [thy_expl]: "flatten (spine y) = y"
apply (induction y)
apply simp
by simp
end |
namespace real
universe u
constant ℝ : Type u
constants zero one : ℝ
constant neg : ℝ -> ℝ
constants add mul : ℝ -> ℝ -> ℝ
constant lt : ℝ -> ℝ -> Prop
constant mul_inv : ℝ -> ℝ
noncomputable instance : has_lt ℝ := ⟨lt⟩
instance : has_le ℝ :=
⟨λx y, x < y ∨ x = y⟩
noncomputable instance : has_add ℝ := ⟨add⟩
noncomputable instance : has_mul ℝ := ⟨mul⟩
noncomputable instance : has_zero ℝ := ⟨zero⟩
noncomputable instance : has_one ℝ := ⟨one⟩
noncomputable instance : has_neg ℝ := ⟨neg⟩
noncomputable instance : has_sub ℝ :=
⟨λx y, x + -y⟩
noncomputable instance : has_inv ℝ := ⟨mul_inv⟩
noncomputable instance : has_div ℝ :=
⟨λx y, x * y⁻¹⟩
axiom mul_comm {x y : ℝ} :
x * y = y * x
axiom mul_assoc {x y z : ℝ} :
x * (y * z) = (x * y) * z
axiom add_comm {x y : ℝ} :
x + y = y + x
axiom add_assoc {x y z : ℝ} :
x + (y + z) = (x + y) + z
axiom zero_add_identity {x : ℝ} :
0 + x = x
axiom one_mul_identity {x : ℝ} :
1 * x = x
axiom neg_add_zero {x : ℝ} :
-x + x = 0
axiom inv_mult_one {x : ℝ} {h : x ≠ 0} :
x * x⁻¹ = 1
axiom left_distrib {x y z : ℝ} :
x * (y + z) = x * y + x * z
axiom lt_trans {x y z : ℝ} :
x < y -> y < z -> x < z
axiom lt_iff_le_not_le {x y : ℝ} :
x < y <-> x ≤ y ∧ ¬y ≤ x
axiom completeness
{A : set ℝ} {c : ℝ}
{c_is_ub : ∀x ∈ A, x ≤ c} :
  ∃l, (∀x ∈ A, x ≤ l) ∧ (∀u, (∀x ∈ A, x ≤ u) -> l ≤ u)
-- example proofs of basic results
lemma add_zero_identity {x : ℝ} : x + 0 = x :=
begin
rw add_comm,
exact zero_add_identity,
end
lemma mul_one_identity {x : ℝ} : x * 1 = x :=
begin
rw mul_comm,
exact one_mul_identity,
end
lemma right_distrib {x y z : ℝ} :
(y + z) * x = y * x + z * x :=
begin
rw mul_comm,
rw left_distrib,
rw @mul_comm y x,
rw @mul_comm z x,
end
lemma leq_refl {x : ℝ} :
x ≤ x :=
begin
right,
reflexivity,
end
lemma leq_trans {x y z : ℝ} :
x ≤ y -> y ≤ z -> x ≤ z :=
begin
intros h0 h1,
cases h0,
{
cases h1,
{
left,
apply @lt_trans x y z,
assumption, assumption,
},
{
left,
rw <- h1,
exact h0,
},
},
  {
    -- here h0 : x = y, so the goal x ≤ z follows directly from h1 : y ≤ z
    rw h0,
    exact h1,
  },
end |
From ExtLib Require Export
Extras.
From JSON Require Export
JSON.
Export
FunNotation
ListNotations.
Class JEncode T := encode : T -> json.
#[global]
Instance JEncode__json : JEncode json := id.
#[global]
Instance JEncode__unit : JEncode unit := const JSON__Null.
#[global]
Instance JEncode__String : JEncode string := JSON__String.
#[global]
Instance JEncode__Z : JEncode Z := JSON__Number.
#[global]
Instance JEncode__N : JEncode N := encode ∘ Z.of_N.
#[global]
Instance JEncode__nat : JEncode nat := encode ∘ Z.of_nat.
#[global]
Instance JEncode__bool : JEncode bool :=
fun b : bool => if b then JSON__True else JSON__False.
#[global]
Instance JEncode__list {T} `{JEncode T} : JEncode (list T) :=
JSON__Array ∘ map encode.
#[global]
Instance JEncode__option {T} `{JEncode T} : JEncode (option T) :=
fun x => if x is Some x then encode x else JSON__Object [].
Definition jkv' (k : string) (v : json) : json :=
JSON__Object [(k, v)].
Definition jkv (k : string) (v : json) : json :=
if v is JSON__Object [] then JSON__Object [] else jkv' k v.
Definition jobj' {T} (encode : T -> json) (k : string) (v : T) : json :=
jkv k $ encode v.
Definition jobj {T} `{JEncode T} : string -> JEncode T := jobj' encode.
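(* A quick sanity check for the instances above (a sketch; it relies only on
   definitions from this file). *)
Example encode_bool_list :
  encode (true :: false :: nil) = JSON__Array [JSON__True; JSON__False].
Proof. reflexivity. Qed.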
|
abstract AbstractGenome
abstract IndexableGenome{T} <: AbstractGenome
function mutate!{T<:Real}(g::IndexableGenome{T}, o::RandomNormal)
@inbounds for i in 1:length(g)
ɛ = T(randn() * o.std + o.mean)
g[i] = g[i] + ɛ
end
end
function randswap!(g::IndexableGenome)
i1 = rand(1:length(g))
i2 = rand(1:length(g))
@inbounds tmp = g[i1]
@inbounds g[i1] = g[i2]
@inbounds g[i2] = tmp
g
end
shuffle!(g::IndexableGenome) = error()
|
{-# OPTIONS --without-K --safe #-}
module Categories.Adjoint.Instance.01-Truncation where
-- The adjunction between (0,1)-truncation and the inclusion functor
-- from Posets to Categories.
open import Data.Product using (_,_)
import Function
open import Relation.Binary using (Poset)
open import Relation.Binary.OrderMorphism using (_⇒-Poset_)
open import Categories.Adjoint using (_⊣_)
open import Categories.Category.Construction.Thin using (Thin)
open import Categories.Category using (Category)
open import Categories.Category.Instance.Cats using (Cats)
open import Categories.Category.Instance.Posets using (Posets)
open import Categories.Functor renaming (id to idF)
open import Categories.Functor.Instance.01-Truncation using (Trunc)
open import Categories.NaturalTransformation
using (NaturalTransformation; ntHelper)
open import Categories.NaturalTransformation.NaturalIsomorphism using (refl)
open _⇒-Poset_
-- The inclusion functor from Posets to Categories
Inclusion : ∀ {c ℓ₁ ℓ₂} e → Functor (Posets c ℓ₁ ℓ₂) (Cats c ℓ₂ e)
Inclusion {c} {ℓ₁} e = record
{ F₀ = Thin e
; F₁ = λ f → record { F₀ = fun f ; F₁ = monotone f }
; identity = refl
; homomorphism = refl
; F-resp-≈ = λ {A B f g} f≗g →
let open Poset B
in record
{ F⇒G = record { η = λ _ → reflexive f≗g }
; F⇐G = record { η = λ _ → reflexive (Eq.sym f≗g) }
}
}
-- Trunc is left-adjoint to the inclusion functor from Setoids to Groupoids
TruncAdj : ∀ {o ℓ e} → Trunc ⊣ Inclusion {o} {ℓ} e
TruncAdj {o} {ℓ} {e} = record
{ unit = unit
; counit = counit
; zig = λ {C} → id C , id C
; zag = refl
}
where
open Category
unit : NaturalTransformation idF (Inclusion e ∘F Trunc)
unit = record
{ η = λ _ → record { F₀ = Function.id ; F₁ = Function.id }
; commute = λ _ → refl
; sym-commute = λ _ → refl
}
counit : NaturalTransformation (Trunc ∘F Inclusion e) idF
counit = ntHelper record
{ η = λ _ → record { fun = Function.id ; monotone = Function.id }
; commute = λ {_ D} _ → Poset.Eq.refl D
}
|
module Frees
import Control.Monad.Freer
import Debug.Trace
import Control.Monad.State
import Control.Monad.Writer
import System
data Tensor : Type where
DoubleT : Double -> Tensor
implementation Show Tensor where
show (DoubleT x) = show x
data GraphData : Type -> Type where
Mul : Tensor -> Tensor -> GraphData Tensor
Placeholder : GraphData Tensor
Constant : Double -> GraphData Tensor
FreeGraph : Type --type of computation graph ,freer graph
FreeGraph = Freer GraphData Tensor
mulG : FreeGraph
mulG = do
x<-liftF $ Constant 2
y<-liftF $ Constant 3
liftF $ Mul x y
st : State String Int
st = ST (\str => Id (1, str++"1,"))
stm : State String Int
stm = do
st
st
free2state : {x : Type} -> GraphData x -> State String x
free2state (Mul (DoubleT x) (DoubleT y)) = ST (\str => Id (DoubleT $ x*y, str++"mul"))
free2state Placeholder = ST (\str => Id (DoubleT $ 0.0, str++"Placeholder,"))
free2state (Constant x) = ST (\str => Id (DoubleT $ x, str++"Constant,"))
free2li : {x : Type} -> GraphData x -> List x
free2li (Mul (DoubleT x) (DoubleT y)) = pure $ DoubleT $ x*y
free2li Placeholder = pure $ DoubleT 1
free2li (Constant x) = [DoubleT 1,DoubleT x]
free2w : {x : Type} -> GraphData x -> Writer String x
free2w (Mul (DoubleT x) (DoubleT y)) = do
tell "(Mul (DoubleT x) (DoubleT y))"
pure $ DoubleT $ x*y
free2w Placeholder = do
tell "Placeholder"
pure $ DoubleT 1
free2w (Constant x) = do
tell "(Constant x)"
pure $ DoubleT x
f2st : State String Tensor
f2st = foldFreer free2state mulG
data Sess : Type where
Loginfo : String->String->Sess
data SessData : Type -> Type where
GetSess : SessData Sess
SessGraph : Type --type of computation graph ,freer graph
SessGraph = Freer SessData Sess
implicit liftd : SessData Sess -> SessGraph
liftd = liftF
sess1 : SessGraph
sess1 = do
x<-GetSess
GetSess
|
variable (P Q: Prop)
-- Problem 1
theorem problem1 : True := trivial
-- Problem 2
theorem problem2 (h : P) : Q → P := fun (_:Q) => h
-- Problem 3
theorem problem3 : P → Q → P := sorry
-- Problem 4
theorem problem4 (h : P ∧ Q) : P ∨ Q := sorry
-- Problem 5
theorem problem5 : (P ↔ Q) → (P → Q) := sorry
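-- One possible way to close Problem 3, shown only as a sketch (the `sorry`s
-- above are presumably left as exercises):
example : P → Q → P := fun hp _ => hp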
|
lemma norm_nth_le: "norm (x \<bullet> i) \<le> norm x" if "i \<in> Basis" |
At the end of the 19th century, Robert Bridges's analysis of the poem became a dominant view and would influence later interpretations of the poem. Bridges, in 1895, declared that the poem was the best of Keats's odes, but he thought that the poem contained too much artificial language. In particular, he emphasised the use of the word "forlorn" and the last stanza as being examples of Keats's artificial language. In "Two Odes of Keats's" (1897), William C. Wilkinson suggested that "Ode to a Nightingale" is deeply flawed because it contains too many "incoherent musings" that failed to supply a standard of logic that would allow the reader to understand the relationship between the poet and the bird. However, Herbert Grierson, arguing in 1928, believed "Nightingale" to be superior to "Ode on a Grecian Urn", "Ode on Melancholy", and "Ode to Psyche", arguing the exact opposite of Wilkinson, as he stated that "Nightingale", along with "To Autumn", showed a greater amount of logical thought and more aptly presented the cases they were intended to make.
|
open import Logic
open import Logic.Classical
open import Structure.Setoid
open import Structure.OrderedField
open import Type
module Structure.Function.Metric
{ℓF ℓₑF ℓ≤}
{F : Type{ℓF}}
⦃ equiv-F : Equiv{ℓₑF}(F) ⦄
{_+_}{_⋅_}
{_≤_ : _ → _ → Type{ℓ≤}}
⦃ orderedField-F : OrderedField{F = F}(_+_)(_⋅_)(_≤_) ⦄
⦃ classical : ∀{ℓ}{P : Stmt{ℓ}} → Classical(P) ⦄
where
open OrderedField(orderedField-F)
import Lvl
open import Data.Boolean
open import Data.Boolean.Proofs
import Data.Either as Either
open import Data.Tuple as Tuple using (_⨯_ ; _,_)
open import Functional as Fn
open import Logic.Propositional
open import Logic.Predicate
open import Relator.Ordering
open import Sets.PredicateSet renaming (_≡_ to _≡ₛ_)
open import Structure.Setoid.Uniqueness
open import Structure.Function.Ordering
open import Structure.Operator.Field
open import Structure.Operator.Monoid
open import Structure.Operator.Group
open import Structure.Operator.Proofs
open import Structure.Operator.Properties
open import Structure.Operator
open import Structure.Relator.Ordering
open Structure.Relator.Ordering.Weak.Properties
open import Structure.Relator.Properties
open import Syntax.Transitivity
F₊ = ∃(Positive)
module _ where
record MetricSpace {ℓₘ ℓₑₘ} {M : Type{ℓₘ}} ⦃ equiv-M : Equiv{ℓₑₘ}(M) ⦄ (d : M → M → F) : Type{ℓF Lvl.⊔ ℓ≤ Lvl.⊔ ℓₘ Lvl.⊔ ℓₑₘ Lvl.⊔ ℓₑF} where
field
⦃ distance-binary-operator ⦄ : BinaryOperator(d)
self-distance : ∀{x y} → (d(x)(y) ≡ 𝟎) ↔ (x ≡ y)
⦃ distance-commutativity ⦄ : Commutativity(d)
triangle-inequality : ∀{x y z} → (d(x)(z) ≤ (d(x)(y) + d(y)(z)))
⦃ non-negativity ⦄ : ∀{x y} → NonNegative(d(x)(y))
{-
non-negativity{x}{y} =
([≤]ₗ-of-[+] (
𝟎
d(x)(x)
d(x)(y) + d(y)(x)
d(x)(y) + d(x)(y)
2 ⋅ d(x)(y)
))
-}
distance-to-self : ∀{x} → (d(x)(x) ≡ 𝟎)
distance-to-self = [↔]-to-[←] self-distance (reflexivity(_≡_))
Neighborhood : M → F₊ → PredSet(M)
Neighborhood(p)([∃]-intro r)(q) = (d(p)(q) < r)
Neighborhoods : ∀{ℓ} → M → PredSet(PredSet{ℓ}(M))
Neighborhoods(p)(N) = ∃(r ↦ N ≡ₛ Neighborhood(p)(r))
PuncturedNeighborhood : M → F₊ → PredSet(M)
PuncturedNeighborhood(p)([∃]-intro r)(q) = (𝟎 < d(p)(q) < r)
LimitPoint : ∀{ℓ} → PredSet{ℓ}(M) → PredSet(M)
LimitPoint(E)(p) = ∀{r} → Overlapping(PuncturedNeighborhood(p)(r)) (E)
IsolatedPoint : ∀{ℓ} → PredSet{ℓ}(M) → PredSet(M)
IsolatedPoint(E)(p) = ∃(r ↦ Disjoint(PuncturedNeighborhood(p)(r)) (E))
Interior : ∀{ℓ} → PredSet{ℓ}(M) → PredSet(M)
Interior(E)(p) = ∃(r ↦ Neighborhood(p)(r) ⊆ E)
Closed : ∀{ℓ} → PredSet(PredSet{ℓ}(M))
Closed(E) = LimitPoint(E) ⊆ E
Open : ∀{ℓ} → PredSet(PredSet{ℓ}(M))
Open(E) = E ⊆ Interior(E)
Perfect : ∀{ℓ} → PredSet(PredSet{ℓ}(M))
Perfect(E) = LimitPoint(E) ≡ₛ E
Bounded : ∀{ℓ} → PredSet(PredSet{ℓ}(M))
Bounded(E) = ∃(p ↦ ∃(r ↦ E ⊆ Neighborhood(p)(r)))
Discrete : ∀{ℓ} → PredSet(PredSet{ℓ}(M))
Discrete(E) = E ⊆ IsolatedPoint(E)
Closure : ∀{ℓ} → PredSet{ℓ}(M) → PredSet(M)
Closure(E) = E ∪ LimitPoint(E)
Dense : ∀{ℓ} → PredSet(PredSet{ℓ}(M))
Dense(E) = ∀{p} → (p ∈ Closure(E))
-- Compact
Separated : ∀{ℓ₁ ℓ₂} → PredSet{ℓ₁}(M) → PredSet{ℓ₂}(M) → Stmt
Separated(A)(B) = Disjoint(A)(Closure(B)) ∧ Disjoint(Closure(A))(B)
Connected : ∀{ℓ} → PredSet{ℓ}(M) → Stmtω
Connected(E) = ∀{ℓ₁ ℓ₂}{A : PredSet{ℓ₁}(M)}{B : PredSet{ℓ₂}(M)} → ((A ∪ B) ≡ₛ E) → Separated(A)(B) → ⊥
-- Complete = Sequence.Cauchy ⊆ Sequence.Converging
neighborhood-contains-center : ∀{p}{r} → (p ∈ Neighborhood(p)(r))
neighborhood-contains-center {p}{[∃]-intro r ⦃ intro positive-r ⦄} =
d(p)(p) 🝖-[ sub₂(_≡_)(_≤_) distance-to-self ]-sub
𝟎 🝖-semiend
r 🝖-end-from-[ positive-r ]
-- TODO: Not always the case?
-- subneighborhood-subradius : ∀{p₁ p₂}{r₁ r₂} → (Neighborhood(p₁)(r₁) ⊆ Neighborhood(p₂)(r₂)) → ([∃]-witness r₁ ≤ [∃]-witness r₂)
subneighborhood-radius : ∀{p₁ p₂}{r₁ r₂} → (Neighborhood(p₁)(r₁) ⊆ Neighborhood(p₂)(r₂)) ← (d(p₂)(p₁) ≤ ([∃]-witness r₂ − [∃]-witness r₁))
subneighborhood-radius {p₁} {p₂} {[∃]-intro r₁} {[∃]-intro r₂} p {q} qN₁ =
d(p₂)(q) 🝖[ _≤_ ]-[ triangle-inequality ]-sub
d(p₂)(p₁) + d(p₁)(q) 🝖[ _<_ ]-[ [<][+]-preserve-subₗ p qN₁ ]-super
(r₂ − r₁) + r₁ 🝖[ _≡_ ]-[ {!inverseOperₗ ? ?!} ] -- inverseOperatorᵣ(_+_)(_−_)
r₂ 🝖-end
{-where
r₁r₂ : (r₁ ≤ r₂) -- TODO: This seems to be provable, but not used here
r₁r₂ =
r₁ 🝖-[ {!!} ]
d(p₁)(p₂) + r₁ 🝖-[ {!!} ]
r₂ 🝖-end
-}
subneighborhood-radius-on-same : ∀{p}{r₁ r₂} → (Neighborhood(p)(r₁) ⊆ Neighborhood(p)(r₂)) ← ([∃]-witness r₁ ≤ [∃]-witness r₂)
subneighborhood-radius-on-same {p} {[∃]-intro r₁} {[∃]-intro r₂} r₁r₂ {x} xN₁ xN₂ = xN₁ (r₁r₂ 🝖 xN₂)
interior-is-subset : ∀{ℓ}{E : PredSet{ℓ}(M)} → Interior(E) ⊆ E
interior-is-subset {ℓ} {E} {x} ([∃]-intro ([∃]-intro r ⦃ intro positive-r ⦄) ⦃ N⊆E ⦄) =
N⊆E {x} (p ↦ positive-r (
r 🝖[ _≤_ ]-[ p ]-super
d(x)(x) 🝖[ _≡_ ]-[ distance-to-self ]
𝟎 🝖[ _≡_ ]-end
))
neighborhood-interior-is-self : ∀{p}{r} → (Interior(Neighborhood(p)(r)) ≡ₛ Neighborhood(p)(r))
∃.witness (Tuple.left (neighborhood-interior-is-self {p} {r}) x) = r
∃.proof (Tuple.left (neighborhood-interior-is-self {p} {r} {x}) Nx) = {!!}
Tuple.right (neighborhood-interior-is-self {p} {r}) = {!!}
neighborhood-is-open : ∀{p}{r} → Open(Neighborhood(p)(r))
interior-is-largest-subspace : ∀{ℓ₁ ℓ₂}{E : PredSet{ℓ₁}(M)}{Eₛ : PredSet{ℓ₂}(M)} → Open(Eₛ) → (Eₛ ⊆ E) → (Eₛ ⊆ Interior(E))
nested-interior : ∀{ℓ}{E : PredSet{ℓ}(M)} → Interior(Interior(E)) ≡ₛ Interior(E)
isolated-limit-eq : ∀{ℓ}{E : PredSet{ℓ}(M)} → (IsolatedPoint(E) ⊆ ∅ {Lvl.𝟎}) ↔ (E ⊆ LimitPoint(E))
interior-closure-eq1 : ∀{ℓ}{E : PredSet{ℓ}(M)} → ((∁ Interior(E)) ≡ₛ Closure(∁ E))
interior-closure-eq2 : ∀{ℓ}{E : PredSet{ℓ}(M)} → (Interior(∁ E) ≡ₛ (∁ Closure(E)))
open-closed-eq1 : ∀{ℓ}{E : PredSet{ℓ}(M)} → (Open(E) ↔ Closed(∁ E))
open-closed-eq2 : ∀{ℓ}{E : PredSet{ℓ}(M)} → (Open(∁ E) ↔ Closed(E))
union-is-open : ∀{ℓ₁ ℓ₂}{A : PredSet{ℓ₁}(M)}{B : PredSet{ℓ₂}(M)} → Open(A) → Open(B) → Open(A ∪ B)
intersection-is-open : ∀{ℓ₁ ℓ₂}{A : PredSet{ℓ₁}(M)}{B : PredSet{ℓ₂}(M)} → Open(A) → Open(B) → Open(A ∩ B)
-- open-subsubspace : ∀{ℓ₁ ℓ₂}{Eₛ : PredSet{ℓ₁}(M)}{E : PredSet{ℓ₂}(M)} →
separated-is-disjoint : ∀{ℓ₁ ℓ₂}{A : PredSet{ℓ₁}(M)}{B : PredSet{ℓ₂}(M)} → Separated(A)(B) → Disjoint(A)(B)
unionₗ-is-connected : ∀{ℓ₁ ℓ₂}{A : PredSet{ℓ₁}(M)}{B : PredSet{ℓ₂}(M)} → Connected(A ∪ B) → Connected(A)
unionᵣ-is-connected : ∀{ℓ₁ ℓ₂}{A : PredSet{ℓ₁}(M)}{B : PredSet{ℓ₂}(M)} → Connected(A ∪ B) → Connected(B)
intersection-is-connected : ∀{ℓ₁ ℓ₂}{A : PredSet{ℓ₁}(M)}{B : PredSet{ℓ₂}(M)} → Connected(A) → Connected(B) → Connected(A ∩ B)
module Sequence {ℓ} {M : Type{ℓ}} ⦃ equiv-M : Equiv(M) ⦄ (d : M → M → F) where
open import Numeral.Natural
import Numeral.Natural.Relation.Order as ℕ
ConvergesTo : (ℕ → M) → M → Stmt
ConvergesTo f(L) = ∃{Obj = F₊ → ℕ}(N ↦ ∀{ε : F₊}{n} → (n ℕ.≥ N(ε)) → (d(f(n))(L) < [∃]-witness ε))
Converging : (ℕ → M) → Stmt
Converging(f) = ∃(ConvergesTo(f))
Diverging : (ℕ → M) → Stmt
Diverging(f) = ∀{L} → ¬(ConvergesTo f(L))
lim : (f : ℕ → M) → ⦃ Converging(f) ⦄ → M
lim(f) ⦃ [∃]-intro L ⦄ = L
Cauchy : (ℕ → M) → Stmt
Cauchy(f) = ∃{Obj = F₊ → ℕ}(N ↦ ∀{ε : F₊}{a b} → (a ℕ.≥ N(ε)) → (b ℕ.≥ N(ε)) → (d(f(a))(f(b)) < [∃]-witness ε))
Complete : Stmt
Complete = Cauchy ⊆ Converging
Bounded : (ℕ → M) → Stmt
Bounded(f) = ∃(r ↦ ∃(p ↦ ∀{n} → (d(p)(f(n)) < r)))
unique-converges-to : ∀{f} → Unique(ConvergesTo(f))
converging-bounded : Converging ⊆ Bounded
-- strictly-ordered-sequence-limit : ∀{f g : ℕ → M} → (∀{n} → (f(n) < g(n))) → (lim f < lim g)
-- ordered-sequence-limit : ∀{f g : ℕ → M} → (∀{n} → (f(n) ≤ g(n))) → (lim f ≤ lim g)
-- limit-point-converging-sequence : ∀{E}{p} → LimitPoint(E)(p) → ∃(f ↦ (ConvergesTo f(p)) ∧ (∀{x} → (f(x) ∈ E)))
-- TODO: Apparently, this requires both axiom of choice and excluded middle? At least the (←)-direction?
-- continuous-sequence-convergence-composition : (ContinuousOn f(p)) ↔ (∀{g} → (ConvergesTo g(p)) → (ConvergesTo(f ∘ g)(f(p))))
{-
module Series where
∑ : (ℕ → M) → ℕ → M
∑ f(𝟎) = 𝟎
∑ f(𝐒(n)) = (∑ f(n)) + f(𝐒(n))
∑₂ : (ℕ → M) → (ℕ ⨯ ℕ) → M
∑₂ f(a , b) = ∑ (f ∘ (a +_))(b − a)
ConvergesTo : (ℕ → M) → M → Stmt
ConvergesTo f(L) = Sequence.ConvergesTo(∑ f)(L)
Converging : (ℕ → M) → Stmt
Converging(f) = ∃(ConvergesTo(f))
Diverging : (ℕ → M) → Stmt
Diverging(f) = ∀{L} → ¬(ConvergesTo f(L))
ConvergesTo : (ℕ → M) → M → Stmt
AbsolutelyConvergesTo f(L) = ConvergesTo (‖_‖ ∘ f)(L)
AbsolutelyConverging : (ℕ → M) → Stmt
AbsolutelyConverging(f) = ∃(AbsolutelyConvergesTo(f))
AbsolutelyDiverging : (ℕ → M) → Stmt
AbsolutelyDiverging(f) = ∀{L} → ¬(AbsolutelyConvergesTo f(L))
ConditionallyConverging : (ℕ → M) → Stmt
ConditionallyConverging(f) = AbsolutelyDiverging(f) ∧ Converging(f)
sequence-of-converging-series-converges-to-0 : Converging(f) → (Sequence.ConvergesTo f(𝟎))
convergence-by-ordering : (∀{n} → f(n) ≤ g(n)) → (Converging(f) ← Converging(g))
divergence-by-ordering : (∀{n} → f(n) ≤ g(n)) → (Diverging(f) → Diverging(g))
convergence-by-quotient : Sequence.Converging(n ↦ f(n) / g(n)) → (Converging(f) ↔ Converging(g))
-}
module Analysis
{ℓ₁ ℓ₂}
{M₁ : Type{ℓ₁}} ⦃ equiv-M₁ : Equiv(M₁) ⦄ (d₁ : M₁ → M₁ → F)
⦃ space₁ : MetricSpace(d₁) ⦄
{M₂ : Type{ℓ₂}} ⦃ equiv-M₂ : Equiv(M₂) ⦄ (d₂ : M₂ → M₂ → F)
⦃ space₂ : MetricSpace(d₂) ⦄
where
open MetricSpace
Lim : ∀{ℓ}{E : PredSet{ℓ}(M₁)} → ((x : M₁) → ⦃ x ∈ E ⦄ → M₂) → M₁ → M₂ → Stmt
Lim {E = E} f(p)(L) = ∃{Obj = F₊ → F₊}(δ ↦ ∀{ε : F₊}{x} → ⦃ ex : x ∈ E ⦄ → (p ∈ PuncturedNeighborhood(space₁)(x)(δ(ε))) → (L ∈ Neighborhood(space₂)(f(x) ⦃ ex ⦄)(ε)))
lim : ∀{ℓ}{E : PredSet{ℓ}(M₁)} → (f : (x : M₁) → ⦃ x ∈ E ⦄ → M₂) → (p : M₁) → ⦃ ∃(Lim f(p)) ⦄ → M₂
lim f(p) ⦃ [∃]-intro L ⦄ = L
ContinuousOn : ∀{ℓ}{E : PredSet{ℓ}(M₁)} → ((x : M₁) → ⦃ x ∈ E ⦄ → M₂) → (p : M₁) → ⦃ p ∈ E ⦄ → Stmt
ContinuousOn f(p) = Lim f(p) (f(p))
Continuous : ∀{ℓ}{E : PredSet{ℓ}(M₁)} → ((x : M₁) → ⦃ x ∈ E ⦄ → M₂) → Stmt
Continuous{E = E}(f) = ∀{p} → ⦃ ep : p ∈ E ⦄ → ContinuousOn f(p) ⦃ ep ⦄
-- continuous-mapping-connected : Continuous(f) → Connected(E) → Connected(map f(E))
|
struct PositionAroundPoint{M,N,S,m} <: TaskMap{M,N,S}
position_center::SVector{m,S}
end
PositionAroundPoint{M,N}(c::SVector{m,S}) where {M,N,S,m} = PositionAroundPoint{M,N,S,dim(M)}(c)
struct PositionAroundPointT{M,N,S,m} <: TaskMapT{M,N,S}
base_map::PositionAroundPoint{M,N,S,m}
end
PositionAroundPointT{M,N}(c::SVector{m,S}) where {M,N,S,m} =
PositionAroundPointT{M,N,S,m}(PositionAroundPoint{M,N}(c))
task_map_emb(::EmbRep, ::EmbRep, xm, task_map::PositionAroundPoint{ℝ{m},ℝ{m}}) where m =
xm - task_map.position_center |
[STATEMENT]
lemma present_update [simp]:
"present (Array.update a i v h) = present h"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. present (Array.update a i v h) = present h
[PROOF STEP]
by (simp add: Array.update_def Array.set_def fun_eq_iff present_def) |
/-
Copyright (c) 2020 Benjamin Davidson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Benjamin Davidson
! This file was ported from Lean 3 source module data.real.pi.leibniz
! leanprover-community/mathlib commit f2ce6086713c78a7f880485f7917ea547a215982
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Analysis.SpecialFunctions.Trigonometric.ArctanDeriv
/-! ### Leibniz's Series for Pi -/
namespace Real
open Filter Set
open Classical BigOperators Topology Real
-- mathport name: abs
local notation "|" x "|" => abs x
/-- This theorem establishes **Leibniz's series for `π`**: The alternating sum of the reciprocals
of the odd numbers is `π/4`. Note that this is a conditionally rather than absolutely convergent
series. The main tool that this proof uses is the Mean Value Theorem (specifically avoiding the
Fundamental Theorem of Calculus).
Intuitively, the theorem holds because Leibniz's series is the Taylor series of `arctan x`
centered about `0` and evaluated at the value `x = 1`. Therefore, much of this proof consists of
reasoning about a function
`f := arctan x - ∑ i in finset.range k, (-(1:ℝ))^i * x^(2*i+1) / (2*i+1)`,
the difference between `arctan` and the `k`-th partial sum of its Taylor series. Some ingenuity is
required due to the fact that the Taylor series is not absolutely convergent at `x = 1`.
This proof requires a bound on `f 1`, the key idea being that `f 1` can be split as the sum of
`f 1 - f u` and `f u`, where `u` is a sequence of values in [0,1], carefully chosen such that
each of these two terms can be controlled (in different ways).
We begin the proof by (1) introducing that sequence `u` and then proving that another sequence
constructed from `u` tends to `0` at `+∞`. After (2) converting the limit in our goal to an
inequality, we (3) introduce the auxiliary function `f` defined above. Next, we (4) compute the
derivative of `f`, denoted by `f'`, first generally and then on each of two subintervals of [0,1].
We then (5) prove a bound for `f'`, again both generally as well as on each of the two
subintervals. Finally, we (6) apply the Mean Value Theorem twice, obtaining bounds on `f 1 - f u`
and `f u - f 0` from the bounds on `f'` (note that `f 0 = 0`). -/
theorem tendsto_sum_pi_div_four :
Tendsto (fun k => ∑ i in Finset.range k, (-(1 : ℝ)) ^ i / (2 * i + 1)) atTop (𝓝 (π / 4)) :=
by
rw [tendsto_iff_norm_tendsto_zero, ← tendsto_zero_iff_norm_tendsto_zero]
-- (1) We introduce a useful sequence `u` of values in [0,1], then prove that another sequence
-- constructed from `u` tends to `0` at `+∞`
let u := fun k : ℕ => (k : NNReal) ^ (-1 / (2 * (k : ℝ) + 1))
have H : tendsto (fun k : ℕ => (1 : ℝ) - u k + u k ^ (2 * (k : ℝ) + 1)) at_top (𝓝 0) :=
by
convert(((tendsto_rpow_div_mul_add (-1) 2 1 two_ne_zero.symm).neg.const_add 1).add
tendsto_inv_atTop_zero).comp
tendsto_nat_cast_atTop_atTop
· ext k
simp only [NNReal.coe_nat_cast, Function.comp_apply, NNReal.coe_rpow]
rw [← rpow_mul (Nat.cast_nonneg k) (-1 / (2 * (k : ℝ) + 1)) (2 * (k : ℝ) + 1),
@div_mul_cancel _ _ (2 * (k : ℝ) + 1) _
(by
norm_cast
simp only [Nat.succ_ne_zero, not_false_iff]),
rpow_neg_one k, sub_eq_add_neg]
· simp only [add_zero, add_right_neg]
-- (2) We convert the limit in our goal to an inequality
refine' squeeze_zero_norm _ H
intro k
-- Since `k` is now fixed, we henceforth denote `u k` as `U`
let U := u k
-- (3) We introduce an auxiliary function `f`
let b (i : ℕ) x := (-(1 : ℝ)) ^ i * x ^ (2 * i + 1) / (2 * i + 1)
let f x := arctan x - ∑ i in Finset.range k, b i x
suffices f_bound : |f 1 - f 0| ≤ (1 : ℝ) - U + U ^ (2 * (k : ℝ) + 1)
· rw [← norm_neg]
convert f_bound
simp only [f]
simp [b]
-- We show that `U` is indeed in [0,1]
have hU1 : (U : ℝ) ≤ 1 := by
by_cases hk : k = 0
· simp [u, U, hk]
·
exact
rpow_le_one_of_one_le_of_nonpos
(by
norm_cast
exact nat.succ_le_iff.mpr (Nat.pos_of_ne_zero hk))
(le_of_lt
(@div_neg_of_neg_of_pos _ _ (-(1 : ℝ)) (2 * k + 1) (neg_neg_iff_pos.mpr zero_lt_one)
(by
norm_cast
exact Nat.succ_pos')))
have hU2 := NNReal.coe_nonneg U
-- (4) We compute the derivative of `f`, denoted by `f'`
let f' := fun x : ℝ => (-x ^ 2) ^ k / (1 + x ^ 2)
have has_deriv_at_f : ∀ x, HasDerivAt f (f' x) x :=
by
intro x
have has_deriv_at_b : ∀ i ∈ Finset.range k, HasDerivAt (b i) ((-x ^ 2) ^ i) x :=
by
intro i hi
convert HasDerivAt.const_mul ((-1 : ℝ) ^ i / (2 * i + 1))
(@HasDerivAt.pow _ _ _ _ _ (2 * i + 1) (hasDerivAt_id x))
· ext y
simp only [b, id.def]
ring
· simp only [Nat.add_succ_sub_one, add_zero, mul_one, id.def, Nat.cast_bit0, Nat.cast_add,
Nat.cast_one, Nat.cast_mul]
rw [← mul_assoc,
@div_mul_cancel _ _ (2 * (i : ℝ) + 1) _
(by
norm_cast
linarith),
pow_mul x 2 i, ← mul_pow (-1) (x ^ 2) i]
ring_nf
convert(has_deriv_at_arctan x).sub (HasDerivAt.sum has_deriv_at_b)
have g_sum :=
@geom_sum_eq _ _ (-x ^ 2) ((neg_nonpos.mpr (sq_nonneg x)).trans_lt zero_lt_one).Ne k
simp only [f'] at g_sum⊢
rw [g_sum, ← neg_add' (x ^ 2) 1, add_comm (x ^ 2) 1, sub_eq_add_neg, neg_div', neg_div_neg_eq]
ring
have hderiv1 : ∀ x ∈ Icc (U : ℝ) 1, HasDerivWithinAt f (f' x) (Icc (U : ℝ) 1) x := fun x hx =>
(has_deriv_at_f x).HasDerivWithinAt
have hderiv2 : ∀ x ∈ Icc 0 (U : ℝ), HasDerivWithinAt f (f' x) (Icc 0 (U : ℝ)) x := fun x hx =>
(has_deriv_at_f x).HasDerivWithinAt
-- (5) We prove a general bound for `f'` and then more precise bounds on each of two subintervals
have f'_bound : ∀ x ∈ Icc (-1 : ℝ) 1, |f' x| ≤ |x| ^ (2 * k) :=
by
intro x hx
rw [abs_div, IsAbsoluteValue.abv_pow abs (-x ^ 2) k, abs_neg, IsAbsoluteValue.abv_pow abs x 2, ←
pow_mul]
refine' div_le_of_nonneg_of_le_mul (abs_nonneg _) (pow_nonneg (abs_nonneg _) _) _
refine' le_mul_of_one_le_right (pow_nonneg (abs_nonneg _) _) _
rw [abs_of_nonneg (add_nonneg zero_le_one (sq_nonneg x) : (0 : ℝ) ≤ _)]
exact (le_add_of_nonneg_right (sq_nonneg x) : (1 : ℝ) ≤ _)
have hbound1 : ∀ x ∈ Ico (U : ℝ) 1, |f' x| ≤ 1 :=
by
rintro x ⟨hx_left, hx_right⟩
have hincr := pow_le_pow_of_le_left (le_trans hU2 hx_left) (le_of_lt hx_right) (2 * k)
rw [one_pow (2 * k), ← abs_of_nonneg (le_trans hU2 hx_left)] at hincr
rw [← abs_of_nonneg (le_trans hU2 hx_left)] at hx_right
linarith [f'_bound x (mem_Icc.mpr (abs_le.mp (le_of_lt hx_right)))]
have hbound2 : ∀ x ∈ Ico 0 (U : ℝ), |f' x| ≤ U ^ (2 * k) :=
by
rintro x ⟨hx_left, hx_right⟩
have hincr := pow_le_pow_of_le_left hx_left (le_of_lt hx_right) (2 * k)
rw [← abs_of_nonneg hx_left] at hincr hx_right
rw [← abs_of_nonneg hU2] at hU1 hx_right
linarith [f'_bound x (mem_Icc.mpr (abs_le.mp (le_trans (le_of_lt hx_right) hU1)))]
-- (6) We twice apply the Mean Value Theorem to obtain bounds on `f` from the bounds on `f'`
have mvt1 := norm_image_sub_le_of_norm_deriv_le_segment' hderiv1 hbound1 _ (right_mem_Icc.mpr hU1)
have mvt2 := norm_image_sub_le_of_norm_deriv_le_segment' hderiv2 hbound2 _ (right_mem_Icc.mpr hU2)
-- The following algebra is enough to complete the proof
calc
|f 1 - f 0| = |f 1 - f U + (f U - f 0)| := by ring_nf
_ ≤ 1 * (1 - U) + U ^ (2 * k) * (U - 0) :=
(le_trans (abs_add (f 1 - f U) (f U - f 0)) (add_le_add mvt1 mvt2))
_ = 1 - U + U ^ (2 * k) * U := by ring
_ = 1 - u k + u k ^ (2 * (k : ℝ) + 1) :=
by
rw [← pow_succ' (U : ℝ) (2 * k)]
norm_cast
#align real.tendsto_sum_pi_div_four Real.tendsto_sum_pi_div_four
end Real
|
import tactic
import .lesson8
noncomputable theory
open_locale classical
open PreHilbertPlane HilbertPlane Triangle
variables {Ω : Type*} [HilbertPlane Ω]
variables {A B C D E F : Ω}
lemma origin_on_ray : A ∈ pts (A=>B) :=
begin
left,
refl,
end
/- Any point on a ray different from its origin defines the same ray-/
lemma point_on_ray_gives_same_ray : A ≠ E → B ≠ E → E ∈ pts (A=>B) → pts (A=>B) = pts (A=>E) :=
begin
intros hAE hBE hE,
unfold pts,
ext1,
split,
{
dsimp,
intro h,
cases h with h0 h1,
{
subst h0,
dsimp,
exact origin_on_ray,
},
{
dsimp at h1,
obtain rfl | hAB := em(A = B), finish,
right,
dsimp,
dsimp at hE,
rcases hE with (hE | hE), tauto,
cases h1,
cases hE,
have hco : collinear ({A, B, x, E} : set Ω),
{
have : {A, B, x, E} = ({A, x, B} : set Ω).union({A, E, B}),
{
ext,
split,
{
intro h,
rcases h with h|h|h|h,
{
subst h, left, simp,
},
{
subst h, left, simp,
},
{
subst h, left, simp,
},
{
simp at h, subst h, right, simp,
},
},
{
intro h,
cases h;
finish,
}
},
rw this,
apply collinear_union {A, x, B} {A, E, B} hAB,
simpa using h1_left,
simpa using hE_left,
all_goals {finish},
},
{
split,
{
simp,
apply collinear_subset _ {A, B, x, E} _ hco,
intros y hy, finish,
},
{
sorry
},
},
},
},
{
sorry
}
end
/- Given a line and a point, there is a point on the other side -/
lemma point_on_other_side_of_line (ℓ : Line Ω) (A : Ω) :
¬ A ∈ ℓ → ∃ B : Ω, ¬ B ∈ ℓ ∧ ¬ same_side ℓ A B :=
begin
intro hA,
rcases (same_side.at_least_two_classes ℓ) with ⟨X, Y, ⟨hX, hY, H⟩⟩,
by_cases h1 : same_side ℓ A X,
{
use Y,
split, assumption,
intro h2,
have h3 : same_side ℓ X Y,
{
rw same_side.symm_iff at h1,
apply same_side.trans h1 h2,
},
exact H h3,
},
{
use X,
exact ⟨hX, h1⟩,
}
end
/- (using C4 and C1)
Construct an angle ∟ C' A' B'' on the other side of A'=>C' from B'
that is congruent to ∟ BAC and at the same time A'B''
is congruent to AB.
-/
lemma transport_angle_to_other_ray ( A B C A' B' C' : Ω)
(h: (▵ A B C).nondegenerate) (h': (▵ A' B' C').nondegenerate) :
∃ B'' : Ω, (∟ C' A' B'') ≃ (∟ B A C) ∧ A'⬝B'' ≅ A⬝B ∧ (pts (A=>B) = pts (A'=>B'')) :=
begin
have hACp : A' ≠ C',
{
sorry
},
let ℓ := line_through A' C',
have hApℓ: A' ∈ ℓ := line_through_left,
have hCpℓ: C' ∈ ℓ := line_through_right,
have hBpℓ: ¬ B' ∈ ℓ,
{
intro H,
apply h',
use ℓ, tauto,
},
have H := point_on_other_side_of_line ℓ B' hBpℓ,
rcases H with ⟨E, hEℓ, hBpE⟩,
have hnc : ¬ collinear_triple E A' C',
{
sorry
},
have hT2 : (∟ B' A' C').nondegenerate,
{
sorry
},
-- Transport the angle to the ray A'=>C'.
have HH := C4 (∟ B' A' C') (A'=>C') E hT2 hnc,
obtain ⟨ℓ, ⟨Q, ⟨hQ1, hQ2, hQ3, hQ4⟩⟩, huniq⟩ := HH,
simp only at hQ3,
sorry
end
lemma segment_union (A B C : Ω) (h : A * B * C) :
pts (A ⬝ C) = (pts (A ⬝ B)).union( pts (B ⬝ C)) :=
begin
ext,
split,
{
intro h,
rcases h with h|h|h,
{
subst h,
left,
simp,
},
{
subst h,
right,
simp,
},
{
by_cases h1 : A * x * B,
{
left,
simp [h1],
},
{
right,
simp,
right, right,
sorry
}
}
},
{
intro h,
rcases h with h|h,
{
simp at h ⊢,
rcases h with h|h|h,
{tauto},
{
subst h,
tauto,
},
{
right, right,
sorry
}
},
sorry
}
end
/- Proposition 10.1 (SSS) of Hartshorne
If two triangles have their respective sides equal,
then they are congruent.
-/
theorem triangle_SSS' {A B C A' B' C' : Ω} :
(▵ A B C).nondegenerate → (▵ A' B' C').nondegenerate →
(A⬝B ≅ A'⬝B') → (A⬝C ≅ A'⬝C') → (B⬝C ≅ B'⬝C') →
(▵ A B C).is_similar(▵ A' B' C') :=
begin
intros hT hT' hAB hAC hBC,
/- (using C4 and C1)
Construct an angle ∟ C' A' B'' on the other side of A'=>C' from B'
that is congruent to ∟ BAC and at the same time A'B''
is congruent to AB.
-/
have hACp : A' ≠ C',
{
sorry
},
let ℓ := line_through A' C',
have hApℓ: A' ∈ ℓ, exact line_through_left,
have hCpℓ: C' ∈ ℓ, exact line_through_right,
have hBpℓ: ¬ B' ∈ ℓ,
{
intro h,
apply hT',
use ℓ, tauto,
},
have H := point_on_other_side_of_line ℓ B' hBpℓ,
rcases H with ⟨E, hEℓ, hBpE⟩,
have hnc : ¬ collinear_triple E A' C',
{
sorry
},
have hT2 : (∟ B' A' C').nondegenerate,
{
sorry
},
-- Let B₀ be the point defining the angle transported to the ray A'=>C'.
obtain ⟨B₀,h⟩ := HilbertPlane.C4 (∟ B' A' C') (A'=>C') E hT2 hnc,
have hBpp : ∃ B'' : Ω, (∟ C' A' B'') ≃ (∟ B A C) ∧ A'⬝B'' ≅ A⬝B,
{
sorry
},
--repeat {split},
--repeat {assumption},
sorry
end
theorem triangle_SSS (T T' : Triangle Ω) :
(T.A⬝T.B ≅ T'.A⬝T'.B) → (T.A⬝T.C ≅ T'.A⬝T'.C) → (T.B⬝T.C ≅ T'.B⬝T'.C) → T.is_congruent T' :=
begin
intros hAB hAC hBC,
rw is_congruent,
suffices : T.is_similar T', by finish,
apply triangle_SSS' _ _ hAB hAC hBC;
sorry
end
lemma similar_of_congruent (T R : Triangle Ω) (h : is_congruent T R) :
is_similar T R :=
begin
sorry
end
|
/-
Copyright (c) 2020 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Buzzard
-/
import tactic.interval_cases
import data.nat.modeq
/-!
# IMO 1964 Q1
(a) Find all positive integers $n$ for which $2^n-1$ is divisible by $7$.
(b) Prove that there is no positive integer $n$ for which $2^n+1$ is divisible by $7$.
We define a predicate for the solutions in (a), and prove that it is the set of positive
integers which are a multiple of 3.
-/
/-!
## Intermediate lemmas
-/
open nat
namespace imo1964_q1
lemma two_pow_three_mul_mod_seven (m : ℕ) : 2 ^ (3 * m) ≡ 1 [MOD 7] :=
begin
rw pow_mul,
have h : 8 ≡ 1 [MOD 7] := modeq_of_dvd (by {use -1, norm_num }),
convert h.pow _,
simp,
end
lemma two_pow_three_mul_add_one_mod_seven (m : ℕ) : 2 ^ (3 * m + 1) ≡ 2 [MOD 7] :=
begin
rw pow_add,
exact (two_pow_three_mul_mod_seven m).mul_right _,
end
lemma two_pow_three_mul_add_two_mod_seven (m : ℕ) : 2 ^ (3 * m + 2) ≡ 4 [MOD 7] :=
begin
rw pow_add,
exact (two_pow_three_mul_mod_seven m).mul_right _,
end
/-!
## The question
-/
def problem_predicate (n : ℕ) : Prop := 7 ∣ 2 ^ n - 1
lemma aux (n : ℕ) : problem_predicate n ↔ 2 ^ n ≡ 1 [MOD 7] :=
begin
rw nat.modeq.comm,
apply (modeq_iff_dvd' _).symm,
apply nat.one_le_pow'
end
theorem imo1964_q1a (n : ℕ) (hn : 0 < n) : problem_predicate n ↔ 3 ∣ n :=
begin
rw aux,
split,
{ intro h,
let t := n % 3,
rw [(show n = 3 * (n / 3) + t, from (nat.div_add_mod n 3).symm)] at h,
have ht : t < 3 := nat.mod_lt _ dec_trivial,
interval_cases t with hr; rw hr at h,
{ exact nat.dvd_of_mod_eq_zero hr },
{ exfalso,
have nonsense := (two_pow_three_mul_add_one_mod_seven _).symm.trans h,
rw modeq_iff_dvd at nonsense,
norm_num at nonsense },
{ exfalso,
have nonsense := (two_pow_three_mul_add_two_mod_seven _).symm.trans h,
rw modeq_iff_dvd at nonsense,
norm_num at nonsense } },
{ rintro ⟨m, rfl⟩,
apply two_pow_three_mul_mod_seven }
end
end imo1964_q1
open imo1964_q1
theorem imo1964_q1b (n : ℕ) : ¬ (7 ∣ 2 ^ n + 1) :=
begin
let t := n % 3,
rw [← modeq_zero_iff_dvd, (show n = 3 * (n / 3) + t, from (nat.div_add_mod n 3).symm)],
have ht : t < 3 := nat.mod_lt _ dec_trivial,
interval_cases t with hr; rw hr,
{ rw add_zero,
intro h,
have := h.symm.trans ((two_pow_three_mul_mod_seven _).add_right _),
rw modeq_iff_dvd at this,
norm_num at this },
{ intro h,
have := h.symm.trans ((two_pow_three_mul_add_one_mod_seven _).add_right _),
rw modeq_iff_dvd at this,
norm_num at this },
{ intro h,
have := h.symm.trans ((two_pow_three_mul_add_two_mod_seven _).add_right _),
rw modeq_iff_dvd at this,
norm_num at this },
end
|
/-
Copyright (c) 2022 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import probability.ident_distrib
import measure_theory.integral.interval_integral
import analysis.specific_limits.floor_pow
import analysis.p_series
import analysis.asymptotics.specific_asymptotics
/-!
# The strong law of large numbers
We prove the strong law of large numbers, in `probability_theory.strong_law_ae`:
If `X n` is a sequence of independent identically distributed integrable real-valued random
variables, then `∑ i in range n, X i / n` converges almost surely to `𝔼[X 0]`.
We give here the strong version, due to Etemadi, that only requires pairwise independence.
This file also contains the Lᵖ version of the strong law of large numbers provided by
`probability_theory.strong_law_Lp` which shows `∑ i in range n, X i / n` converges in Lᵖ to
`𝔼[X 0]` provided `X n` is independent identically distributed and is Lᵖ.
## Implementation
We follow the proof by Etemadi
[Etemadi, *An elementary proof of the strong law of large numbers*][etemadi_strong_law],
which goes as follows.
It suffices to prove the result for nonnegative `X`, as one can prove the general result by
splitting a general `X` into its positive part and negative part.
Consider `Xₙ` a sequence of nonnegative integrable identically distributed pairwise independent
random variables. Let `Yₙ` be the truncation of `Xₙ` up to `n`. We claim that
* Almost surely, `Xₙ = Yₙ` for all but finitely many indices. Indeed, `∑ ℙ (Xₙ ≠ Yₙ)` is bounded by
`1 + 𝔼[X]` (see `sum_prob_mem_Ioc_le` and `tsum_prob_mem_Ioi_lt_top`).
* Let `c > 1`. Along the sequence `n = c ^ k`, then `(∑_{i=0}^{n-1} Yᵢ - 𝔼[Yᵢ])/n` converges almost
surely to `0`. This follows from a variance control, as
```
∑_k ℙ (|∑_{i=0}^{c^k - 1} Yᵢ - 𝔼[Yᵢ]| > c^k ε)
≤ ∑_k (c^k ε)^{-2} ∑_{i=0}^{c^k - 1} Var[Yᵢ] (by Markov inequality)
≤ ∑_i (C/i^2) Var[Yᵢ] (as ∑_{c^k > i} 1/(c^k)^2 ≤ C/i^2)
≤ ∑_i (C/i^2) 𝔼[Yᵢ^2]
≤ 2C 𝔼[X^2] (see `sum_variance_truncation_le`)
```
* As `𝔼[Yᵢ]` converges to `𝔼[X]`, it follows from the two previous items and Cesaro that, along
the sequence `n = c^k`, one has `(∑_{i=0}^{n-1} Xᵢ) / n → 𝔼[X]` almost surely.
* To generalize it to all indices, we use the fact that `∑_{i=0}^{n-1} Xᵢ` is nondecreasing and
that, if `c` is close enough to `1`, the gap between `c^k` and `c^(k+1)` is small.
-/
noncomputable theory
open measure_theory filter finset asymptotics
open set (indicator)
open_locale topology big_operators measure_theory probability_theory ennreal nnreal
namespace probability_theory
/-! ### Prerequisites on truncations -/
section truncation
variables {α : Type*}
/-- Truncating a real-valued function to the interval `(-A, A]`. -/
def truncation (f : α → ℝ) (A : ℝ) :=
(indicator (set.Ioc (-A) A) id) ∘ f
variables {m : measurable_space α} {μ : measure α} {f : α → ℝ}
lemma _root_.measure_theory.ae_strongly_measurable.truncation
(hf : ae_strongly_measurable f μ) {A : ℝ} :
ae_strongly_measurable (truncation f A) μ :=
begin
apply ae_strongly_measurable.comp_ae_measurable _ hf.ae_measurable,
exact (strongly_measurable_id.indicator measurable_set_Ioc).ae_strongly_measurable,
end
lemma abs_truncation_le_bound (f : α → ℝ) (A : ℝ) (x : α) :
|truncation f A x| ≤ |A| :=
begin
simp only [truncation, set.indicator, set.mem_Icc, id.def, function.comp_app],
split_ifs,
{ exact abs_le_abs h.2 (neg_le.2 h.1.le) },
{ simp [abs_nonneg] }
end
@[simp]
lemma abs_truncation_le_abs_self (f : α → ℝ) (A : ℝ) (x : α) :
|truncation f A x| ≤ |f x| :=
begin
simp only [truncation, indicator, set.mem_Icc, id.def, function.comp_app],
split_ifs,
{ exact le_rfl },
{ simp [abs_nonneg] },
end
lemma truncation_eq_self {f : α → ℝ} {A : ℝ} {x : α} (h : |f x| < A) :
truncation f A x = f x :=
begin
simp only [truncation, indicator, set.mem_Icc, id.def, function.comp_app, ite_eq_left_iff],
assume H,
apply H.elim,
simp [(abs_lt.1 h).1, (abs_lt.1 h).2.le],
end
lemma truncation_eq_of_nonneg {f : α → ℝ} {A : ℝ} (h : ∀ x, 0 ≤ f x) :
truncation f A = (indicator (set.Ioc 0 A) id) ∘ f :=
begin
ext x,
rcases (h x).lt_or_eq with hx|hx,
{ simp only [truncation, indicator, hx, set.mem_Ioc, id.def, function.comp_app, true_and],
by_cases h'x : f x ≤ A,
{ have : - A < f x, by linarith [h x],
simp only [this, true_and] },
{ simp only [h'x, and_false] } },
{ simp only [truncation, indicator, hx, id.def, function.comp_app, if_t_t]},
end
lemma truncation_nonneg {f : α → ℝ} (A : ℝ) {x : α} (h : 0 ≤ f x) : 0 ≤ truncation f A x :=
set.indicator_apply_nonneg $ λ _, h
lemma _root_.measure_theory.ae_strongly_measurable.mem_ℒp_truncation [is_finite_measure μ]
(hf : ae_strongly_measurable f μ) {A : ℝ} {p : ℝ≥0∞} :
mem_ℒp (truncation f A) p μ :=
mem_ℒp.of_bound hf.truncation (|A|) (eventually_of_forall (λ x, abs_truncation_le_bound _ _ _))
lemma _root_.measure_theory.ae_strongly_measurable.integrable_truncation [is_finite_measure μ]
(hf : ae_strongly_measurable f μ) {A : ℝ} :
integrable (truncation f A) μ :=
by { rw ← mem_ℒp_one_iff_integrable, exact hf.mem_ℒp_truncation }
lemma moment_truncation_eq_interval_integral (hf : ae_strongly_measurable f μ) {A : ℝ}
(hA : 0 ≤ A) {n : ℕ} (hn : n ≠ 0) :
∫ x, (truncation f A x) ^ n ∂μ = ∫ y in (-A)..A, y ^ n ∂(measure.map f μ) :=
begin
have M : measurable_set (set.Ioc (-A) A) := measurable_set_Ioc,
change ∫ x, (λ z, (indicator (set.Ioc (-A) A) id z) ^ n) (f x) ∂μ = _,
rw [← integral_map hf.ae_measurable, interval_integral.integral_of_le, ← integral_indicator M],
{ simp only [indicator, zero_pow' _ hn, id.def, ite_pow] },
{ linarith },
{ exact ((measurable_id.indicator M).pow_const n).ae_strongly_measurable }
end
lemma moment_truncation_eq_interval_integral_of_nonneg (hf : ae_strongly_measurable f μ) {A : ℝ}
{n : ℕ} (hn : n ≠ 0) (h'f : 0 ≤ f) :
∫ x, (truncation f A x) ^ n ∂μ = ∫ y in 0..A, y ^ n ∂(measure.map f μ) :=
begin
have M : measurable_set (set.Ioc 0 A) := measurable_set_Ioc,
have M' : measurable_set (set.Ioc A 0) := measurable_set_Ioc,
rw truncation_eq_of_nonneg h'f,
change ∫ x, (λ z, (indicator (set.Ioc 0 A) id z) ^ n) (f x) ∂μ = _,
rcases le_or_lt 0 A with hA | hA,
{ rw [← integral_map hf.ae_measurable, interval_integral.integral_of_le hA,
← integral_indicator M],
{ simp only [indicator, zero_pow' _ hn, id.def, ite_pow] },
{ exact ((measurable_id.indicator M).pow_const n).ae_strongly_measurable } },
{ rw [← integral_map hf.ae_measurable, interval_integral.integral_of_ge hA.le,
← integral_indicator M'],
{ simp only [set.Ioc_eq_empty_of_le hA.le, zero_pow' _ hn, set.indicator_empty, integral_zero,
zero_eq_neg],
apply integral_eq_zero_of_ae,
have : ∀ᵐ x ∂(measure.map f μ), (0 : ℝ) ≤ x :=
(ae_map_iff hf.ae_measurable measurable_set_Ici).2 (eventually_of_forall h'f),
filter_upwards [this] with x hx,
simp only [indicator, set.mem_Ioc, pi.zero_apply, ite_eq_right_iff, and_imp],
assume h'x h''x,
have : x = 0, by linarith,
simp [this, zero_pow' _ hn] },
{ exact ((measurable_id.indicator M).pow_const n).ae_strongly_measurable } }
end
lemma integral_truncation_eq_interval_integral (hf : ae_strongly_measurable f μ) {A : ℝ}
(hA : 0 ≤ A) :
∫ x, truncation f A x ∂μ = ∫ y in (-A)..A, y ∂(measure.map f μ) :=
by simpa using moment_truncation_eq_interval_integral hf hA one_ne_zero
lemma integral_truncation_eq_interval_integral_of_nonneg (hf : ae_strongly_measurable f μ) {A : ℝ}
(h'f : 0 ≤ f) :
∫ x, truncation f A x ∂μ = ∫ y in 0..A, y ∂(measure.map f μ) :=
by simpa using moment_truncation_eq_interval_integral_of_nonneg hf one_ne_zero h'f
lemma integral_truncation_le_integral_of_nonneg
(hf : integrable f μ) (h'f : 0 ≤ f) {A : ℝ} :
∫ x, truncation f A x ∂μ ≤ ∫ x, f x ∂μ :=
begin
apply integral_mono_of_nonneg (eventually_of_forall (λ x, _)) hf (eventually_of_forall (λ x, _)),
{ exact truncation_nonneg _ (h'f x) },
{ calc truncation f A x ≤ |truncation f A x| : le_abs_self _
... ≤ |f x| : abs_truncation_le_abs_self _ _ _
... = f x : abs_of_nonneg (h'f x) }
end
/-- If a function is integrable, then the integral of its truncated versions converges to the
integral of the whole function. -/
lemma tendsto_integral_truncation {f : α → ℝ} (hf : integrable f μ) :
tendsto (λ A, ∫ x, truncation f A x ∂μ) at_top (𝓝 (∫ x, f x ∂μ)) :=
begin
refine tendsto_integral_filter_of_dominated_convergence (λ x, abs (f x)) _ _ _ _,
{ exact eventually_of_forall (λ A, hf.ae_strongly_measurable.truncation) },
{ apply eventually_of_forall (λ A, _),
apply eventually_of_forall (λ x, _),
rw real.norm_eq_abs,
exact abs_truncation_le_abs_self _ _ _ },
{ apply hf.abs },
{ apply eventually_of_forall (λ x, _),
apply tendsto_const_nhds.congr' _,
filter_upwards [Ioi_mem_at_top (abs (f x))] with A hA,
exact (truncation_eq_self hA).symm },
end
lemma ident_distrib.truncation {β : Type*} [measurable_space β] {ν : measure β}
{f : α → ℝ} {g : β → ℝ} (h : ident_distrib f g μ ν) {A : ℝ} :
ident_distrib (truncation f A) (truncation g A) μ ν :=
h.comp (measurable_id.indicator measurable_set_Ioc)
end truncation
section strong_law_ae
variables {Ω : Type*} [measure_space Ω] [is_probability_measure (ℙ : measure Ω)]
section moment_estimates
lemma sum_prob_mem_Ioc_le
{X : Ω → ℝ} (hint : integrable X) (hnonneg : 0 ≤ X) {K : ℕ} {N : ℕ} (hKN : K ≤ N) :
∑ j in range K, ℙ {ω | X ω ∈ set.Ioc (j : ℝ) N} ≤ ennreal.of_real (𝔼[X] + 1) :=
begin
let ρ : measure ℝ := measure.map X ℙ,
haveI : is_probability_measure ρ := is_probability_measure_map hint.ae_measurable,
have A : ∑ j in range K, ∫ x in j..N, (1 : ℝ) ∂ρ ≤ 𝔼[X] + 1, from calc
∑ j in range K, ∫ x in j..N, (1 : ℝ) ∂ρ
= ∑ j in range K, ∑ i in Ico j N, ∫ x in i..(i+1 : ℕ), (1 : ℝ) ∂ρ :
begin
apply sum_congr rfl (λ j hj, _),
rw interval_integral.sum_integral_adjacent_intervals_Ico ((mem_range.1 hj).le.trans hKN),
assume k hk,
exact continuous_const.interval_integrable _ _,
end
... = ∑ i in range N, ∑ j in range (min (i+1) K), ∫ x in i..(i+1 : ℕ), (1 : ℝ) ∂ρ :
begin
simp_rw [sum_sigma'],
refine sum_bij' (λ (p : (Σ (i : ℕ), ℕ)) hp, (⟨p.2, p.1⟩ : (Σ (i : ℕ), ℕ))) _ (λ a ha, rfl)
(λ (p : (Σ (i : ℕ), ℕ)) hp, (⟨p.2, p.1⟩ : (Σ (i : ℕ), ℕ))) _ _ _,
{ rintros ⟨i, j⟩ hij,
simp only [mem_sigma, mem_range, mem_Ico] at hij,
simp only [hij, nat.lt_succ_iff.2 hij.2.1, mem_sigma, mem_range, lt_min_iff, and_self] },
{ rintros ⟨i, j⟩ hij,
simp only [mem_sigma, mem_range, lt_min_iff] at hij,
simp only [hij, nat.lt_succ_iff.1 hij.2.1, mem_sigma, mem_range, mem_Ico, and_self] },
{ rintros ⟨i, j⟩ hij, refl },
{ rintros ⟨i, j⟩ hij, refl },
end
... ≤ ∑ i in range N, (i + 1) * ∫ x in i..(i+1 : ℕ), (1 : ℝ) ∂ρ :
begin
apply sum_le_sum (λ i hi, _),
simp only [nat.cast_add, nat.cast_one, sum_const, card_range, nsmul_eq_mul, nat.cast_min],
refine mul_le_mul_of_nonneg_right (min_le_left _ _) _,
apply interval_integral.integral_nonneg,
{ simp only [le_add_iff_nonneg_right, zero_le_one] },
{ simp only [zero_le_one, implies_true_iff], }
end
... ≤ ∑ i in range N, ∫ x in i..(i+1 : ℕ), (x + 1) ∂ρ :
begin
apply sum_le_sum (λ i hi, _),
have I : (i : ℝ) ≤ (i + 1 : ℕ),
by simp only [nat.cast_add, nat.cast_one, le_add_iff_nonneg_right, zero_le_one],
simp_rw [interval_integral.integral_of_le I, ← integral_mul_left],
apply set_integral_mono_on,
{ exact continuous_const.integrable_on_Ioc },
{ exact (continuous_id.add continuous_const).integrable_on_Ioc },
{ exact measurable_set_Ioc },
{ assume x hx,
simp only [nat.cast_add, nat.cast_one, set.mem_Ioc] at hx,
simp [hx.1.le] }
end
... = ∫ x in 0..N, x + 1 ∂ρ :
begin
rw interval_integral.sum_integral_adjacent_intervals (λ k hk, _),
{ norm_cast },
{ exact (continuous_id.add continuous_const).interval_integrable _ _ }
end
... = ∫ x in 0..N, x ∂ρ + ∫ x in 0..N, 1 ∂ρ :
begin
rw interval_integral.integral_add,
{ exact continuous_id.interval_integrable _ _ },
{ exact continuous_const.interval_integrable _ _ },
end
... = 𝔼[truncation X N] + ∫ x in 0..N, 1 ∂ρ :
by rw integral_truncation_eq_interval_integral_of_nonneg hint.1 hnonneg
... ≤ 𝔼[X] + ∫ x in 0..N, 1 ∂ρ :
add_le_add_right (integral_truncation_le_integral_of_nonneg hint hnonneg) _
... ≤ 𝔼[X] + 1 :
begin
refine add_le_add le_rfl _,
rw interval_integral.integral_of_le (nat.cast_nonneg _),
simp only [integral_const, measure.restrict_apply', measurable_set_Ioc, set.univ_inter,
algebra.id.smul_eq_mul, mul_one],
rw ← ennreal.one_to_real,
exact ennreal.to_real_mono ennreal.one_ne_top prob_le_one,
end,
have B : ∀ a b, ℙ {ω | X ω ∈ set.Ioc a b} = ennreal.of_real (∫ x in set.Ioc a b, (1 : ℝ) ∂ρ),
{ assume a b,
rw [of_real_set_integral_one ρ _,
measure.map_apply_of_ae_measurable hint.ae_measurable measurable_set_Ioc],
refl },
calc ∑ j in range K, ℙ {ω | X ω ∈ set.Ioc (j : ℝ) N}
= ∑ j in range K, ennreal.of_real (∫ x in set.Ioc (j : ℝ) N, (1 : ℝ) ∂ρ) :
by simp_rw B
... = ennreal.of_real (∑ j in range K, ∫ x in set.Ioc (j : ℝ) N, (1 : ℝ) ∂ρ) :
begin
rw ennreal.of_real_sum_of_nonneg,
simp only [integral_const, algebra.id.smul_eq_mul, mul_one, ennreal.to_real_nonneg,
implies_true_iff],
end
... = ennreal.of_real (∑ j in range K, ∫ x in (j : ℝ)..N, (1 : ℝ) ∂ρ) :
begin
congr' 1,
refine sum_congr rfl (λ j hj, _),
rw interval_integral.integral_of_le (nat.cast_le.2 ((mem_range.1 hj).le.trans hKN)),
end
... ≤ ennreal.of_real (𝔼[X] + 1) : ennreal.of_real_le_of_real A
end
lemma tsum_prob_mem_Ioi_lt_top
{X : Ω → ℝ} (hint : integrable X) (hnonneg : 0 ≤ X) :
∑' (j : ℕ), ℙ {ω | X ω ∈ set.Ioi (j : ℝ)} < ∞ :=
begin
suffices : ∀ (K : ℕ), ∑ j in range K, ℙ {ω | X ω ∈ set.Ioi (j : ℝ)} ≤ ennreal.of_real (𝔼[X] + 1),
from (le_of_tendsto_of_tendsto (ennreal.tendsto_nat_tsum _) tendsto_const_nhds
(eventually_of_forall this)).trans_lt ennreal.of_real_lt_top,
assume K,
have A : tendsto (λ (N : ℕ), ∑ j in range K, ℙ {ω | X ω ∈ set.Ioc (j : ℝ) N})
at_top (𝓝 (∑ j in range K, ℙ {ω | X ω ∈ set.Ioi (j : ℝ)})),
{ refine tendsto_finset_sum _ (λ i hi, _),
have : {ω | X ω ∈ set.Ioi (i : ℝ)} = ⋃ (N : ℕ), {ω | X ω ∈ set.Ioc (i : ℝ) N},
{ apply set.subset.antisymm _ _,
{ assume ω hω,
obtain ⟨N, hN⟩ : ∃ (N : ℕ), X ω ≤ N := exists_nat_ge (X ω),
exact set.mem_Union.2 ⟨N, hω, hN⟩ },
{ simp only [set.mem_Ioc, set.mem_Ioi, set.Union_subset_iff, set.set_of_subset_set_of,
implies_true_iff] {contextual := tt} } },
rw this,
apply tendsto_measure_Union,
assume m n hmn x hx,
exact ⟨hx.1, hx.2.trans (nat.cast_le.2 hmn)⟩ },
apply le_of_tendsto_of_tendsto A tendsto_const_nhds,
filter_upwards [Ici_mem_at_top K] with N hN,
exact sum_prob_mem_Ioc_le hint hnonneg hN
end
lemma sum_variance_truncation_le
{X : Ω → ℝ} (hint : integrable X) (hnonneg : 0 ≤ X) (K : ℕ) :
∑ j in range K, ((j : ℝ) ^ 2) ⁻¹ * 𝔼[(truncation X j) ^ 2] ≤ 2 * 𝔼[X] :=
begin
set Y := λ (n : ℕ), truncation X n,
let ρ : measure ℝ := measure.map X ℙ,
have Y2 : ∀ n, 𝔼[Y n ^ 2] = ∫ x in 0..n, x ^ 2 ∂ρ,
{ assume n,
change 𝔼[λ x, (Y n x)^2] = _,
rw [moment_truncation_eq_interval_integral_of_nonneg hint.1 two_ne_zero hnonneg] },
calc ∑ j in range K, ((j : ℝ) ^ 2) ⁻¹ * 𝔼[Y j ^ 2]
= ∑ j in range K, ((j : ℝ) ^ 2) ⁻¹ * ∫ x in 0..j, x ^ 2 ∂ρ :
by simp_rw [Y2]
... = ∑ j in range K, ((j : ℝ) ^ 2) ⁻¹ * ∑ k in range j, ∫ x in k..(k+1 : ℕ), x ^ 2 ∂ρ :
begin
congr' 1 with j,
congr' 1,
rw interval_integral.sum_integral_adjacent_intervals,
{ norm_cast },
assume k hk,
exact (continuous_id.pow _).interval_integrable _ _,
end
... = ∑ k in range K, (∑ j in Ioo k K, ((j : ℝ) ^ 2) ⁻¹) * ∫ x in k..(k+1 : ℕ), x ^ 2 ∂ρ :
begin
simp_rw [mul_sum, sum_mul, sum_sigma'],
refine sum_bij' (λ (p : (Σ (i : ℕ), ℕ)) hp, (⟨p.2, p.1⟩ : (Σ (i : ℕ), ℕ))) _ (λ a ha, rfl)
(λ (p : (Σ (i : ℕ), ℕ)) hp, (⟨p.2, p.1⟩ : (Σ (i : ℕ), ℕ))) _ _ _,
{ rintros ⟨i, j⟩ hij,
simp only [mem_sigma, mem_range, mem_filter] at hij,
simp [hij, mem_sigma, mem_range, and_self, hij.2.trans hij.1], },
{ rintros ⟨i, j⟩ hij,
simp only [mem_sigma, mem_range, mem_Ioo] at hij,
simp only [hij, mem_sigma, mem_range, and_self], },
{ rintros ⟨i, j⟩ hij, refl },
{ rintros ⟨i, j⟩ hij, refl },
end
... ≤ ∑ k in range K, (2/ (k+1)) * ∫ x in k..(k+1 : ℕ), x ^ 2 ∂ρ :
begin
apply sum_le_sum (λ k hk, _),
refine mul_le_mul_of_nonneg_right (sum_Ioo_inv_sq_le _ _) _,
refine interval_integral.integral_nonneg_of_forall _ (λ u, sq_nonneg _),
simp only [nat.cast_add, nat.cast_one, le_add_iff_nonneg_right, zero_le_one],
end
... ≤ ∑ k in range K, ∫ x in k..(k+1 : ℕ), 2 * x ∂ρ :
begin
apply sum_le_sum (λ k hk, _),
have Ik : (k : ℝ) ≤ (k + 1 : ℕ), by simp,
rw [← interval_integral.integral_const_mul, interval_integral.integral_of_le Ik,
interval_integral.integral_of_le Ik],
refine set_integral_mono_on _ _ measurable_set_Ioc (λ x hx, _),
{ apply continuous.integrable_on_Ioc,
exact continuous_const.mul (continuous_pow 2) },
{ apply continuous.integrable_on_Ioc,
exact continuous_const.mul continuous_id' },
{ calc 2 / (↑k + 1) * x ^ 2 = (x / (k+1)) * (2 * x) : by ring_exp
... ≤ 1 * (2 * x) :
mul_le_mul_of_nonneg_right begin
apply_mod_cast (div_le_one _).2 hx.2,
simp only [nat.cast_add, nat.cast_one],
linarith only [show (0 : ℝ) ≤ k, from nat.cast_nonneg k],
end (mul_nonneg zero_le_two ((nat.cast_nonneg k).trans hx.1.le))
... = 2 * x : by rw one_mul }
end
... = 2 * ∫ x in (0 : ℝ)..K, x ∂ρ :
begin
rw interval_integral.sum_integral_adjacent_intervals (λ k hk, _),
swap, { exact (continuous_const.mul continuous_id').interval_integrable _ _ },
rw interval_integral.integral_const_mul,
norm_cast
end
... ≤ 2 * 𝔼[X] :
mul_le_mul_of_nonneg_left begin
rw ← integral_truncation_eq_interval_integral_of_nonneg hint.1 hnonneg,
exact integral_truncation_le_integral_of_nonneg hint hnonneg,
end zero_le_two
end
end moment_estimates
section strong_law_nonneg
/- This paragraph proves the strong law of large numbers (almost sure version, assuming only
pairwise independence) for nonnegative random variables, following Etemadi's proof. -/
variables (X : ℕ → Ω → ℝ) (hint : integrable (X 0))
(hindep : pairwise (λ i j, indep_fun (X i) (X j)))
(hident : ∀ i, ident_distrib (X i) (X 0))
(hnonneg : ∀ i ω, 0 ≤ X i ω)
include X hint hindep hident hnonneg
/- The truncation of `Xᵢ` up to `i` satisfies the strong law of large numbers (with respect to
the truncated expectation) along the sequence `c^n`, for any `c > 1`, up to a given `ε > 0`.
This follows from a variance control. -/
lemma strong_law_aux1 {c : ℝ} (c_one : 1 < c) {ε : ℝ} (εpos : 0 < ε) :
∀ᵐ ω, ∀ᶠ (n : ℕ) in at_top,
|∑ i in range ⌊c^n⌋₊, truncation (X i) i ω - 𝔼[∑ i in range ⌊c^n⌋₊, truncation (X i) i]|
< ε * ⌊c^n⌋₊ :=
begin
/- Let `S n = ∑ i in range n, Y i` where `Y i = truncation (X i) i`. We should show that
`|S k - 𝔼[S k]| / k ≤ ε` along the sequence of powers of `c`. For this, we apply Borel-Cantelli:
it suffices to show that the converse probabilities are summable. From Chebyshev's inequality, this
will follow from a variance control `∑' Var[S (c^i)] / (c^i)^2 < ∞`. This is checked in `I2` using
pairwise independence to expand the variance of the sum as the sum of the variances, and then
a straightforward but tedious computation (essentially boiling down to the fact that the sum of
`1/(c ^ i)^2` beyond a threshold `j` is comparable to `1/j^2`).
Note that we have written `c^i` in the above proof sketch, but rigorously one should put integer
parts everywhere, making things more painful. We write `u i = ⌊c^i⌋₊` for brevity. -/
have c_pos : 0 < c := zero_lt_one.trans c_one,
let ρ : measure ℝ := measure.map (X 0) ℙ,
have hX : ∀ i, ae_strongly_measurable (X i) ℙ :=
λ i, (hident i).symm.ae_strongly_measurable_snd hint.1,
have A : ∀ i, strongly_measurable (indicator (set.Ioc (-i : ℝ) i) id) :=
λ i, strongly_measurable_id.indicator measurable_set_Ioc,
set Y := λ (n : ℕ), truncation (X n) n with hY,
set S := λ n, ∑ i in range n, Y i with hS,
let u : ℕ → ℕ := λ n, ⌊c ^ n⌋₊,
have u_mono : monotone u := λ i j hij, nat.floor_mono (pow_le_pow c_one.le hij),
have I1 : ∀ K, ∑ j in range K, ((j : ℝ) ^ 2) ⁻¹ * Var[Y j] ≤ 2 * 𝔼[X 0],
{ assume K,
calc ∑ j in range K, ((j : ℝ) ^ 2) ⁻¹ * Var[Y j] ≤
∑ j in range K, ((j : ℝ) ^ 2) ⁻¹ * 𝔼[(truncation (X 0) j)^2] :
begin
apply sum_le_sum (λ j hj, _),
refine mul_le_mul_of_nonneg_left _ (inv_nonneg.2 (sq_nonneg _)),
rw (hident j).truncation.variance_eq,
exact variance_le_expectation_sq (hX 0).truncation,
end
... ≤ 2 * 𝔼[X 0] : sum_variance_truncation_le hint (hnonneg 0) K },
let C := (c ^ 5 * (c - 1) ⁻¹ ^ 3) * (2 * 𝔼[X 0]),
have I2 : ∀ N, ∑ i in range N, ((u i : ℝ) ^ 2) ⁻¹ * Var[S (u i)] ≤ C,
{ assume N,
calc
∑ i in range N, ((u i : ℝ) ^ 2) ⁻¹ * Var[S (u i)]
= ∑ i in range N, ((u i : ℝ) ^ 2) ⁻¹ * (∑ j in range (u i), Var[Y j]) :
begin
congr' 1 with i,
congr' 1,
rw [hS, indep_fun.variance_sum],
{ assume j hj,
exact (hident j).ae_strongly_measurable_fst.mem_ℒp_truncation },
{ assume k hk l hl hkl,
exact (hindep hkl).comp (A k).measurable (A l).measurable }
end
... = ∑ j in range (u (N - 1)),
(∑ i in (range N).filter (λ i, j < u i), ((u i : ℝ) ^ 2) ⁻¹) * Var[Y j] :
begin
simp_rw [mul_sum, sum_mul, sum_sigma'],
refine sum_bij' (λ (p : (Σ (i : ℕ), ℕ)) hp, (⟨p.2, p.1⟩ : (Σ (i : ℕ), ℕ))) _ (λ a ha, rfl)
(λ (p : (Σ (i : ℕ), ℕ)) hp, (⟨p.2, p.1⟩ : (Σ (i : ℕ), ℕ))) _ _ _,
{ rintros ⟨i, j⟩ hij,
simp only [mem_sigma, mem_range] at hij,
simp only [hij.1, hij.2, mem_sigma, mem_range, mem_filter, and_true],
exact hij.2.trans_le (u_mono (nat.le_pred_of_lt hij.1)) },
{ rintros ⟨i, j⟩ hij,
simp only [mem_sigma, mem_range, mem_filter] at hij,
simp only [hij.2.1, hij.2.2, mem_sigma, mem_range, and_self] },
{ rintros ⟨i, j⟩ hij, refl },
{ rintros ⟨i, j⟩ hij, refl },
end
... ≤ ∑ j in range (u (N - 1)), (c ^ 5 * (c - 1) ⁻¹ ^ 3 / j ^ 2) * Var[Y j] :
begin
apply sum_le_sum (λ j hj, _),
rcases @eq_zero_or_pos _ _ j with rfl|hj,
{ simp only [Y, nat.cast_zero, zero_pow', ne.def, bit0_eq_zero, nat.one_ne_zero,
not_false_iff, div_zero, zero_mul],
simp only [nat.cast_zero, truncation_zero, variance_zero, mul_zero] },
apply mul_le_mul_of_nonneg_right _ (variance_nonneg _ _),
convert sum_div_nat_floor_pow_sq_le_div_sq N (nat.cast_pos.2 hj) c_one,
{ simp only [nat.cast_lt] },
{ simp only [one_div] }
end
... = (c ^ 5 * (c - 1) ⁻¹ ^ 3) * ∑ j in range (u (N - 1)), ((j : ℝ) ^ 2) ⁻¹ * Var[Y j] :
by { simp_rw [mul_sum, div_eq_mul_inv], ring_nf }
... ≤ (c ^ 5 * (c - 1) ⁻¹ ^ 3) * (2 * 𝔼[X 0]) :
begin
apply mul_le_mul_of_nonneg_left (I1 _),
apply mul_nonneg (pow_nonneg c_pos.le _),
exact pow_nonneg (inv_nonneg.2 (sub_nonneg.2 c_one.le)) _
end },
have I3 : ∀ N, ∑ i in range N,
ℙ {ω | (u i * ε : ℝ) ≤ |S (u i) ω - 𝔼[S (u i)]|} ≤ ennreal.of_real (ε ⁻¹ ^ 2 * C),
{ assume N,
calc ∑ i in range N, ℙ {ω | (u i * ε : ℝ) ≤ |S (u i) ω - 𝔼[S (u i)]|}
≤ ∑ i in range N, ennreal.of_real (Var[S (u i)] / (u i * ε) ^ 2) :
begin
refine sum_le_sum (λ i hi, _),
apply meas_ge_le_variance_div_sq,
{ exact mem_ℒp_finset_sum' _
(λ j hj, (hident j).ae_strongly_measurable_fst.mem_ℒp_truncation) },
{ apply mul_pos (nat.cast_pos.2 _) εpos,
refine zero_lt_one.trans_le _,
apply nat.le_floor,
rw nat.cast_one,
apply one_le_pow_of_one_le c_one.le }
end
... = ennreal.of_real (∑ i in range N, Var[S (u i)] / (u i * ε) ^ 2) :
begin
rw ennreal.of_real_sum_of_nonneg (λ i hi, _),
exact div_nonneg (variance_nonneg _ _) (sq_nonneg _),
end
... ≤ ennreal.of_real (ε ⁻¹ ^ 2 * C) :
begin
apply ennreal.of_real_le_of_real,
simp_rw [div_eq_inv_mul, ← inv_pow, mul_inv, mul_comm _ (ε⁻¹), mul_pow, mul_assoc,
← mul_sum],
refine mul_le_mul_of_nonneg_left _ (sq_nonneg _),
simp_rw [inv_pow],
exact I2 N
end },
have I4 : ∑' i, ℙ {ω | (u i * ε : ℝ) ≤ |S (u i) ω - 𝔼[S (u i)]|} < ∞ :=
(le_of_tendsto_of_tendsto' (ennreal.tendsto_nat_tsum _) tendsto_const_nhds I3).trans_lt
ennreal.of_real_lt_top,
filter_upwards [ae_eventually_not_mem I4.ne] with ω hω,
simp_rw [not_le, mul_comm, S, sum_apply] at hω,
exact hω,
end
/- The truncation of `Xᵢ` up to `i` satisfies the strong law of large numbers
(with respect to the truncated expectation) along the sequence
`c^n`, for any `c > 1`. This follows from `strong_law_aux1` by varying `ε`. -/
lemma strong_law_aux2 {c : ℝ} (c_one : 1 < c) :
∀ᵐ ω, (λ (n : ℕ), ∑ i in range ⌊c^n⌋₊, truncation (X i) i ω
- 𝔼[∑ i in range ⌊c^n⌋₊, truncation (X i) i]) =o[at_top] (λ (n : ℕ), (⌊c^n⌋₊ : ℝ)) :=
begin
obtain ⟨v, -, v_pos, v_lim⟩ :
∃ (v : ℕ → ℝ), strict_anti v ∧ (∀ (n : ℕ), 0 < v n) ∧ tendsto v at_top (𝓝 0) :=
exists_seq_strict_anti_tendsto (0 : ℝ),
have := λ i, strong_law_aux1 X hint hindep hident hnonneg c_one (v_pos i),
filter_upwards [ae_all_iff.2 this] with ω hω,
apply asymptotics.is_o_iff.2 (λ ε εpos, _),
obtain ⟨i, hi⟩ : ∃ i, v i < ε := ((tendsto_order.1 v_lim).2 ε εpos).exists,
filter_upwards [hω i] with n hn,
simp only [real.norm_eq_abs, lattice_ordered_comm_group.abs_abs, nat.abs_cast],
exact hn.le.trans (mul_le_mul_of_nonneg_right hi.le (nat.cast_nonneg _)),
end
omit hindep hnonneg
/-- The expectation of the truncated version of `Xᵢ` behaves asymptotically like the whole
expectation. This follows from convergence and Cesaro averaging. -/
lemma strong_law_aux3 :
(λ n, 𝔼[∑ i in range n, truncation (X i) i] - n * 𝔼[X 0]) =o[at_top] (coe : ℕ → ℝ) :=
begin
have A : tendsto (λ i, 𝔼[truncation (X i) i]) at_top (𝓝 (𝔼[X 0])),
{ convert (tendsto_integral_truncation hint).comp tendsto_coe_nat_at_top_at_top,
ext i,
exact (hident i).truncation.integral_eq },
convert asymptotics.is_o_sum_range_of_tendsto_zero (tendsto_sub_nhds_zero_iff.2 A),
ext1 n,
simp only [sum_sub_distrib, sum_const, card_range, nsmul_eq_mul, sum_apply, sub_left_inj],
rw integral_finset_sum _ (λ i hi, _),
exact ((hident i).symm.integrable_snd hint).1.integrable_truncation,
end
include hindep hnonneg
/- The truncation of `Xᵢ` up to `i` satisfies the strong law of large numbers
(with respect to the original expectation) along the sequence
`c^n`, for any `c > 1`. This follows from the version from the truncated expectation, and the
fact that the truncated and the original expectations have the same asymptotic behavior. -/
lemma strong_law_aux4 {c : ℝ} (c_one : 1 < c) :
∀ᵐ ω, (λ (n : ℕ), ∑ i in range ⌊c^n⌋₊, truncation (X i) i ω - ⌊c^n⌋₊ * 𝔼[X 0]) =o[at_top]
(λ (n : ℕ), (⌊c^n⌋₊ : ℝ)) :=
begin
filter_upwards [strong_law_aux2 X hint hindep hident hnonneg c_one] with ω hω,
have A : tendsto (λ (n : ℕ), ⌊c ^ n⌋₊) at_top at_top :=
tendsto_nat_floor_at_top.comp (tendsto_pow_at_top_at_top_of_one_lt c_one),
convert hω.add ((strong_law_aux3 X hint hident).comp_tendsto A),
ext1 n,
simp,
end
omit hindep
/-- The truncated and non-truncated versions of `Xᵢ` have the same asymptotic behavior, as they
almost surely coincide at all but finitely many steps. This follows from a probability computation
and Borel-Cantelli. -/
lemma strong_law_aux5 :
∀ᵐ ω, (λ (n : ℕ), ∑ i in range n, truncation (X i) i ω - ∑ i in range n, X i ω) =o[at_top]
(λ (n : ℕ), (n : ℝ)) :=
begin
have A : ∑' (j : ℕ), ℙ {ω | X j ω ∈ set.Ioi (j : ℝ)} < ∞,
{ convert tsum_prob_mem_Ioi_lt_top hint (hnonneg 0),
ext1 j,
exact (hident j).measure_mem_eq measurable_set_Ioi },
have B : ∀ᵐ ω, tendsto (λ (n : ℕ), truncation (X n) n ω - X n ω) at_top (𝓝 0),
{ filter_upwards [ae_eventually_not_mem A.ne] with ω hω,
apply tendsto_const_nhds.congr' _,
filter_upwards [hω, Ioi_mem_at_top 0] with n hn npos,
simp only [truncation, indicator, set.mem_Ioc, id.def, function.comp_app],
split_ifs,
{ exact (sub_self _).symm },
{ have : - (n : ℝ) < X n ω,
{ apply lt_of_lt_of_le _ (hnonneg n ω),
simpa only [right.neg_neg_iff, nat.cast_pos] using npos },
simp only [this, true_and, not_le] at h,
exact (hn h).elim } },
filter_upwards [B] with ω hω,
convert is_o_sum_range_of_tendsto_zero hω,
ext n,
rw sum_sub_distrib,
end
include hindep
/- `Xᵢ` satisfies the strong law of large numbers along the sequence
`c^n`, for any `c > 1`. This follows from the version for the truncated `Xᵢ`, and the fact that
`Xᵢ` and its truncated version have the same asymptotic behavior. -/
lemma strong_law_aux6 {c : ℝ} (c_one : 1 < c) :
∀ᵐ ω, tendsto (λ (n : ℕ), (∑ i in range ⌊c^n⌋₊, X i ω) / ⌊c^n⌋₊) at_top (𝓝 (𝔼[X 0])) :=
begin
have H : ∀ (n : ℕ), (0 : ℝ) < ⌊c ^ n⌋₊,
{ assume n,
refine zero_lt_one.trans_le _,
simp only [nat.one_le_cast, nat.one_le_floor_iff, one_le_pow_of_one_le c_one.le n] },
filter_upwards [strong_law_aux4 X hint hindep hident hnonneg c_one,
strong_law_aux5 X hint hident hnonneg] with ω hω h'ω,
rw [← tendsto_sub_nhds_zero_iff, ← asymptotics.is_o_one_iff ℝ],
have L : (λ n : ℕ, ∑ i in range ⌊c^n⌋₊, X i ω - ⌊c^n⌋₊ * 𝔼[X 0]) =o[at_top] (λ n, (⌊c^n⌋₊ : ℝ)),
{ have A : tendsto (λ (n : ℕ), ⌊c ^ n⌋₊) at_top at_top :=
tendsto_nat_floor_at_top.comp (tendsto_pow_at_top_at_top_of_one_lt c_one),
convert hω.sub (h'ω.comp_tendsto A),
ext1 n,
simp only [sub_sub_sub_cancel_left] },
convert L.mul_is_O (is_O_refl (λ (n : ℕ), (⌊c ^ n⌋₊ : ℝ) ⁻¹) at_top);
{ ext1 n,
field_simp [(H n).ne'] },
end
/-- `Xᵢ` satisfies the strong law of large numbers along all integers. This follows from the
corresponding fact along the sequences `c^n`, and the fact that any integer can be sandwiched
between `c^n` and `c^(n+1)` with comparably small error if `c` is close enough to `1`
(which is formalized in `tendsto_div_of_monotone_of_tendsto_div_floor_pow`). -/
lemma strong_law_aux7 :
∀ᵐ ω, tendsto (λ (n : ℕ), (∑ i in range n, X i ω) / n) at_top (𝓝 (𝔼[X 0])) :=
begin
obtain ⟨c, -, cone, clim⟩ :
∃ (c : ℕ → ℝ), strict_anti c ∧ (∀ (n : ℕ), 1 < c n) ∧ tendsto c at_top (𝓝 1) :=
exists_seq_strict_anti_tendsto (1 : ℝ),
have : ∀ k, ∀ᵐ ω, tendsto (λ (n : ℕ), (∑ i in range ⌊c k ^ n⌋₊, X i ω) / ⌊c k ^ n⌋₊)
at_top (𝓝 (𝔼[X 0])) := λ k, strong_law_aux6 X hint hindep hident hnonneg (cone k),
filter_upwards [ae_all_iff.2 this] with ω hω,
apply tendsto_div_of_monotone_of_tendsto_div_floor_pow _ _ _ c cone clim _,
{ assume m n hmn,
exact sum_le_sum_of_subset_of_nonneg (range_mono hmn) (λ i hi h'i, hnonneg i ω) },
{ exact hω }
end
end strong_law_nonneg
/-- *Strong law of large numbers*, almost sure version: if `X n` is a sequence of independent
identically distributed integrable real-valued random variables, then `∑ i in range n, X i / n`
converges almost surely to `𝔼[X 0]`. We give here the strong version, due to Etemadi, that only
requires pairwise independence. -/
theorem strong_law_ae
(X : ℕ → Ω → ℝ) (hint : integrable (X 0))
(hindep : pairwise (λ i j, indep_fun (X i) (X j)))
(hident : ∀ i, ident_distrib (X i) (X 0)) :
∀ᵐ ω, tendsto (λ (n : ℕ), (∑ i in range n, X i ω) / n) at_top (𝓝 (𝔼[X 0])) :=
begin
let pos : ℝ → ℝ := (λ x, max x 0),
let neg : ℝ → ℝ := (λ x, max (-x) 0),
have posm : measurable pos := measurable_id'.max measurable_const,
have negm : measurable neg := measurable_id'.neg.max measurable_const,
have A : ∀ᵐ ω, tendsto (λ (n : ℕ), (∑ i in range n, (pos ∘ (X i)) ω) / n)
at_top (𝓝 (𝔼[pos ∘ (X 0)])) :=
strong_law_aux7 _ hint.pos_part (λ i j hij, (hindep hij).comp posm posm)
(λ i, (hident i).comp posm) (λ i ω, le_max_right _ _),
have B : ∀ᵐ ω, tendsto (λ (n : ℕ), (∑ i in range n, (neg ∘ (X i)) ω) / n)
at_top (𝓝 (𝔼[neg ∘ (X 0)])) :=
strong_law_aux7 _ hint.neg_part (λ i j hij, (hindep hij).comp negm negm)
(λ i, (hident i).comp negm) (λ i ω, le_max_right _ _),
filter_upwards [A, B] with ω hωpos hωneg,
convert hωpos.sub hωneg,
{ simp only [← sub_div, ← sum_sub_distrib, max_zero_sub_max_neg_zero_eq_self] },
{ simp only [←integral_sub hint.pos_part hint.neg_part, max_zero_sub_max_neg_zero_eq_self] }
end
end strong_law_ae
section strong_law_Lp
variables {Ω : Type*} [measure_space Ω] [is_probability_measure (ℙ : measure Ω)]
/-- *Strong law of large numbers*, Lᵖ version: if `X n` is a sequence of independent
identically distributed real-valued random variables in Lᵖ, then `∑ i in range n, X i / n`
converges in Lᵖ to `𝔼[X 0]`. -/
theorem strong_law_Lp
{p : ℝ≥0∞} (hp : 1 ≤ p) (hp' : p ≠ ∞)
(X : ℕ → Ω → ℝ) (hℒp : mem_ℒp (X 0) p)
(hindep : pairwise (λ i j, indep_fun (X i) (X j)))
(hident : ∀ i, ident_distrib (X i) (X 0)) :
tendsto (λ n, snorm (λ ω, (∑ i in range n, X i ω) / n - 𝔼[X 0]) p ℙ) at_top (𝓝 0) :=
begin
have hmeas : ∀ i, ae_strongly_measurable (X i) ℙ :=
λ i, (hident i).ae_strongly_measurable_iff.2 hℒp.1,
have hint : integrable (X 0) ℙ := hℒp.integrable hp,
have havg : ∀ n, ae_strongly_measurable (λ ω, (∑ i in range n, X i ω) / n) ℙ,
{ intro n,
simp_rw div_eq_mul_inv,
exact ae_strongly_measurable.mul_const (ae_strongly_measurable_sum _ (λ i _, hmeas i)) _ },
refine tendsto_Lp_of_tendsto_in_measure _ hp hp' havg (mem_ℒp_const _) _
(tendsto_in_measure_of_tendsto_ae havg (strong_law_ae _ hint hindep hident)),
rw (_ : (λ n ω, (∑ i in range n, X i ω) / ↑n) = λ n, (∑ i in range n, X i) / ↑n),
{ exact (uniform_integrable_average hp $
mem_ℒp.uniform_integrable_of_ident_distrib hp hp' hℒp hident).2.1 },
{ ext n ω,
simp only [pi.coe_nat, pi.div_apply, sum_apply] }
end
end strong_law_Lp
end probability_theory
|
/- Copyright (c) 2020 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Orlando Cau
-/
import linear_algebra.basic linear_algebra.finite_dimensional
import algebra.module
/-!
We fix a notation for the composition of linear maps (`linear_map.comp`).
-/
notation f ` ⊚ `:80 g:80 := linear_map.comp f g
universe variables u v w
open linear_map
/-- A representation of a group `G` on an `R`-module `M` is a group homomorphism from `G` to
`GL(M)`. Here it is encoded as a monoid homomorphism into the linear endomorphisms of `M`;
the invertibility of each `ρ g` is recovered from the group structure in `gr.to_equiv'`.
-/
def group_representation (G R M : Type*) [group G] [ring R] [add_comm_group M] [module R M] :
Type* := G →* M →ₗ[R] M
variables {G : Type u} [group G]
{R : Type v} [ring R]
{M : Type w}[add_comm_group M] [module R M]
instance : has_coe_to_fun (group_representation G R M) := ⟨_, λ ρ , ρ.to_fun⟩
variables (ρ : group_representation G R M)
@[simp]lemma map_comp (s t : G) : ρ (s * t) = ρ s ⊚ ρ t := ρ.map_mul _ _
def gr.to_equiv' (g : G) : M ≃ₗ[R] M := { to_fun := ρ g,
add := (ρ g).map_add ,
smul := (ρ g).map_smul,
inv_fun := (ρ g⁻¹ ),
left_inv := begin intro, change (ρ g⁻¹ ⊚ ρ g) x = _, erw ← map_comp, rw inv_mul_self, erw ρ.map_one, exact rfl end,
right_inv := begin intro, change (ρ g ⊚ ρ g⁻¹ ) x = _, erw ← map_comp, rw mul_inv_self, erw ρ.map_one, exact rfl end, }
def gr.to_equiv : G →* M ≃ₗ[R] M := { to_fun := gr.to_equiv' ρ ,
map_one' := begin
unfold gr.to_equiv', congr,erw ρ.map_one, exact rfl, rw one_inv, rw ρ.map_one, exact rfl,
end,
map_mul' := begin
intros, unfold gr.to_equiv', congr, rw map_comp, exact rfl,
rw mul_inv_rev, rw map_comp, exact rfl,
end }
@[simp] lemma rho_symm_apply (x : M)(g : G) : ρ g ((gr.to_equiv ρ g).inv_fun x) = x := begin
dunfold gr.to_equiv, change (ρ g ⊚ ρ g⁻¹) x = x, rw ← map_comp, rw mul_inv_self, rw ρ.map_one, exact rfl,
end
@[simp] lemma symm_eq_inv (ρ : group_representation G R M) (g : G) : ρ g⁻¹ = (gr.to_equiv ρ g).symm :=
begin
ext, conv_lhs{
erw ← rho_symm_apply ρ x g,
},
change (ρ g⁻¹ * ρ g) _ = _,
erw ← ρ.map_mul, rw inv_mul_self, rw ρ.map_one, exact rfl,
end
|
module HWfuncapp
using FastGaussQuadrature # to get chebyshevnodes
# you dont' have to use PyPlot. I did much of it in PyPlot, hence
# you will see me often qualify code with `PyPlot.plot` or so.
# using PyPlot
import ApproXD: getBasis, BSpline
using Distributions
using BasisMatrices
using LinearAlgebra, SpecialFunctions
using ApproxFun
using Plots
export q1, q2, q3, q7
ChebyT(x,deg) = cos(acos(x)*deg)
unitmap(x,lb,ub) = 2 .* (x .- lb) ./ (ub .- lb) .- 1 #[a,b] -> [-1,1]
abmap(x,lb,ub) = 0.5 .* (ub .+ lb) .+ 0.5 .* (ub .- lb) .* x # [-1,1] -> [a,b]
function chebpol(x, n)
Φ = zeros(size(x,1),n) # chebyshev polynomials basis
for i in 1:size(x,1)
for j in 1:n
Φ[i,j] = ChebyT(x[i],j-1)
# Φ[i,j] = abmap(Φ[i,j], lb, ub)
end
end
return Φ
end
function q1(n=15)
lb = -3
ub = 3
x = range(lb, stop = ub ,length = n)
# generate Chebyshev nodes
S, y = gausschebyshev(n)
# Scale Chebyshev nodes on -3 to 3 domain
ϕ = abmap(S, lb, ub) # Chebyshev nodes on -3, 3
# define function
f(x) = x .+ 2x.^2 - exp.(-x)
# Evaluate function at chebyshev nodes
Y = f(ϕ)
# Build Function grid
Φ = chebpol(S, n)
# Estimate parameters of approximation
c = Φ\Y
# Evaluate approximation
Yhat = Φ*c
# Test accuracy on larger sample
n_new = range(lb, stop = ub ,length = 100)
# Evaluate function
Y_new = f(n_new)
# Build Polynomial grid
Φ_new = chebpol(unitmap(n_new,lb,ub), n)
# Evaluate using fitted coefficients
Yhat_new = Φ_new*c
# Plot
p = Any[]
push!(p,Plots.plot(n_new, [Y_new, Yhat_new], label=["True value" "Approximation"], marker=([:none :diamond])))
err = Y_new .- Yhat_new
push!(p, Plots.plot(n_new, err, label="Approximation error"))
Plots.plot(p...)
# without using PyPlot, just erase the `PyPlot.` part
Plots.savefig(Plots.plot(p...), joinpath(dirname(@__FILE__),"..","q1.png"))
return Dict("error"=>maximum(abs,err))
end
function q2(b=4)
@assert b > 0
n = 15
# use ApproxFun.jl to do the same:
deg = n - 1
ub = b
lb = -b
S = Chebyshev(lb..ub)
p = range(lb,stop=ub,length=n) # a non-default grid
# define function
f(x) = x .+ 2x.^2 - exp.(-x)
# evaluate function
v = f(p) # values at the non-default grid
V = Array{Float64}(undef,n,deg + 1) # Create a Vandermonde matrix by evaluating the basis at the grid
for k = 1:deg+1
V[:,k] = Fun(S,[zeros(k-1);1]).(p) # Evaluate Chebyshev basis at p
# [zeros(k-1);1] is the coefficient vector selecting the k-th Chebyshev basis function; column k holds that basis function evaluated at each grid point
end
V
g = Fun(S,V\v);
@show g(1.1)
@show f(1.1)
n_new = range(lb, stop = ub, length = 100)
p = Any[]
push!(p,Plots.plot(n_new, [f(n_new), g.(n_new)], label=["True value" "Approximation"], marker=([:none :diamond])))
err = f(n_new) .- g.(n_new)
push!(p, Plots.plot(n_new, err, label="Approximation error"))
fig = Plots.plot(p...)
Plots.savefig(fig,joinpath(dirname(@__FILE__),"..","q2.png"))
end
function q3(b=10)
x = Fun(identity,-b..b)
f = sin(x^2)
g = cos(x)
h = f - g
r = roots(h)
Plots.plot(h, label="h(x)")
Plots.scatter!(r,h.(r), label="Roots")
Plots.savefig(joinpath(dirname(@__FILE__),"..","q3.png"))
# xbis = Fun(identity,-b..0)
g = cumsum(h) # indefinite integral
g = g + h(-b) # definite integral with constant of integration
integral = norm(g(0) - g(-b)) # definite integral from -b to 0
# p is your plot
return (integral)
end
# optional
function q4()
return fig
end
# I found having those useful for q5
mutable struct ChebyType
f::Function # function to approximate
nodes::Union{Vector,LinRange} # evaluation points
basis::Matrix # basis evaluated at nodes
coefs::Vector # estimated coefficients
deg::Int # degree of chebypolynomial
lb::Float64 # bounds
ub::Float64
# constructor
function ChebyType(_nodes::Union{Vector,LinRange},_deg,_lb,_ub,_f::Function)
n = length(_nodes)
y = _f(_nodes)
_basis = Float64[ChebyT(unitmap(_nodes[i],_lb,_ub),j) for i=1:n,j=0:_deg]
_coefs = _basis \ y # type `?\` to find out more about the backslash operator. depending the args given, it performs a different operation
# create a ChebyComparer with those values
new(_f,_nodes,_basis,_coefs,_deg,_lb,_ub)
end
end
# function to predict points using info stored in ChebyType
function predict(Ch::ChebyType,x_new)
true_new = Ch.f(x_new)
basis_new = Float64[ChebyT(unitmap(x_new[i],Ch.lb,Ch.ub),j) for i=1:length(x_new),j=0:Ch.deg]
basis_nodes = Float64[ChebyT(unitmap(Ch.nodes[i],Ch.lb,Ch.ub),j) for i=1:length(Ch.nodes),j=0:Ch.deg]
preds = basis_new * Ch.coefs
preds_nodes = basis_nodes * Ch.coefs
return Dict("x"=> x_new,"truth"=>true_new, "preds"=>preds, "preds_nodes" => preds_nodes)
end
function q5(deg=(5,9,15),lb=-1.0,ub=1.0)
runge(x) = 1.0 ./ (1 .+ 25 .* x.^2)
PyPlot.savefig(joinpath(dirname(@__FILE__),"..","q5.png"))
end
function q6()
# compare 2 knot vectors with runge's function
PyPlot.savefig(joinpath(dirname(@__FILE__),"..","q6.png"))
end
function q7(n=13)
f(x) = abs.(x).^0.5
# Regular grid
bs = BSpline(13,3,-1,1) #13 knots, degree 3 in [-1,1], no multiplicity
# knot vector with a triple knot at 0
multiknots = vcat(range(-1,stop = -0.1,length = 5),0,0,0, range(0.1,stop = 1,length =5))
bs2 = BSpline(multiknots, 3)
# Evaluate function of interest
x = range(-1,stop =1.0, length = 65)
Y = f(x)
# Evaluate basis on grid
B = Array(getBasis(collect(x),bs))
B2 = Array(getBasis(collect(x),bs2))
# Solve for parameters
c = B\Y
c2 = B2\Y
# Compute fitted values
Yhat = B*c
Yhatbis = B2*c2
# Compute fitting errors
err = Y .- Yhat
errbis = Y .- Yhatbis
# Plot
p = Any[]
push!(p,Plots.plot(x, Y, title = "True function"))
push!(p,Plots.plot(x, [Yhat, Yhatbis], label = ["uniform" "multiplicity"], title="Approximations"))
push!(p,Plots.plot(x, [err, errbis], label = ["uniform" "multiplicity"], title="Errors"))
fig = Plots.plot(p...)
Plots.savefig(fig, joinpath(dirname(@__FILE__),"..","q7.png"))
return fig
end
# function to run all questions
function runall()
@info("running all questions of HW-funcapprox:")
q1(15)
q2(3)
q3(10)
q4()
q5()
q6()
q7()
end
end # module
|
\documentclass{llncs}
\usepackage{makeidx} % allows for indexgeneration
\usepackage{url}
\urlstyle{same}
%\usepackage{booktabs}
%\usepackage{citesort}
%\usepackage{graphicx} % For includegraphics
%\usepackage{array}
\begin{document}
\title{A Tool for Optimizing Runtime Parameters of Open MPI}
\author{
Mohamad Chaarawi~\inst{1,2},
Jeffrey M.\ Squyres~\inst{2},
Edgar Gabriel~\inst{1},
Saber Feki~\inst{1}
}
\institute{
Parallel Software Technologies Laboratory, \\
Department of Computer Science, University of Houston,\\
\email{\{mschaara, gabriel, sfeki\}@cs.uh.edu} \\
~\\
\and
Cisco Systems, San Jose, CA USA \\
\email{[email protected]} \\
}
\maketitle
\begin{abstract}
\input{abstract}
\end{abstract}
\section{Introduction}
\label{sec:intro}
\input{intro}
\section{Concept}
\label{sec:concept}
\input{concept}
\section{Implementation}
\label{sec:impl}
\input{implementation}
\section{Performance Evaluation}
\label{sec:eval}
\input{eval}
\section{Summary}
\label{sec:summary}
\input{summary}
~\\
\noindent {\fontsize{10}{10}\selectfont {\bf Acknowledgments.} This research was funded in part by a gift from the Silicon Valley Community Foundation, on behalf of the Cisco Collaborative Research Initiative of Cisco Systems.}
%\begin{thebibliography}
\bibliographystyle{plain}
\bibliography{paper}
%\end{thebibliography}
\noindent
\end{document}
|
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import Callback
###################################################################
### Callback method for reducing learning rate during training ###
###################################################################
class AdvancedLearnignRateScheduler(Callback):
def __init__(self, monitor='val_loss', patience=0, verbose=0, mode='auto', decayRatio=0.1, warmup_batches=-1, init_lr=0.001):
super(Callback, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.wait = 0
self.decayRatio = decayRatio
self.warmup_batches = warmup_batches
self.batch_count = 0
self.init_lr = init_lr
if mode not in ['auto', 'min', 'max']:
# warnings.warn('Mode %s is unknown, '
# 'fallback to auto mode.'
# % (self.mode), RuntimeWarning)
            print('Mode %s is unknown, '
                  'fallback to auto mode.'
                  % (mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
current_lr = K.get_value(self.model.optimizer.lr)
print("\nLearning rate:", current_lr)
if current is None:
# warnings.warn('AdvancedLearnignRateScheduler'
# ' requires %s available!' %
# (self.monitor), RuntimeWarning)
print('AdvancedLearnignRateScheduler'
' requires %s available!' %
(self.monitor), RuntimeWarning)
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate' % (epoch))
assert hasattr(self.model.optimizer, 'lr'), \
'Optimizer must have a "lr" attribute.'
current_lr = K.get_value(self.model.optimizer.lr)
new_lr = current_lr * self.decayRatio
self.init_lr = self.init_lr * self.decayRatio
K.set_value(self.model.optimizer.lr, new_lr)
self.wait = 0
self.wait += 1
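    # Linear learning-rate warm-up: during the first `warmup_batches` batches,
    # the learning rate is scaled from 0 up to `init_lr` in proportion to the
    # number of batches seen so far.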
def on_batch_begin(self, batch, logs=None):
if self.batch_count <= self.warmup_batches:
lr = self.batch_count*self.init_lr/self.warmup_batches
K.set_value(self.model.optimizer.lr, lr)
# if self.verbose > 0:
# print('\nBatch %05d: WarmUpLearningRateScheduler setting learning '
# 'rate to %s.' % (self.batch_count + 1, lr))
def on_batch_end(self, batch, logs=None):
self.batch_count = self.batch_count + 1
class AdvancedLearnignRateScheduler_WeightUpdate(Callback):
def __init__(self, weight_path, monitor='val_loss', patience=0, verbose=0, mode='auto', decayRatio=0.1, warmup_batches=-1, init_lr=None):
super(Callback, self).__init__()
self.weight_path = weight_path
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.wait = 0
self.decayRatio = decayRatio
self.warmup_batches = warmup_batches
self.batch_count = 0
self.init_lr = init_lr
if mode not in ['auto', 'min', 'max']:
# warnings.warn('Mode %s is unknown, '
# 'fallback to auto mode.'
# % (self.mode), RuntimeWarning)
            print('Mode %s is unknown, '
                  'fallback to auto mode.'
                  % (mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
current_lr = K.get_value(self.model.optimizer.lr)
print("\nLearning rate:", current_lr)
if current is None:
# warnings.warn('AdvancedLearnignRateScheduler'
# ' requires %s available!' %
# (self.monitor), RuntimeWarning)
print('AdvancedLearnignRateScheduler'
' requires %s available!' %
(self.monitor), RuntimeWarning)
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate' % (epoch))
assert hasattr(self.model.optimizer, 'lr'), \
'Optimizer must have a "lr" attribute.'
current_lr = K.get_value(self.model.optimizer.lr)
new_lr = current_lr * self.decayRatio
if self.init_lr is not None:
self.init_lr = self.init_lr * self.decayRatio
K.set_value(self.model.optimizer.lr, new_lr)
self.wait = 0
self.model.load_weights(self.weight_path)
print('best model weight loaded')
self.wait += 1
def on_batch_begin(self, batch, logs=None):
if self.batch_count <= self.warmup_batches:
if self.init_lr is not None:
lr = self.batch_count * self.init_lr / self.warmup_batches
K.set_value(self.model.optimizer.lr, lr)
# if self.verbose > 0:
# print('\nBatch %05d: WarmUpLearningRateScheduler setting learning '
# 'rate to %s.' % (self.batch_count + 1, lr))
def on_batch_end(self, batch, logs=None):
self.batch_count = self.batch_count + 1 |
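
# --------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). The model and data
# below are placeholder assumptions, chosen only to show how the scheduler is
# passed to `model.fit`; a real project would plug in its own model and data.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    from tensorflow.keras import layers, models

    # Toy regression model and random data, purely for illustration.
    model = models.Sequential([
        layers.Dense(8, activation="relu", input_shape=(4,)),
        layers.Dense(1),
    ])
    model.compile(optimizer="adam", loss="mse")
    x = np.random.rand(64, 4)
    y = np.random.rand(64, 1)

    # Halve the learning rate after one epoch without improvement in the loss,
    # with a 10-batch linear warm-up toward init_lr.
    scheduler = AdvancedLearnignRateScheduler(
        monitor="loss", patience=1, verbose=1, mode="min",
        decayRatio=0.5, warmup_batches=10, init_lr=1e-3)
    model.fit(x, y, epochs=3, batch_size=8, callbacks=[scheduler], verbose=0)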
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOT.Common.FOL.Existential.Syntax where
-- We add 3 to the fixities of the Agda standard library 0.8.1 (see
-- Relation.Binary.Core).
infix 7 _≡_
postulate
D : Set
_≡_ : D → D → Set
refl : ∀ {d} → d ≡ d
d : D
module ∃₁ where
-- We add 3 to the fixities of the Agda standard library 0.8.1 (see
-- Data/Product.agda, Data/Sum.agda and Relation/Nullary/Core.agda).
infixr 7 _,_
infix 5 ∃
data ∃ (P : D → Set) : Set where
_,_ : (x : D) → P x → ∃ P
syntax ∃ (λ x → e) = ∃[ x ] e
t₁ : ∃ λ x → x ≡ x
t₁ = d , refl
t₂ : ∃[ x ] x ≡ x
t₂ = d , refl
t₃ : ∃ λ x → ∃ λ y → x ≡ y
t₃ = d , d , refl
t₄ : ∃[ x ] ∃[ y ] x ≡ y
t₄ = d , d , refl
module ∃₂ where
infixr 7 _,_,_
data ∃₂ (P : D → D → Set) : Set where
_,_,_ : (x y : D) → P x y → ∃₂ P
-- Agda issue: 536
-- syntax ∃₂ (λ x y → e) = ∃₂[ x , y ] e
t₁ : ∃₂ λ x y → x ≡ y
t₁ = d , d , refl
|
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{pgfplots}
\usepackage{fancyhdr}
\usepackage{enumitem}
\usepackage{tikz}
\usepackage{xparse}
\usepackage{siunitx}
\pgfplotsset{width=10cm,compat=1.9}
\pagestyle{fancy}
\fancyhf{}
\lhead{Steven Glasford}
\chead{Homework 1.4}
\rhead{Page \thepage}
\title{Homework 1.4}
\author{Steven Glasford}
\date{\parbox{\linewidth}{\centering%
\today\endgraf\medskip
Math-451-M001}}
\newcommand{\rpm}{\sbox0{$1$}\sbox2{$\scriptstyle\pm$}
\raise\dimexpr(\ht0-\ht2)/2\relax\box2 }
\newlist{steps}{enumerate}{1}
\setlist[steps, 1]{label = Step \arabic*:}
\ExplSyntaxOn
\newcommand*{\prlen}[1]{%
% round to 1 digit:
\pgfmathparse{round(10)/10.0}%
%\pgfkeys{/pgf/number format/precision=1}
%\pgfmathresult
\pgfmathprintnumber[fixed, precision=2]{\pgfmathresult}
}
\ExplSyntaxOff
\begin{document}
\maketitle
I choose to do problems 3 and 9 from the options of 3, 4, 5 and 9.
\section{Problem 3}
\begin{enumerate}[label=\alph*]
\item
\begin{tikzpicture}
\begin{axis}[
axis lines = left,
xlabel = Time $t$ in days,
ylabel = Price per pig $P(t)$,
]
\addplot [
domain=0:15,
samples=100,
color=red,
]{.65 - .01*x + .00004*x^2};
\addlegendentry{New: $.65-.01t+.00004t^2$}
\addplot[
domain=0:15,
samples=100,
color=blue,
]{.65-.01*x};
\addlegendentry{Old: $.65-.01*x$}
\end{axis}
\end{tikzpicture}
The graph above shows the price per pig under both the old linear model (blue) and the newer quadratic model (red). The red curve tracks the actual price per pig more closely, but, as the graph makes clear, the two models give very similar values within the relevant time domain.
\item
\begin{center}
\begin{tabular}{ |c|c|c| }
\hline
Variables & Constants & Assumptions \\
\hline
$t$ - Time (Days) & $w_0 = 200$ initial weight (pounds) & $w=w_0+5t$ \\
$f$ - profit (in dollars) & & $p=.65-.01t+.00004t^2$ \\
$w$ - Weight (pounds) && $c=.45t$ \\
$p$ - Price per pound && $r=p*w$\\
$c$ - cost (in dollars) && $f=r-c$\\
$r$ - Revenue (in dollars) &&\\
\hline
\end{tabular}
\end{center}
To maximize $f(t)$, we combine all of the assumptions into a single equation so that each of them is taken into account, using $t$ as the single independent variable. Starting from $$f=r-c$$ \parbox{\linewidth}{\centering%
$r=pw$ \hspace*{3cm} $c=.45t$ \endgraf \bigskip
$f=pw-.45t$ \endgraf\bigskip
$p=.65-.01t+.00004t^2$ \hspace*{3cm}
$w=w_0+5*t$ \endgraf \bigskip
$f=(.65-.01t+.00004t^2)(w_0+5t)-.45t$ \endgraf\bigskip
$w_0=200$ \endgraf \bigskip
$f=(.65-.01t+.00004t^2)(200+5t)-.45t$ \endgraf\bigskip
}
Which simplifies to:$$f(t)=.0002t^3-.042t^2+.8t+130$$
Now we need to take the derivative of $f(t)$ and find where it is equal to zero. $$\frac{df}{dt}=\frac{3t^2}{5000}-\frac{21t}{250}+\frac{4}{5}$$
\begin{tikzpicture}
\begin{axis}[
axis lines = left,
xlabel = Time $t$ in days,
ylabel = $y$,
]
\addplot[
domain=0:20,
samples=100,
color=blue,
]{(3*x^2-420*x+4000)/5000};
\addlegendentry{$\frac{df}{dt}$}
\draw[ultra thin] (axis cs:\pgfkeysvalueof{/pgfplots/xmin},0) -- (axis cs:\pgfkeysvalueof{/pgfplots/xmax},0);
\end{axis}
\end{tikzpicture}
\endgraf
Using the quadratic formula, we determine the critical point lying between 8 and 14; this point is a maximum, since the derivative is a quadratic that changes sign from positive to negative there. The other root is, in turn, a minimum.
$$\frac{df}{dt}=0=\frac{3t^2-420t+4000}{5000}$$ $$t=\frac{10(21+\sqrt{321})}{3},\frac{10(21-\sqrt{321})}{3}$$
$$t \approx \pgfmathparse{(10*(21+321^(1/2)))/3}\pgfmathresult , \pgfmathparse{(10*(21-321^(1/2)))/3}\pgfmathresult $$
\textbf{Therefore, the best day to sell your pig is at about 10 days.}
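As an added check (not part of the original write-up), the second derivative
$$\frac{d^2f}{dt^2}=\frac{3t}{2500}-\frac{21}{250}$$
is negative for $t<70$, so the critical point near $t\approx 10$ is indeed a maximum, while the larger root (where $\frac{d^2f}{dt^2}>0$) is the minimum.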
\item \underline{Sensitivity analysis}
\endgraf
$$p(t)=.65-.01t+.00004t^2$$
$$f(t)=.0002t^3-.042t^2+.8t+130$$
$$y=(.65-.01x+rx^2)(200+5x)-.45x$$
$$y=5rx^3+(200r-.05)x^2+.8x+130$$
$$\frac{dy}{dx}=15rx^2+400rx-\frac{x}{10}+\frac{4}{5}=0$$
$$x=\frac{-4000r+1+ 10\sqrt{160000r^2-128r+1/100}}{300r}$$
$$\frac{dx}{dr}=\frac{6400r-1-\sqrt{16000000r^2-12800r+1}}{300r^2\sqrt{16000000r^2-12800r+1}}$$
\begin{center}
\begin{tabular}{ |c|c|c| }
\hline
r & x & $\frac{\Delta x}{\Delta r} * 100$ \\
\hline
.00002 & 297.709385518&
%\pgfkeys{/pgf/fpu} \pgfmathparse{}\pgfmathresult
%\edef\tmp{\pgfmathresult}
%\pgfmathresult
%\pgfkeys{/pgf/fpu=false} &
%1/(-6400*.00004+1+sqrt(16000000*.00004^2-128*.00004+1))/(300*.00004^2*sqrt(16000000*.00004^2-12800*.00004+1))
%(-6400*r+1+\sqrt{16000000*r^2-128*r+1})/(300*r^2*\sqrt{16000000*r^2-12800r+1})
\\
.00003 & 185.997481072 & -1117119044.46
%\pgfmathparse{((-4000*.00003+1+10*(160000*.00003^2-128*.00003+1/100)^(1/2))/(300*.00003)-(-4000*.00002+1+10*(160000*.00002^2-128*.00002+1/100)^(1/2))/(300*.00002))/(.00003-.00002)*100}\pgfmathresult &
\\
.00004 &129.721576224&-562759048.485
% \pgfmathparse{(-4000*.00004+1+10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004)}\pgfmathresult &
% \pgfmathparse{(-4000*.00004+1-10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004)}\pgfmathresult &
% \pgfmathparse{((-4000*.00004+1+10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004)-(-4000*.00003+1+10*(160000*.00003^2-128*.00003+1/100)^(1/2))/(300*.00003))/(.00004-.00003)*100}\pgfmathresult &
% \pgfmathparse{((-4000*.00004+1-10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004)-(-4000*.00003+1-10*(160000*.00003^2-128*.00003+1/100)^(1/2))/(300*.00003))/(.00004-.00003)*100}\pgfmathresult
\\
.00005 & 95.4970354689 &-342245407.55
%\pgfmathparse{(-4000*.00005+1+10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005)}\pgfmathresult &
% \pgfmathparse{(-4000*.00005+1-10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005)}\pgfmathresult &
% \pgfmathparse{((-4000*.00005+1+10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005)-(-4000*.00004+1+10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004))/(.00005-.00004)*100}\pgfmathresult&
% \pgfmathparse{((-4000*.00005+1-10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005)-(-4000*.00004+1-10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004))/(.00005-.00004)*100}\pgfmathresult
\\
.00006 &72.1191645491&-233778709.199
%\pgfmathparse{(-4000*.00006+1+10*(160000*.00006^2-128*.00006+1/100)^(1/2))/(300*.00006)}\pgfmathresult &
% \pgfmathparse{(-4000*.00006+1-10*(160000*.00006^2-128*.00006+1/100)^(1/2))/(300*.00006)}\pgfmathresult &
% \pgfmathparse{((-4000*.00006+1+10*(160000*.00006^2-128*.00006+1/100)^(1/2))/(300*.00006)-(-4000*.00005+1+10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005))/(.00006-.00005)*100}\pgfmathresult&
% \pgfmathparse{((-4000*.00006+1-10*(160000*.00006^2-128*.00006+1/100)^(1/2))/(300*.00006)-(-4000*.00005+1-10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005))/(.00006-.00005)*100}\pgfmathresult
\\
\hline
\end{tabular}
\end{center}
\textbf{Therefore the relevant sensitivity lies somewhere between the values computed at $r=.00004$ and $r=.00005$.}
%% \pgfmathparse{(10*(21-321^(1/2)))/3}\pgfmathresult
\item The robustness is fairly good since the values obtained from both the linear and the quadratic models are roughly similar.
\end{enumerate}
\section{Problem 9}
\begin{enumerate}[label=\alph*]
\endgraf
\item\endgraf\bigskip
\begin{steps}
\endgraf
\item \emph{Ask the Question, determine variables, constants, assumptions}\endgraf
\begin{center}
\begin{tabular}{ |c|c|c| }
\hline
Variables & Constants & Assumptions \\
\hline
$s$ - Subscribers & $p_0 = 1.50$ initial price & $f=sp$\\
$p$ - Subscription price & $s_0=80000$&$s=s_0-50000(p-p_0)$\\
$f$ - Profits &&$p\geq0$\\
&& $s\geq0$\\
\hline
\end{tabular}
\end{center}
\item \emph{Select the model}\endgraf
One-variable optimization
\item \emph{Formulate the model} \endgraf
$$f=sp$$
$$s=s_0-50000(p-p_0)$$
$$f=p(s_0-50000(p-p_0))$$
\parbox{\linewidth}{\centering%
$s_0=80000$ \hspace*{3cm} $p_0=1.50$ \endgraf \bigskip
}
$$f=p(80000-50000(p-1.5))$$
or simplified
$$f=-50000p^2+155000p$$
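(Indeed, $p(80000-50000(p-1.5))=80000p+75000p-50000p^2=-50000p^2+155000p$.)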
\item \emph{Solve the model}\endgraf
Find the extrema for f:
$$\frac{df}{dp}=0=-100000p+155000$$
$$p=1.55$$
Determine if 1.55 is a max or min:
\begin{tikzpicture}
\begin{axis}[
axis lines = left,
xlabel = Price per paper,
]
\addplot [
domain=0:2,
samples=100,
color=red,
]{-100000*x+155000};
\addlegendentry{$\frac{df}{dp}$}
\draw[ultra thin] (axis cs:\pgfkeysvalueof{/pgfplots/xmin},0) -- (axis cs:\pgfkeysvalueof{/pgfplots/xmax},0);
\end{axis}
\end{tikzpicture}
Since the derivative of $f$ goes from positive to negative at $p=1.55$, \textbf{$p=1.55$ is a maximum.}\bigskip
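Equivalently, $\frac{d^2f}{dp^2}=-100000<0$, so the critical point is indeed a maximum.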
\item \emph{Answer the question}\endgraf
The best price for the newspaper is \$1.55, giving a profit of \$$120125$ per week with a total of $77500$ subscribers.
\end{steps}
%% \pgfmathparse{(10*(21-321^(1/2)))/3}\pgfmathresult
\item \endgraf \bigskip
\emph{What if the newspaper lost a different number of subscribers per ten-cent price increase, other than 5000?}
Let $r$ denote that number (so $r=5000$ reproduces the original model); then
$$f=p(80000-10r(p-1.5))$$
$$\frac{df}{dp}=0=-20rp+15r+80000$$
$$p=\frac{80000+15r}{20r}$$
replace $p$ in the $f$ equation:
$$f=\frac{80000+15r}{20r}\left(80000-10r\left(\frac{80000+15r}{20r}-1.5\right)\right)$$
Multiplying out, this condenses to $f(r)=\frac{(80000+15r)^2}{40r}$, which is the form differentiated below.
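As a quick check, $r=5000$ gives $p=\frac{80000+75000}{100000}=1.55$ and $f=1.55\left(80000-50000(1.55-1.5)\right)=\$120125$, matching part (a).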
\begin{center}
\begin{tabular}{ |c|c| }
\hline
Lost Subscribers ($r$) & Max Profit \\
\hline
3000 &\$130208.33\\
4000 &\$122500.00\\
5000 &\$120125.00\\
6000 &\$120416.67\\
7000 &\$122232.14\\
\hline
\end{tabular}
\end{center}
\item
$$f=\frac{80000+15r}{20r}\left(80000-10r\left(\frac{80000+15r}{20r}-1.5\right)\right)$$
$$\frac{df}{dr}=\frac{45r^2-1280000000}{8r^2}$$
Sensitivity $=\dfrac{df}{dr}\cdot\dfrac{r}{f}=S(f,r)$; writing $n$ in place of $r$ below,
$$S(p,n)=\frac{45n^2-1280000000}{8n^2}\left(n/\left(\left(\frac{80000+15n}{20n}\left(80000-10n\left(\frac{80000+15n}{20n}-1.5\right)\right)\right)\right)\right)$$
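For instance, at $n=5000$ this gives $\frac{df}{dr}=\frac{45(5000)^2-1280000000}{8(5000)^2}=-0.775$ and $S\approx-0.775\cdot\frac{5000}{120125}\approx-0.032$, so a $1\%$ increase in the number of lost subscribers lowers the maximum profit by only about $0.03\%$.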
\item \emph{The newspaper shouldn't need to change its price.} The current price is already near the optimal one, and the optimal price earns only \$125 more per week than the current \$1.50, which is negligible compared with the total profit.
\end{enumerate}
\end{document} |
# GetShockSpan3Server3Seg4Data.r
source("../common/DataUtil.r")
library(ggplot2)
source("../common/PlotUtil.r")
# P A R A M S
nShockspan <- 3
nServerDefaultLife <- 3
nSegments <- 4
nShockImpactMin <- 100
nShockImpactMax <- 100
# S T A N D A R D P R O C E S S I N G F O R S H O C K S
source("../common/shockformat.r")
source("../common/shocktitles.r")
# P L O T D A T A
gp <- ggplot(data=trows
, aes(x=shockfreq,y=safe(losspct), color=factor(copies))
)
gp <- gp + labs(color=sLegendLabel)
gp <- fnPlotLogScales(gp, y="YES"
# ,xbreaks=c(50,67,75,80,90,100)
,xbreaks=c(5000,10000,15000,20000,25000,30000,40000,50000)
,ybreaks=c(0.01,0.10,1.00,10,100)
)
#gp <- gp + scale_x_reverse(breaks=c(50,67,75,80,90,100))
gp <- gp + geom_line(
size=3
, show.legend=TRUE
)
gp <- gp + geom_point(data=trows
, size=6
, show.legend=TRUE
, color="black"
)
gp <- gp + theme(legend.position=c(0.15,0.3))
gp <- gp + theme(legend.background=element_rect(fill="lightgray",
size=0.5, linetype="solid"))
gp <- gp + theme(legend.key.size=unit(0.3, "in"))
gp <- gp + theme(legend.key.width=unit(0.6, "in"))
gp <- gp + theme(legend.text=element_text(size=16))
gp <- gp + theme(legend.title=element_text(size=14))
gp <- gp + scale_color_discrete(labels=lLegendItemLabels)
gp <- fnPlotTitles(gp
, titleline=sTitleLine
, xlabel=sXLabel
, ylabel=sYLabel
)
# Label the percentage lines out on the right side.
xlabelposition <- log10(800)
gp <- fnPlotPercentLine(gp, xloc=xlabelposition)
gp <- fnPlotMilleLine(gp, xloc=xlabelposition)
gp <- fnPlotSubMilleLine(gp, xloc=xlabelposition)
plot(gp)
fnPlotMakeFile(gp, sPlotFile)
# Unwind any remaining sink()s to close output files.
while (sink.number() > 0) {sink()}
|
State Before: K : Type u_1
inst✝¹ : DivisionRing K
inst✝ : CharZero K
a b : ℕ
⊢ ↑(choose a b) = Polynomial.eval (↑(a - (b - 1))) (pochhammer K b) / ↑b ! State After: no goals Tactic: rw [eq_div_iff_mul_eq (cast_ne_zero.2 b.factorial_ne_zero : (b ! : K) ≠ 0), ← cast_mul,
mul_comm, ← descFactorial_eq_factorial_mul_choose, ← cast_descFactorial] |
"
List from memory the reserved words of the R language. How many are there in total?
Explain the meaning of NULL, Inf, NA, NaN. Obtain these values in several ways
as the result of performing some operations.
"
# NA: accessing a nonexistent element of a vector
my.list <- list(Text=c("A", "B", "C"))
print(my.list)
print(my.list$Text[4])
# NULL: accessing a nonexistent element of a list
print(my.list$Kot[4])
# Inf: produced by division by zero
r <- 10/0
print(r)
# NaN: an undefined result, e.g. 0/0
result <- 0/0
print(result)
|
lemma strip_while_not_0_Cons_eq [simp]: "strip_while (\<lambda>x. x = 0) (x # xs) = x ## strip_while (\<lambda>x. x = 0) xs" |
State Before: p : ℕ
hp : Fact (Nat.Prime p)
n : ℕ
a : ℤ
⊢ ↑p ^ n ∣ a ↔ a = 0 ∨ n ≤ padicValInt p a State After: no goals Tactic: rw [padicValInt, ← Int.natAbs_eq_zero, ← padicValNat_dvd_iff, ← Int.coe_nat_dvd_left,
Int.coe_nat_pow] |
import data.nat.basic
open nat
def island_rules : ℕ → ℕ → (ℕ → Prop)
| 0 b := λ bb, (bb = b ∨ bb = b - 1) ∧ bb > 0
| (succ d) b := (λ bb, (island_rules d b) bb ∧
((∀ c, (island_rules d b) c → c = b) ↔ (∀ c, (island_rules d bb) c → c = bb)))
theorem init_island {d b bb} : (island_rules d b) bb → (bb = b ∨ bb = b - 1) ∧ bb > 0 := begin
induction d with d hi,unfold island_rules,assume h,exact h,
unfold island_rules,assume h,exact hi h.left
end
theorem blue_eyed_islander : ∀ d b : ℕ, b > 0 → (d + 1 ≥ b ↔ (∀ bb:ℕ, (island_rules d b) bb → bb = b)):=begin
assume d,induction d with d hi,assume b,
simp[island_rules],
cases b,simp[(dec_trivial:¬0>0)],
cases b,assume h,simp[(dec_trivial:1≥1)],assume bb hbb,cases hbb,assume hbb1,rw hbb,
rw hbb,simp[(dec_trivial:¬0>0)],
assume h,simp[(dec_trivial:¬ 1 ≥ succ (succ b))],assume hbb,
have h1:=hbb (succ b),simp[(dec_trivial:succ b>0),succ_ne_self (succ b)] at h1,
exact (succ_ne_self b).symm h1,
assume b hb,apply iff.intro,
assume hd,cases lt_or_eq_of_le hd,unfold island_rules,assume bb hbb,
have hd1 : succ d ≥ b:=le_of_succ_le_succ (succ_le_of_lt h),rw add_one at hi,
exact iff.elim_left (hi b hb) hd1 bb hbb.left,
have hi1:= hi b hb,rw succ_add at h,rw h at hi1,
have hd1:¬d+1 ≥ succ(d+1) :=not_le_of_gt (lt_succ_self (d+1)),
simp[hd1] at hi1,unfold island_rules,assume bb hbb,
have hbb1:=init_island hbb.left,
cases hbb1.left with hbb2 hbb2,assumption,
have hbbd:d + 1 ≥ bb,rw[eq_comm,nat.sub_eq_iff_eq_add (succ_le_of_lt hb)] at hbb2,
rw [hbb2,succ_add,add_one bb] at hd,exact le_of_succ_le_succ hd,
exact iff.elim_right hbb.right (iff.elim_left (hi bb hbb1.right) hbbd) bb hbb.left,
assume hbb,apply by_contradiction,assume hd,have hd1:=lt_of_not_ge hd,
rw succ_add at hd1,have hd2:¬d + 1 ≥ b:=not_le_of_gt (lt_of_succ_lt hd1),
have hi1:= hi b hb,simp [hd2] at hi1,unfold island_rules at hbb,
rw classical.not_forall at hi1,cases hi1 with bb hbb1,
rw @not_imp _ _ (classical.prop_decidable _)at hbb1,
have hbb2:=hbb bb,
have hbb3:= init_island hbb1.left,simp[hbb1.right] at hbb3,
have hbb4:=hi bb hbb3.right,
rw [eq_comm,nat.sub_eq_iff_eq_add (succ_le_of_lt hb)] at hbb3,
rw [hbb3.left,add_one bb] at hd1,have hbbd:¬d + 1 ≥ bb:= not_le_of_gt (lt_of_succ_lt_succ hd1),
simp[hbbd] at hbb4,
have:¬∀ (c : ℕ), island_rules d b c → c = b,assume hbb4,have:=hbb4 bb hbb1.left,
rw [hbb3.left,add_one,eq_comm] at this,exact succ_ne_self bb this,
simp[hbb4,hbb1.left,this] at hbb2,
rw [hbb3.left,add_one,eq_comm] at hbb2,exact succ_ne_self bb hbb2,
end
#print blue_eyed_islander |
// (C) Copyright John Maddock 2006.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_MATH_TOOLS_SIGN_HPP
#define BOOST_MATH_TOOLS_SIGN_HPP
#include <boost/math/tools/config.hpp>
#include <boost/math/special_functions/math_fwd.hpp>
namespace boost{ namespace math{
template <class T>
inline int sign BOOST_NO_MACRO_EXPAND(const T& z)
{
return (z == 0) ? 0 : (z < 0) ? -1 : 1;
}
template <class T>
inline int signbit BOOST_NO_MACRO_EXPAND(const T& z)
{
return (z < 0) ? 1 : 0;
}
template <class T>
inline T copysign BOOST_NO_MACRO_EXPAND(const T& x, const T& y)
{
BOOST_MATH_STD_USING
return fabs(x) * boost::math::sign(y);
}
} // namespace math
} // namespace boost
#endif // BOOST_MATH_TOOLS_SIGN_HPP
|
module Test.Suite
import IdrTest.Test
import Test.Markdown.Format.HtmlTest
import Test.Markdown.LexerTest
import Test.MarkdownTest
suite : IO ()
suite = do
runSuites
[ Test.Markdown.LexerTest.suite
, Test.MarkdownTest.suite
, Test.Markdown.Format.HtmlTest.suite
]
|
\sec{Linear ODEs}
\subsection{Introduction}
\begin{defn}[Linear differential equations]
A first order differential equation of the type
\[\dfrac{dy}{dx} + p(x)y = g(x)\]
is called a \defin{linear differential equation} in standard form.
\end{defn}
We shall assume that $p(x)$ and $g(x)$ are continuous on an open interval $I \subset \mathbb{R}.$\\
\begin{mdframed}[style=boxstyle, frametitle={Solving such an ODE}]
Verify that $\exp\left(\displaystyle\int p(x) dx\right)$ is an integrating factor of the above ODE.\\
Thus, we can now turn back to technique of the previous section.
\end{mdframed}
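As a quick added illustration (the particular equation is chosen here for concreteness): for
\[\dfrac{dy}{dx} + \dfrac{1}{x}\,y = x, \qquad x > 0,\]
the integrating factor is $\exp\left(\displaystyle\int \dfrac{dx}{x}\right) = x$, so $(xy)' = x^2$, giving $xy = \dfrac{x^3}{3} + C$ and hence $y = \dfrac{x^2}{3} + \dfrac{C}{x}$.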
\subsection{Bernoulli's DE}
\begin{defn}[Bernoulli's DE]
A first order differential equation of the type
\[\dfrac{dy}{dx} + p(x)y = q(x)y^n\]
is called a \defin{Bernoulli's differential equation}.
\end{defn}
Note that if $n = 0$ or $1,$ then it is a linear DE as well and we already know how to solve that. Thus, we assume that $n \notin \{0, 1\}.$
\newpage
\begin{mdframed}[style=boxstyle, frametitle={Solving such an ODE}]
Substitute \[u(x) = \dfrac{1}{y^{n-1}}.\]
Calculate the derivative and rearrange the original ODE to obtain:
\[\dfrac{1}{1 - n}\dfrac{du}{dx} + p(x)u(x) = q(x).\]
Now, we are back to the case of a linear DE which we can solve.
\end{mdframed}
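In more detail: since $u = y^{1-n}$, we have $\dfrac{du}{dx} = (1-n)\,y^{-n}\dfrac{dy}{dx}$, so dividing the Bernoulli equation by $y^{n}$ and substituting produces the linear equation displayed above, which can then be solved with an integrating factor as before.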
\exercise{%
Solve the following DE:
\[6y^2\dfrac{dx}{dy} - yx = 2x^4.\]} |
module SplayHeap
import Heap
%default total
%access private
export
data SplayHeap a = E | T (SplayHeap a) a (SplayHeap a)
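-- `partition pivot t` splits `t` into the elements <= pivot and the elements > pivot,
-- restructuring (splaying) nodes along the search path as it descends.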
partition : Ord a => a -> SplayHeap a -> (SplayHeap a, SplayHeap a)
partition pivot E = (E, E)
partition pivot t@(T a x b) =
if x <= pivot then
case b of
E => (t, E)
T b1 y b2 =>
if y <= pivot then
let (small, big) = partition pivot $ assert_smaller t b2
in (T (T a x b1) y small, big)
else
let (small, big) = partition pivot $ assert_smaller t b1
in (T a x small, T big y b2)
else
case a of
E => (E, t)
T a1 y a2 =>
if y <= pivot then
let (small, big) = partition pivot $ assert_smaller t a2
in (T a1 y small, T big x b)
else
let (small, big) = partition pivot $ assert_smaller t a1
in (small, T (T big y a2) x b)
export
Heap SplayHeap where
empty = E
isEmpty E = True
isEmpty _ = False
insert x t = let (a, b) = partition x t in T a x b
merge E t = t
merge (T a x b) t = let (ta, tb) = partition x t
in T (assert_total $ merge ta a) x (assert_total $ merge tb b)
findMin E = idris_crash "empty heap"
findMin (T E x b) = x
findMin (T a x b) = findMin a
deleteMin E = idris_crash "empty heap"
deleteMin (T E x b) = b
deleteMin (T (T E x b) y c) = T b y c
deleteMin (T (T a x b) y c) = T (deleteMin a) x (T b y c)
|
/-
Copyright (c) 2020 Devon Tuma. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau, Devon Tuma
-/
import ring_theory.ideal.quotient
import ring_theory.polynomial.quotient
/-!
# Jacobson radical
The Jacobson radical of a ring `R` is defined to be the intersection of all maximal ideals of `R`.
This is similar to how the nilradical is equal to the intersection of all prime ideals of `R`.
We can extend the idea of the nilradical to ideals of `R`,
by letting the radical of an ideal `I` be the intersection of prime ideals containing `I`.
Under this extension, the original nilradical is the radical of the zero ideal `⊥`.
Here we define the Jacobson radical of an ideal `I` in a similar way,
as the intersection of maximal ideals containing `I`.
## Main definitions
Let `R` be a commutative ring, and `I` be an ideal of `R`
* `jacobson I` is the jacobson radical, i.e. the infimum of all maximal ideals containing I.
* `is_local I` is the proposition that the jacobson radical of `I` is itself a maximal ideal
## Main statements
* `mem_jacobson_iff` gives a characterization of members of the jacobson of I
* `is_local_of_is_maximal_radical`: if the radical of I is maximal then so is the jacobson radical
## Tags
Jacobson, Jacobson radical, Local Ideal
-/
universes u v
namespace ideal
variables {R : Type u} {S : Type v}
open_locale polynomial
section jacobson
section ring
variables [ring R] [ring S] {I : ideal R}
/-- The Jacobson radical of `I` is the infimum of all maximal (left) ideals containing `I`. -/
def jacobson (I : ideal R) : ideal R :=
Inf {J : ideal R | I ≤ J ∧ is_maximal J}
lemma le_jacobson : I ≤ jacobson I :=
λ x hx, mem_Inf.mpr (λ J hJ, hJ.left hx)
@[simp] lemma jacobson_idem : jacobson (jacobson I) = jacobson I :=
le_antisymm (Inf_le_Inf (λ J hJ, ⟨Inf_le hJ, hJ.2⟩)) le_jacobson
@[simp] lemma jacobson_top : jacobson (⊤ : ideal R) = ⊤ :=
eq_top_iff.2 le_jacobson
@[simp] theorem jacobson_eq_top_iff : jacobson I = ⊤ ↔ I = ⊤ :=
⟨λ H, classical.by_contradiction $ λ hi, let ⟨M, hm, him⟩ := exists_le_maximal I hi in
lt_top_iff_ne_top.1
(lt_of_le_of_lt (show jacobson I ≤ M, from Inf_le ⟨him, hm⟩) $
lt_top_iff_ne_top.2 hm.ne_top) H,
λ H, eq_top_iff.2 $ le_Inf $ λ J ⟨hij, hj⟩, H ▸ hij⟩
lemma jacobson_eq_bot : jacobson I = ⊥ → I = ⊥ :=
λ h, eq_bot_iff.mpr (h ▸ le_jacobson)
lemma jacobson_eq_self_of_is_maximal [H : is_maximal I] : I.jacobson = I :=
le_antisymm (Inf_le ⟨le_of_eq rfl, H⟩) le_jacobson
@[priority 100]
instance jacobson.is_maximal [H : is_maximal I] : is_maximal (jacobson I) :=
⟨⟨λ htop, H.1.1 (jacobson_eq_top_iff.1 htop),
λ J hJ, H.1.2 _ (lt_of_le_of_lt le_jacobson hJ)⟩⟩
theorem mem_jacobson_iff {x : R} : x ∈ jacobson I ↔ ∀ y, ∃ z, z * y * x + z - 1 ∈ I :=
⟨λ hx y, classical.by_cases
(assume hxy : I ⊔ span {y * x + 1} = ⊤,
let ⟨p, hpi, q, hq, hpq⟩ := submodule.mem_sup.1 ((eq_top_iff_one _).1 hxy) in
let ⟨r, hr⟩ := mem_span_singleton'.1 hq in
⟨r, by rw [mul_assoc, ←mul_add_one, hr, ← hpq, ← neg_sub, add_sub_cancel]; exact I.neg_mem hpi⟩)
(assume hxy : I ⊔ span {y * x + 1} ≠ ⊤,
let ⟨M, hm1, hm2⟩ := exists_le_maximal _ hxy in
suffices x ∉ M, from (this $ mem_Inf.1 hx ⟨le_trans le_sup_left hm2, hm1⟩).elim,
λ hxm, hm1.1.1 $ (eq_top_iff_one _).2 $ add_sub_cancel' (y * x) 1 ▸ M.sub_mem
(le_sup_right.trans hm2 $ subset_span rfl)
(M.mul_mem_left _ hxm)),
λ hx, mem_Inf.2 $ λ M ⟨him, hm⟩, classical.by_contradiction $ λ hxm,
let ⟨y, i, hi, df⟩ := hm.exists_inv hxm, ⟨z, hz⟩ := hx (-y) in
hm.1.1 $ (eq_top_iff_one _).2 $ sub_sub_cancel (z * -y * x + z) 1 ▸ M.sub_mem
(by { rw [mul_assoc, ←mul_add_one, neg_mul, ← (sub_eq_iff_eq_add.mpr df.symm), neg_sub,
sub_add_cancel],
exact M.mul_mem_left _ hi }) (him hz)⟩
lemma exists_mul_sub_mem_of_sub_one_mem_jacobson {I : ideal R} (r : R)
(h : r - 1 ∈ jacobson I) : ∃ s, s * r - 1 ∈ I :=
begin
cases mem_jacobson_iff.1 h 1 with s hs,
use s,
simpa [mul_sub] using hs
end
/-- An ideal equals its Jacobson radical iff it is the intersection of a set of maximal ideals.
Allowing the set to include ⊤ is equivalent, and is included only to simplify some proofs. -/
theorem eq_jacobson_iff_Inf_maximal :
I.jacobson = I ↔ ∃ M : set (ideal R), (∀ J ∈ M, is_maximal J ∨ J = ⊤) ∧ I = Inf M :=
begin
use λ hI, ⟨{J : ideal R | I ≤ J ∧ J.is_maximal}, ⟨λ _ hJ, or.inl hJ.right, hI.symm⟩⟩,
rintros ⟨M, hM, hInf⟩,
refine le_antisymm (λ x hx, _) le_jacobson,
rw [hInf, mem_Inf],
intros I hI,
cases hM I hI with is_max is_top,
{ exact (mem_Inf.1 hx) ⟨le_Inf_iff.1 (le_of_eq hInf) I hI, is_max⟩ },
{ exact is_top.symm ▸ submodule.mem_top }
end
theorem eq_jacobson_iff_Inf_maximal' :
I.jacobson = I ↔ ∃ M : set (ideal R), (∀ (J ∈ M) (K : ideal R), J < K → K = ⊤) ∧ I = Inf M :=
eq_jacobson_iff_Inf_maximal.trans
⟨λ h, let ⟨M, hM⟩ := h in ⟨M, ⟨λ J hJ K hK, or.rec_on (hM.1 J hJ) (λ h, h.1.2 K hK)
(λ h, eq_top_iff.2 (le_of_lt (h ▸ hK))), hM.2⟩⟩,
λ h, let ⟨M, hM⟩ := h in ⟨M, ⟨λ J hJ, or.rec_on (classical.em (J = ⊤)) (λ h, or.inr h)
(λ h, or.inl ⟨⟨h, hM.1 J hJ⟩⟩), hM.2⟩⟩⟩
/-- An ideal `I` equals its Jacobson radical if and only if every element outside `I`
also lies outside of a maximal ideal containing `I`. -/
lemma eq_jacobson_iff_not_mem :
I.jacobson = I ↔ ∀ x ∉ I, ∃ M : ideal R, (I ≤ M ∧ M.is_maximal) ∧ x ∉ M :=
begin
split,
{ intros h x hx,
erw [← h, mem_Inf] at hx,
push_neg at hx,
exact hx },
{ refine λ h, le_antisymm (λ x hx, _) le_jacobson,
contrapose hx,
erw mem_Inf,
push_neg,
exact h x hx }
end
theorem map_jacobson_of_surjective {f : R →+* S} (hf : function.surjective f) :
ring_hom.ker f ≤ I → map f (I.jacobson) = (map f I).jacobson :=
begin
intro h,
unfold ideal.jacobson,
have : ∀ J ∈ {J : ideal R | I ≤ J ∧ J.is_maximal}, f.ker ≤ J := λ J hJ, le_trans h hJ.left,
refine trans (map_Inf hf this) (le_antisymm _ _),
{ refine Inf_le_Inf (λ J hJ, ⟨comap f J, ⟨⟨le_comap_of_map_le hJ.1, _⟩,
map_comap_of_surjective f hf J⟩⟩),
haveI : J.is_maximal := hJ.right,
exact comap_is_maximal_of_surjective f hf },
{ refine Inf_le_Inf_of_subset_insert_top (λ j hj, hj.rec_on (λ J hJ, _)),
rw ← hJ.2,
cases map_eq_top_or_is_maximal_of_surjective f hf hJ.left.right with htop hmax,
{ exact htop.symm ▸ set.mem_insert ⊤ _ },
{ exact set.mem_insert_of_mem ⊤ ⟨map_mono hJ.1.1, hmax⟩ } },
end
lemma map_jacobson_of_bijective {f : R →+* S} (hf : function.bijective f) :
map f (I.jacobson) = (map f I).jacobson :=
map_jacobson_of_surjective hf.right
(le_trans (le_of_eq (f.injective_iff_ker_eq_bot.1 hf.left)) bot_le)
lemma comap_jacobson {f : R →+* S} {K : ideal S} :
comap f (K.jacobson) = Inf (comap f '' {J : ideal S | K ≤ J ∧ J.is_maximal}) :=
trans (comap_Inf' f _) (Inf_eq_infi).symm
theorem comap_jacobson_of_surjective {f : R →+* S} (hf : function.surjective f) {K : ideal S} :
comap f (K.jacobson) = (comap f K).jacobson :=
begin
unfold ideal.jacobson,
refine le_antisymm _ _,
{ refine le_trans (comap_mono (le_of_eq (trans top_inf_eq.symm Inf_insert.symm))) _,
rw [comap_Inf', Inf_eq_infi],
refine infi_le_infi_of_subset (λ J hJ, _),
have : comap f (map f J) = J := trans (comap_map_of_surjective f hf J)
(le_antisymm (sup_le_iff.2 ⟨le_of_eq rfl, le_trans (comap_mono bot_le) hJ.left⟩) le_sup_left),
cases map_eq_top_or_is_maximal_of_surjective _ hf hJ.right with htop hmax,
{ refine ⟨⊤, ⟨set.mem_insert ⊤ _, htop ▸ this⟩⟩ },
{ refine ⟨map f J, ⟨set.mem_insert_of_mem _
⟨le_map_of_comap_le_of_surjective f hf hJ.1, hmax⟩, this⟩⟩ } },
{ rw comap_Inf,
refine le_infi_iff.2 (λ J, (le_infi_iff.2 (λ hJ, _))),
haveI : J.is_maximal := hJ.right,
refine Inf_le ⟨comap_mono hJ.left, comap_is_maximal_of_surjective _ hf⟩ }
end
@[mono] lemma jacobson_mono {I J : ideal R} : I ≤ J → I.jacobson ≤ J.jacobson :=
begin
intros h x hx,
erw mem_Inf at ⊢ hx,
exact λ K ⟨hK, hK_max⟩, hx ⟨trans h hK, hK_max⟩
end
end ring
section comm_ring
variables [comm_ring R] [comm_ring S] {I : ideal R}
lemma radical_le_jacobson : radical I ≤ jacobson I :=
le_Inf (λ J hJ, (radical_eq_Inf I).symm ▸ Inf_le ⟨hJ.left, is_maximal.is_prime hJ.right⟩)
lemma is_radical_of_eq_jacobson (h : jacobson I = I) : I.is_radical :=
radical_le_jacobson.trans h.le
lemma is_unit_of_sub_one_mem_jacobson_bot (r : R)
(h : r - 1 ∈ jacobson (⊥ : ideal R)) : is_unit r :=
begin
cases exists_mul_sub_mem_of_sub_one_mem_jacobson r h with s hs,
rw [mem_bot, sub_eq_zero, mul_comm] at hs,
exact is_unit_of_mul_eq_one _ _ hs
end
lemma mem_jacobson_bot {x : R} : x ∈ jacobson (⊥ : ideal R) ↔ ∀ y, is_unit (x * y + 1) :=
⟨λ hx y, let ⟨z, hz⟩ := (mem_jacobson_iff.1 hx) y in
is_unit_iff_exists_inv.2 ⟨z, by rwa [add_mul, one_mul, ← sub_eq_zero, mul_right_comm,
mul_comm _ z, mul_right_comm]⟩,
λ h, mem_jacobson_iff.mpr (λ y, (let ⟨b, hb⟩ := is_unit_iff_exists_inv.1 (h y) in
⟨b, (submodule.mem_bot R).2 (hb ▸ (by ring))⟩))⟩
/-- An ideal `I` of `R` is equal to its Jacobson radical if and only if
the Jacobson radical of the quotient ring `R/I` is the zero ideal -/
theorem jacobson_eq_iff_jacobson_quotient_eq_bot :
I.jacobson = I ↔ jacobson (⊥ : ideal (R ⧸ I)) = ⊥ :=
begin
have hf : function.surjective (quotient.mk I) := submodule.quotient.mk_surjective I,
split,
{ intro h,
replace h := congr_arg (map (quotient.mk I)) h,
rw map_jacobson_of_surjective hf (le_of_eq mk_ker) at h,
simpa using h },
{ intro h,
replace h := congr_arg (comap (quotient.mk I)) h,
rw [comap_jacobson_of_surjective hf, ← (quotient.mk I).ker_eq_comap_bot] at h,
simpa using h }
end
/-- The standard radical and Jacobson radical of an ideal `I` of `R` are equal if and only if
the nilradical and Jacobson radical of the quotient ring `R/I` coincide -/
theorem radical_eq_jacobson_iff_radical_quotient_eq_jacobson_bot :
I.radical = I.jacobson ↔ radical (⊥ : ideal (R ⧸ I)) = jacobson ⊥ :=
begin
have hf : function.surjective (quotient.mk I) := submodule.quotient.mk_surjective I,
split,
{ intro h,
have := congr_arg (map (quotient.mk I)) h,
rw [map_radical_of_surjective hf (le_of_eq mk_ker),
map_jacobson_of_surjective hf (le_of_eq mk_ker)] at this,
simpa using this },
{ intro h,
have := congr_arg (comap (quotient.mk I)) h,
rw [comap_radical, comap_jacobson_of_surjective hf, ← (quotient.mk I).ker_eq_comap_bot] at this,
simpa using this }
end
lemma jacobson_radical_eq_jacobson :
I.radical.jacobson = I.jacobson :=
le_antisymm (le_trans (le_of_eq (congr_arg jacobson (radical_eq_Inf I)))
(Inf_le_Inf (λ J hJ, ⟨Inf_le ⟨hJ.1, hJ.2.is_prime⟩, hJ.2⟩))) (jacobson_mono le_radical)
end comm_ring
end jacobson
section polynomial
open polynomial
variables [comm_ring R]
lemma jacobson_bot_polynomial_le_Inf_map_maximal :
jacobson (⊥ : ideal R[X]) ≤ Inf (map (C : R →+* R[X]) '' {J : ideal R | J.is_maximal}) :=
begin
refine le_Inf (λ J, exists_imp_distrib.2 (λ j hj, _)),
haveI : j.is_maximal := hj.1,
refine trans (jacobson_mono bot_le) (le_of_eq _ : J.jacobson ≤ J),
suffices : (⊥ : ideal (polynomial (R ⧸ j))).jacobson = ⊥,
{ rw [← hj.2, jacobson_eq_iff_jacobson_quotient_eq_bot],
replace this :=
congr_arg (map (polynomial_quotient_equiv_quotient_polynomial j).to_ring_hom) this,
rwa [map_jacobson_of_bijective _, map_bot] at this,
exact (ring_equiv.bijective (polynomial_quotient_equiv_quotient_polynomial j)) },
refine eq_bot_iff.2 (λ f hf, _),
simpa [(λ hX, by simpa using congr_arg (λ f, coeff f 1) hX : (X : (R ⧸ j)[X]) ≠ 0)]
using eq_C_of_degree_eq_zero (degree_eq_zero_of_is_unit ((mem_jacobson_bot.1 hf) X)),
end
lemma jacobson_bot_polynomial_of_jacobson_bot (h : jacobson (⊥ : ideal R) = ⊥) :
jacobson (⊥ : ideal R[X]) = ⊥ :=
begin
refine eq_bot_iff.2 (le_trans jacobson_bot_polynomial_le_Inf_map_maximal _),
refine (λ f hf, ((submodule.mem_bot _).2 (polynomial.ext (λ n, trans _ (coeff_zero n).symm)))),
suffices : f.coeff n ∈ ideal.jacobson ⊥, by rwa [h, submodule.mem_bot] at this,
exact mem_Inf.2 (λ j hj, (mem_map_C_iff.1 ((mem_Inf.1 hf) ⟨j, ⟨hj.2, rfl⟩⟩)) n),
end
end polynomial
section is_local
variables [comm_ring R]
/-- An ideal `I` is local iff its Jacobson radical is maximal. -/
class is_local (I : ideal R) : Prop := (out : is_maximal (jacobson I))
theorem is_local_iff {I : ideal R} : is_local I ↔ is_maximal (jacobson I) :=
⟨λ h, h.1, λ h, ⟨h⟩⟩
theorem is_local_of_is_maximal_radical {I : ideal R} (hi : is_maximal (radical I)) : is_local I :=
⟨have radical I = jacobson I,
from le_antisymm (le_Inf $ λ M ⟨him, hm⟩, hm.is_prime.radical_le_iff.2 him)
(Inf_le ⟨le_radical, hi⟩),
show is_maximal (jacobson I), from this ▸ hi⟩
theorem is_local.le_jacobson {I J : ideal R} (hi : is_local I) (hij : I ≤ J) (hj : J ≠ ⊤) :
J ≤ jacobson I :=
let ⟨M, hm, hjm⟩ := exists_le_maximal J hj in
le_trans hjm $ le_of_eq $ eq.symm $ hi.1.eq_of_le hm.1.1 $ Inf_le ⟨le_trans hij hjm, hm⟩
theorem is_local.mem_jacobson_or_exists_inv {I : ideal R} (hi : is_local I) (x : R) :
x ∈ jacobson I ∨ ∃ y, y * x - 1 ∈ I :=
classical.by_cases
(assume h : I ⊔ span {x} = ⊤,
let ⟨p, hpi, q, hq, hpq⟩ := submodule.mem_sup.1 ((eq_top_iff_one _).1 h) in
let ⟨r, hr⟩ := mem_span_singleton.1 hq in
or.inr ⟨r, by rw [← hpq, mul_comm, ← hr, ← neg_sub, add_sub_cancel]; exact I.neg_mem hpi⟩)
(assume h : I ⊔ span {x} ≠ ⊤,
or.inl $ le_trans le_sup_right (hi.le_jacobson le_sup_left h) $ mem_span_singleton.2 $
dvd_refl x)
end is_local
theorem is_primary_of_is_maximal_radical [comm_ring R] {I : ideal R} (hi : is_maximal (radical I)) :
is_primary I :=
have radical I = jacobson I,
from le_antisymm (le_Inf $ λ M ⟨him, hm⟩, hm.is_prime.radical_le_iff.2 him)
(Inf_le ⟨le_radical, hi⟩),
⟨ne_top_of_lt $ lt_of_le_of_lt le_radical (lt_top_iff_ne_top.2 hi.1.1),
λ x y hxy, ((is_local_of_is_maximal_radical hi).mem_jacobson_or_exists_inv y).symm.imp
(λ ⟨z, hz⟩, by rw [← mul_one x, ← sub_sub_cancel (z * y) 1, mul_sub, mul_left_comm]; exact
I.sub_mem (I.mul_mem_left _ hxy) (I.mul_mem_left _ hz))
(this ▸ id)⟩
end ideal
|
import tactic --hide
/-Lemma
If $P$ is a logical statement then $P$ implies $(P \implies \mathrm{false}) \implies \mathrm{false}$
-/
lemma P_to_P_to_false_to_false (P : Prop) : P → ((P → false) → false) :=
begin
intros hp hpf,
apply hpf,
exact hp,
end |
Production
|
In Canada you can now buy Bitcoins with Visa® or MasterCard® using Western Union – the limit is up to $950 per transaction for this payment method.
You can send more money with other methods, and if you bank with ScotiaBank or BMO you can also send money through online banking, including from a mobile device with ScotiaBank Mobile Banking. Western Union Bitcoin purchases can be placed through several sellers on the www.LocalBitcoins.com marketplace.
import Mathlib.Logic.Basic
namespace Bool
section
theorem and_of_true {p q} (hp : p = true) (hq : q = true) : (p && q) = true
:= hp.symm ▸ hq.symm ▸ rfl
theorem and_left : {p q : Bool} → (p && q) = true → p = true
| true, _, _ => rfl
| false, _, h => false_and _ ▸ h
theorem and_right : {p q : Bool} → (p && q) = true → q = true
| _, true, _ => rfl
| _, false, h => and_false _ ▸ h
end
end Bool
-- section
-- instance (p : Fin k → Prop) [Decidable (∀ k, ¬p k)] : Decidable (∃ k, p k) :=
-- if h : ∀ k, ¬p k then isFalse $ not_exists.mpr h
-- else isTrue $ let ⟨x, hx⟩ := not_forall.mp h; ⟨x, of_not_not hx⟩
-- end |
@[noinline] def f (x : Bool) := x
@[noinline] def g (x y : Bool) := x
def h (x : Bool) (xs : List Nat) : List Bool :=
match x with
| true =>
let z := f true
let y := f false
xs.map fun x => g y z
| false =>
let y := f false
let z := f true
xs.map fun x => g y z
theorem ex1 : h true [1] = h false [1] := rfl
#eval h true [1]
#eval h false [1]
theorem ex2 : (h true [1] == h false [1]) = true :=
by native_decide
@[noinline] def f2 (a : String) := a
@[noinline] def g2 (a : String) (x : Bool) := a
def h2 (x : Bool) (xs : List Nat) : List String :=
match x with
| false =>
let a := f2 "a"
let y := f false
xs.map fun x => g2 a y
| true =>
let y := f false
let a := f2 "a"
xs.map fun x => g2 a y
#eval h2 true [1]
#eval h2 false [1]
|
record Foo where
constructor MkFoo
a : Nat
b : Nat
foo1 : Foo
foo1 = MkFoo
{ a = 1
, a = 2
, b = 3
}
foo2 : Foo
foo2 =
{ a := 3
, a := 4
, b := 2
, b := 1
} foo1
|
[STATEMENT]
lemma wprepare_loop_goon_Oc_nonempty[simp]: "wprepare_loop_goon m lm (b, Oc # list) \<Longrightarrow> b \<noteq> []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wprepare_loop_goon m lm (b, Oc # list) \<Longrightarrow> b \<noteq> []
[PROOF STEP]
apply(simp add: wprepare_loop_goon.simps
wprepare_loop_goon_in_middle.simps
wprepare_loop_goon_on_rightmost.simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>rn. rev b @ Oc # list = Oc # Oc \<up> m @ Bk # Bk # <lm> @ Bk \<up> rn \<and> b \<noteq> [] \<and> (\<exists>mr. (\<exists>lm1. if lm1 = [] then Oc # list = Oc \<up> mr @ Bk \<up> rn else Oc # list = Oc \<up> mr @ Bk # <lm1> @ Bk \<up> rn) \<and> 0 < mr) \<Longrightarrow> b \<noteq> []
[PROOF STEP]
apply(auto)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
\documentclass[../../Instruction_PupilCapture]{subfiles}
% Hier müssen keine Packages geladen werden, es werden automatisch die von masterdoc geladen,
% sowie die Konfigurationen.
\graphicspath{{img/}}
\begin{document}
\chapter{Usage of Pupil Capture}
The following is a short set of instructions on how to use Pupil Capture in conjunction with the framework.
\begin{enumerate}
\item Select \textit{Unity Stream} as Capture Selection in the world window and activate the Unity Capture (Figures \ref{fig:screenshot001} and \ref{fig:screenshot003}).
\begin{figure}[htp]
\centering
\includegraphics[width=0.5\linewidth]{img/screenshot001}
\caption{Section \textit{Capture Selection} for use of \textit{Unity Stream}}
\label{fig:screenshot001}
\end{figure}
\begin{figure}[htp]
\centering
\includegraphics[width=0.5\linewidth]{screenshot003}
\caption{Selected \textit{Unity Stream} and click on \textit{Activate Unity Capture}}
\label{fig:screenshot003}
\end{figure}
\item Open an eye window by activating \textit{Detect Eye 0} or \textit{Detect Eye 1}. Use one for monocular eye tracking or two for binocular (Figure \ref{fig:screenshot002}). Check that the \textit{detection \& mapping mode} is set to \textit{2d}.
\begin{figure}[htp]
\centering
\includegraphics[width=0.5\linewidth]{screenshot002}
\caption{General settings with the one eye activated for monocular eye tracking}
\label{fig:screenshot002}
\end{figure}
\item Select the USB camera for eye tracking by selecting \textit{Local USB} as Capture Selection in the eye window. Afterwards select the camera in the \textit{Activate source} list (Figure \ref{fig:screenshot004}).
\begin{figure}[h!]
\centering
\includegraphics[width=0.5\linewidth]{screenshot004}
\caption{Activating \textit{Local USB} in on the eye window}
\label{fig:screenshot004}
\end{figure}\clearpage
\item Now set the mode to \textit{ROI} in the general settings of the eye window (Figure \ref{fig:screenshot005}). Then have the test person look to the left, to the right, up, and down. Decrease the size of the displayed rectangle while making sure the pupil always stays inside it. You can change the size of the rectangle by grabbing the circles in its corners. Figure \ref{roi} shows how the rectangle should be fitted around the eye.
\begin{figure}[h!]
\centering
\includegraphics[width=0.5\linewidth]{img/screenshot005}
\caption{Select Mode \textit{ROI}}
\label{fig:screenshot005}
\end{figure}
\begin{figure}[h!]
\centering
\includegraphics[width=0.5\linewidth]{ROI}
\caption{The rectangle fitted around the eye}
\label{roi}
\end{figure}\clearpage
\item Test which settings for \textit{Pupil intensity range}, \textit{Pupil min}, and \textit{Pupil max} result in the most accurate eye tracking by adjusting the sliders (Figure \ref{PupilSettings}).
\begin{figure}[h!]
\centering
\includegraphics[scale=0.8]{PupilSettings.png}
\caption{The settings for the pupil detection}
\label{PupilSettings}
\end{figure}
\item To calibrate the eye tracking you have to switch over to the world window. Choose the correct calibration method (\textit{Manual Marker Calibration}) as shown in Figure \ref{fig:screenshot006}.
\begin{figure}[h!]
\centering
\includegraphics[width=0.5\linewidth]{img/screenshot006}
\caption{Selection calibration method}
\label{fig:screenshot006}
\end{figure}
Then press the left button (\textit{X}) on the remote control and let the test person look around to place the marker that appears in the center of the screen.
\item Click the \textit{C} on the left hand side of the world window or use the \textit{c} key to start the calibration. Now a small circle will appear in the upper left hand corner (Figure \ref{Calibration}) and fill up when the marker is detected and the subject is looking at it.
\begin{figure}[h!]
\centering
\includegraphics[width=0.8\linewidth]{img/Calibration}
\caption{The circle indicating during the calibration}
\label{Calibration}
\end{figure}
\item Every time the circle is full, press the left button (\textit{X} on the remote control) again to move the marker to the next position. After nine positions, click \textit{C} again to complete the calibration. You can press the right button (triangle) to make the marker disappear.
\item Check the accuracy and repeat the calibration if necessary.
\end{enumerate}
\end{document} |
(*
Title: Random_Permutations.thy
Author: Manuel Eberl, TU München
Random permutations and folding over them.
This provides the basic theory for the concept of doing something
in a random order, e.g. inserting elements from a fixed set into a
data structure in random order.
*)
section \<open>Random Permutations\<close>
theory Random_Permutations
imports
"~~/src/HOL/Probability/Probability_Mass_Function"
"HOL-Library.Multiset_Permutations"
begin
text \<open>
Choosing a set permutation (i.e. a distinct list with the same elements as the set)
uniformly at random is the same as first choosing the first element of the list
and then choosing the rest of the list as a permutation of the remaining set.
\<close>
lemma random_permutation_of_set:
assumes "finite A" "A \<noteq> {}"
shows "pmf_of_set (permutations_of_set A) =
do {
x \<leftarrow> pmf_of_set A;
xs \<leftarrow> pmf_of_set (permutations_of_set (A - {x}));
return_pmf (x#xs)
}" (is "?lhs = ?rhs")
proof -
from assms have "permutations_of_set A = (\<Union>x\<in>A. (#) x ` permutations_of_set (A - {x}))"
by (simp add: permutations_of_set_nonempty)
also from assms have "pmf_of_set \<dots> = ?rhs"
by (subst pmf_of_set_UN[where n = "fact (card A - 1)"])
(auto simp: card_image disjoint_family_on_def map_pmf_def [symmetric] map_pmf_of_set_inj)
finally show ?thesis .
qed
text \<open>
A generic fold function that takes a function, an initial state, and a set
and chooses a random order in which it then traverses the set in the same
fashion as a left fold over a list.
We first give a recursive definition.
\<close>
function fold_random_permutation :: "('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> 'b \<Rightarrow> 'a set \<Rightarrow> 'b pmf" where
"fold_random_permutation f x {} = return_pmf x"
| "\<not>finite A \<Longrightarrow> fold_random_permutation f x A = return_pmf x"
| "finite A \<Longrightarrow> A \<noteq> {} \<Longrightarrow>
fold_random_permutation f x A =
pmf_of_set A \<bind> (\<lambda>a. fold_random_permutation f (f a x) (A - {a}))"
by (force, simp_all)
termination proof (relation "Wellfounded.measure (\<lambda>(_,_,A). card A)")
fix A :: "'a set" and f :: "'a \<Rightarrow> 'b \<Rightarrow> 'b" and x :: 'b and y :: 'a
assume A: "finite A" "A \<noteq> {}" "y \<in> set_pmf (pmf_of_set A)"
then have "card A > 0" by (simp add: card_gt_0_iff)
with A show "((f, f y x, A - {y}), f, x, A) \<in> Wellfounded.measure (\<lambda>(_, _, A). card A)"
by simp
qed simp_all
text \<open>
We can now show that the above recursive definition is equivalent to
choosing a random set permutation and folding over it (in any direction).
\<close>
lemma fold_random_permutation_foldl:
assumes "finite A"
shows "fold_random_permutation f x A =
map_pmf (foldl (\<lambda>x y. f y x) x) (pmf_of_set (permutations_of_set A))"
using assms
proof (induction f x A rule: fold_random_permutation.induct [case_names empty infinite remove])
case (remove A f x)
from remove
have "fold_random_permutation f x A =
pmf_of_set A \<bind> (\<lambda>a. fold_random_permutation f (f a x) (A - {a}))" by simp
also from remove
have "\<dots> = pmf_of_set A \<bind> (\<lambda>a. map_pmf (foldl (\<lambda>x y. f y x) x)
(map_pmf ((#) a) (pmf_of_set (permutations_of_set (A - {a})))))"
by (intro bind_pmf_cong) (simp_all add: pmf.map_comp o_def)
also from remove have "\<dots> = map_pmf (foldl (\<lambda>x y. f y x) x) (pmf_of_set (permutations_of_set A))"
by (simp_all add: random_permutation_of_set map_bind_pmf map_pmf_def [symmetric])
finally show ?case .
qed (simp_all add: pmf_of_set_singleton)
lemma fold_random_permutation_foldr:
assumes "finite A"
shows "fold_random_permutation f x A =
map_pmf (\<lambda>xs. foldr f xs x) (pmf_of_set (permutations_of_set A))"
proof -
have "fold_random_permutation f x A =
map_pmf (foldl (\<lambda>x y. f y x) x \<circ> rev) (pmf_of_set (permutations_of_set A))"
using assms by (subst fold_random_permutation_foldl [OF assms])
(simp_all add: pmf.map_comp [symmetric] map_pmf_of_set_inj)
also have "foldl (\<lambda>x y. f y x) x \<circ> rev = (\<lambda>xs. foldr f xs x)"
by (intro ext) (simp add: foldl_conv_foldr)
finally show ?thesis .
qed
lemma fold_random_permutation_fold:
assumes "finite A"
shows "fold_random_permutation f x A =
map_pmf (\<lambda>xs. fold f xs x) (pmf_of_set (permutations_of_set A))"
by (subst fold_random_permutation_foldl [OF assms], intro map_pmf_cong)
(simp_all add: foldl_conv_fold)
lemma fold_random_permutation_code [code]:
"fold_random_permutation f x (set xs) =
map_pmf (foldl (\<lambda>x y. f y x) x) (pmf_of_set (permutations_of_set (set xs)))"
by (simp add: fold_random_permutation_foldl)
text \<open>
We now introduce a slightly generalised version of the above fold
operation that does not simply return the result in the end, but applies
a monadic bind to it.
This may seem somewhat arbitrary, but it is a common use case, e.g.
in the Social Decision Scheme of Random Serial Dictatorship, where
voters narrow down a set of possible winners in a random order and
the winner is chosen from the remaining set uniformly at random.
\<close>
function fold_bind_random_permutation
:: "('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> ('b \<Rightarrow> 'c pmf) \<Rightarrow> 'b \<Rightarrow> 'a set \<Rightarrow> 'c pmf" where
"fold_bind_random_permutation f g x {} = g x"
| "\<not>finite A \<Longrightarrow> fold_bind_random_permutation f g x A = g x"
| "finite A \<Longrightarrow> A \<noteq> {} \<Longrightarrow>
fold_bind_random_permutation f g x A =
pmf_of_set A \<bind> (\<lambda>a. fold_bind_random_permutation f g (f a x) (A - {a}))"
by (force, simp_all)
termination proof (relation "Wellfounded.measure (\<lambda>(_,_,_,A). card A)")
fix A :: "'a set" and f :: "'a \<Rightarrow> 'b \<Rightarrow> 'b" and x :: 'b
and y :: 'a and g :: "'b \<Rightarrow> 'c pmf"
assume A: "finite A" "A \<noteq> {}" "y \<in> set_pmf (pmf_of_set A)"
then have "card A > 0" by (simp add: card_gt_0_iff)
with A show "((f, g, f y x, A - {y}), f, g, x, A) \<in> Wellfounded.measure (\<lambda>(_, _, _, A). card A)"
by simp
qed simp_all
text \<open>
We now show that the recursive definition is equivalent to
a random fold followed by a monadic bind.
\<close>
lemma fold_bind_random_permutation_altdef [code]:
"fold_bind_random_permutation f g x A = fold_random_permutation f x A \<bind> g"
proof (induction f x A rule: fold_random_permutation.induct [case_names empty infinite remove])
case (remove A f x)
from remove have "pmf_of_set A \<bind> (\<lambda>a. fold_bind_random_permutation f g (f a x) (A - {a})) =
pmf_of_set A \<bind> (\<lambda>a. fold_random_permutation f (f a x) (A - {a}) \<bind> g)"
by (intro bind_pmf_cong) simp_all
with remove show ?case by (simp add: bind_return_pmf bind_assoc_pmf)
qed (simp_all add: bind_return_pmf)
text \<open>
We can now derive the following nice monadic representations of the
combined fold-and-bind:
\<close>
lemma fold_bind_random_permutation_foldl:
assumes "finite A"
shows "fold_bind_random_permutation f g x A =
do {xs \<leftarrow> pmf_of_set (permutations_of_set A); g (foldl (\<lambda>x y. f y x) x xs)}"
using assms by (simp add: fold_bind_random_permutation_altdef bind_assoc_pmf
fold_random_permutation_foldl bind_return_pmf map_pmf_def)
lemma fold_bind_random_permutation_foldr:
assumes "finite A"
shows "fold_bind_random_permutation f g x A =
do {xs \<leftarrow> pmf_of_set (permutations_of_set A); g (foldr f xs x)}"
using assms by (simp add: fold_bind_random_permutation_altdef bind_assoc_pmf
fold_random_permutation_foldr bind_return_pmf map_pmf_def)
lemma fold_bind_random_permutation_fold:
assumes "finite A"
shows "fold_bind_random_permutation f g x A =
do {xs \<leftarrow> pmf_of_set (permutations_of_set A); g (fold f xs x)}"
using assms by (simp add: fold_bind_random_permutation_altdef bind_assoc_pmf
fold_random_permutation_fold bind_return_pmf map_pmf_def)
text \<open>
The following useful lemma allows us to swap partitioning a set w.\,r.\,t.\ a
predicate and drawing a random permutation of that set.
\<close>
lemma partition_random_permutations:
assumes "finite A"
shows "map_pmf (partition P) (pmf_of_set (permutations_of_set A)) =
pair_pmf (pmf_of_set (permutations_of_set {x\<in>A. P x}))
(pmf_of_set (permutations_of_set {x\<in>A. \<not>P x}))" (is "?lhs = ?rhs")
proof (rule pmf_eqI, clarify, goal_cases)
case (1 xs ys)
show ?case
proof (cases "xs \<in> permutations_of_set {x\<in>A. P x} \<and> ys \<in> permutations_of_set {x\<in>A. \<not>P x}")
case True
let ?n1 = "card {x\<in>A. P x}" and ?n2 = "card {x\<in>A. \<not>P x}"
have card_eq: "card A = ?n1 + ?n2"
proof -
have "?n1 + ?n2 = card ({x\<in>A. P x} \<union> {x\<in>A. \<not>P x})"
using assms by (intro card_Un_disjoint [symmetric]) auto
also have "{x\<in>A. P x} \<union> {x\<in>A. \<not>P x} = A" by blast
finally show ?thesis ..
qed
from True have lengths [simp]: "length xs = ?n1" "length ys = ?n2"
by (auto intro!: length_finite_permutations_of_set)
have "pmf ?lhs (xs, ys) =
real (card (permutations_of_set A \<inter> partition P -` {(xs, ys)})) / fact (card A)"
using assms by (auto simp: pmf_map measure_pmf_of_set)
also have "partition P -` {(xs, ys)} = shuffles xs ys"
using True by (intro inv_image_partition) (auto simp: permutations_of_set_def)
also have "permutations_of_set A \<inter> shuffles xs ys = shuffles xs ys"
using True distinct_disjoint_shuffles[of xs ys]
by (auto simp: permutations_of_set_def dest: set_shuffles)
also have "card (shuffles xs ys) = length xs + length ys choose length xs"
using True by (intro card_disjoint_shuffles) (auto simp: permutations_of_set_def)
also have "length xs + length ys = card A" by (simp add: card_eq)
also have "real (card A choose length xs) = fact (card A) / (fact ?n1 * fact (card A - ?n1))"
by (subst binomial_fact) (auto intro!: card_mono assms)
also have "\<dots> / fact (card A) = 1 / (fact ?n1 * fact ?n2)"
by (simp add: field_split_simps card_eq)
also have "\<dots> = pmf ?rhs (xs, ys)" using True assms by (simp add: pmf_pair)
finally show ?thesis .
next
case False
hence *: "xs \<notin> permutations_of_set {x\<in>A. P x} \<or> ys \<notin> permutations_of_set {x\<in>A. \<not>P x}" by blast
hence eq: "permutations_of_set A \<inter> (partition P -` {(xs, ys)}) = {}"
by (auto simp: o_def permutations_of_set_def)
from * show ?thesis
by (elim disjE) (insert assms eq, simp_all add: pmf_pair pmf_map measure_pmf_of_set)
qed
qed
end
|
<ompts:test>
<ompts:testdescription>Test which checks the omp barrier directive. The test creates several threads and puts one of them to sleep before setting a flag. After the barrier the other ones do some little work depending on the flag.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp barrier</ompts:directive>
<ompts:testcode>
SUBROUTINE do_some_work3()
REAL i
INTRINSIC sqrt
DOUBLE PRECISION sum
INCLUDE "omp_testsuite.f"
sum = 0.0
i = 0.0
DO WHILE (i < LOOPCOUNT-1)
sum = sum + sqrt(i)
i = i + 1
END DO
END
INTEGER FUNCTION <ompts:testcode:functionname>omp_barrier</ompts:testcode:functionname>()
IMPLICIT NONE
INTEGER sleeptime
INTEGER omp_get_thread_num
INTEGER result1, result2, rank
result1 = 0
result2 = 0
sleeptime = 1
!$omp parallel private(rank)
rank = omp_get_thread_num()
! PRINT *, "rank", rank
IF ( rank .EQ. 1 ) THEN
CALL sleep(sleeptime)
result2 = 3
END IF
<ompts:orphan>
<ompts:check>
!$omp barrier
</ompts:check>
</ompts:orphan>
IF ( rank .EQ. 0 ) THEN
result1 = result2
END IF
!$omp end parallel
IF ( result1 .EQ. 3 ) THEN
<testfunctionname></testfunctionname> = 1
ELSE
<testfunctionname></testfunctionname> = 0
END IF
END
</ompts:testcode>
</ompts:test>
|
(** This example demonstrates and approach to handling delimited information release in SeLoC.
It is based on the example from "A Separation Logic for Enforcing DeclarativeInformation Flow Control Policies" by David Costanzo and Zhong Shao.
*)
From iris.base_logic Require Import invariants.
From iris_ni.logrel Require Import types.
From iris_ni.program_logic Require Import dwp heap_lang_lifting.
From iris.proofmode Require Import proofmode.
From iris.heap_lang Require Import lang proofmode.
From iris_ni.proofmode Require Import dwp_tactics.
From iris_ni.logrel Require Import interp.
From iris_ni.examples Require Import lock.
(** The idea behind this example is that a person A. has a calendar
(which we model as a linked list), and each day is marked either with 0
(meaning that the day is free and there are no meetings) or with a
positive integer t (meaning that there is a meeting at time t).
If someone wants to set up their appointment with A., they have to see
which days are they available. In order to do so, A. must be able to
disclose the days on which they are free, without disclosing any other
information (e.g. times of other meetings).
The program that discloses the avilablility of A. is the
`available_dates` below.
We show non-interference with a form of delimited control.
Specifically, we show that if an attacker already knows the days on
which A. is available, then the attacker cannot learn any further
information.
*)
(* Iterate a function `f` over the linked list `hd`
`f` is called with `f i x` where `x` is `i`-th element in the list.
*)
Definition iter_loop : val :=
rec: "loop" "i" "hd" "f" :=
match: "hd" with
NONE => #()
| SOME "l" =>
let: "tmp1" := Fst !"l" in
let: "tmp2" := Snd !"l" in
"f" "i" "tmp1";;
"loop" ("i"+#1) "tmp2" "f"
end.
Definition iter : val := λ: "hd" "f", iter_loop #0 "hd" "f".
Definition available_dates (out : loc) : val := λ: "cal",
iter "cal" (λ: "i" "x", if: "x" = #0 then #out <- "i" else Skip).
(* Meta-function (macro) : create a HeapLang linked list out of a Coq list *)
Fixpoint make_llist (xs : list val) : expr :=
match xs with
| [] => NONE
| (x::xs) =>
SOME (ref (x, make_llist xs))
end.
Section calendar.
Context `{!heapDG Σ}.
(* Predicate describing that `hd1` and `hd2` point to lists `xs1` and `xs2`, respectively *)
Fixpoint is_list (hd1 : val) (hd2 : val) (xs1 xs2 : list val) : iProp Σ :=
match xs1,xs2 with
| [],[] => ⌜hd1 = NONEV⌝ ∗ ⌜hd2 = NONEV⌝
| x1 :: xs1, x2 :: xs2 => ∃ (l1:loc) (l2 :loc) hd1' hd2',
⌜hd1 = SOMEV #l1⌝ ∗ ⌜hd2 = SOMEV #l2⌝ ∗ l1 ↦ₗ (x1,hd1') ∗ l2 ↦ᵣ (x2,hd2')
∗ is_list hd1' hd2' xs1 xs2
| _, _ => False
end%I.
Lemma make_llist_spec (xs1 xs2 : list val) :
length xs1 = length xs2 →
⊢ DWP make_llist xs1 & make_llist xs2 : λ hd1 hd2, is_list hd1 hd2 xs1 xs2.
Proof using Type.
revert xs2. induction xs1 as [|x1 xs1]=>xs2; simpl.
- intros Hxs2. symmetry in Hxs2. eapply nil_length_inv in Hxs2.
simplify_eq/=. dwp_pures.
iApply dwp_value. eauto.
- destruct xs2 as [|x2 xs2]; simpl.
{ inversion 1. }
intros Hlen. simplify_eq/=.
dwp_bind (make_llist xs1) (make_llist xs2).
iApply (dwp_wand with "[]").
{ by iApply IHxs1. }
iIntros (t1 t2) "Ht". dwp_pures.
dwp_bind (ref _)%E (ref _)%E.
iApply dwp_alloc. iIntros (hd1 hd2) "Hhd1 Hhd2". iNext.
dwp_pures. iApply dwp_value. iModIntro.
iExists _,_,_,_. eauto with iFrame.
Qed.
(* Specifications for the iter function *)
Lemma iter_loop_spec P (i : Z) (f1 f2 :val) hd1 hd2 xs1 xs2 ξ :
Forall2 P xs1 xs2 →
is_list hd1 hd2 xs1 xs2 -∗
□(∀ x1 x2 (i : Z), ⌜P x1 x2⌝ → DWP (App f1 #i x1) & (App f2 #i x2) : ⟦ tunit ⟧ ξ) -∗
DWP iter_loop #i hd1 f1
& iter_loop #i hd2 f2 : ⟦ tunit ⟧ ξ.
Proof using Type.
iIntros (Hxs) "Hlst #Hf". iRevert (Hxs).
iLöb as "IH" forall (i hd1 hd2 xs1 xs2). iIntros (Hxs).
dwp_rec. dwp_pures.
destruct xs1 as [|x1 xs1], xs2 as [|x2 xs2]; iSimpl in "Hlst"; try by iExFalso.
- iDestruct "Hlst" as "[-> ->]". dwp_pures. iApply logrel_unit.
- iDestruct "Hlst" as (l1 l2 hd1' hd2' -> ->) "(Hl1 & Hl2 & Hlst)".
apply Forall2_cons_1 in Hxs. destruct Hxs.
dwp_pures. dwp_bind (! _)%E (! _)%E. iApply (dwp_load with "Hl1 Hl2"). iIntros "Hl1 Hl2". iNext.
dwp_pures. dwp_bind (! _)%E (! _)%E. iApply (dwp_load with "Hl1 Hl2"). iIntros "Hl1 Hl2". iNext.
dwp_pures. dwp_bind (f1 #i x1) (f2 #i x2). iApply (dwp_wand with "[Hf]").
{ iApply "Hf". iPureIntro. naive_solver. }
iIntros (??) "_". dwp_pures.
iApply ("IH" $! (i+1)%Z with "Hlst [%//]").
Qed.
Lemma iter_spec P (f1 f2 :val) hd1 hd2 xs1 xs2 ξ :
Forall2 P xs1 xs2 →
is_list hd1 hd2 xs1 xs2 -∗
□(∀ x1 x2 (i : Z), ⌜P x1 x2⌝ → DWP (App f1 #i x1) & (App f2 #i x2) : ⟦ tunit ⟧ ξ) -∗
DWP iter hd1 f1 & iter hd2 f2 : ⟦ tunit ⟧ ξ. (* TODO: return is_list *)
Proof using Type.
iIntros (Hxs) "Hlst #Hf". dwp_rec. dwp_pures.
iApply (iter_loop_spec with "Hlst Hf"); eauto.
Qed.
Lemma available_dates_spec (out : loc) hd1 hd2 xs1 xs2 :
Forall2 (λ x1 x2, x1 = #0 ↔ x2 = #0) xs1 xs2 →
⟦ tref (tint Low) ⟧ Low #out #out -∗
is_list hd1 hd2 xs1 xs2 -∗
DWP available_dates out hd1 & available_dates out hd2 : ⟦ tunit ⟧ Low.
Proof using Type.
iIntros (HP) "#Hout Hlst". dwp_rec. dwp_pures.
iApply (iter_spec with "Hlst [-]"); eauto.
iModIntro. iIntros (x1 x2 i Hxs).
dwp_pures.
case_bool_decide; case_bool_decide; try by (exfalso; naive_solver).
- dwp_pures.
iApply logrel_store ; eauto; iApply dwp_value; eauto.
iModIntro. iExists _,_. eauto.
- dwp_pures. iApply logrel_unit.
Qed.
End calendar.
From iris_ni Require Import program_logic.dwp_adequacy.
(* Adequacy statement *)
(* xs1 xs2 are calendars *)
Lemma available_dates_secure (out : loc) (xs1 xs2 : list val) σ1 σ2 (n : Z):
Forall2 (λ x1 x2, x1 = #0 ↔ x2 = #0) xs1 xs2 →
σ1.(heap) !! out = Some (Some #n) →
σ2.(heap) !! out = Some (Some #n) →
dwp_adequacy.R heapDΣ {[out]}
([let: "hd1" := make_llist xs1 in available_dates out "hd1"]%E,σ1)
([let: "hd2" := make_llist xs2 in available_dates out "hd2"]%E,σ2).
Proof using Type.
intros Hxs Hσ1 Hσ2.
apply tc_once. simpl.
eapply dwp_lift_bisim_singleton; eauto.
iIntros (HG) "Ho".
dwp_bind (make_llist xs1) (make_llist xs2).
iApply (dwp_wand with "[]").
{ iApply make_llist_spec. by eapply Forall2_length. }
iIntros (hd1 hd2) "Hhd". dwp_pures.
iApply (dwp_wand with "[-]").
{ by iApply (available_dates_spec with "Ho Hhd"). }
iIntros (v1 v2). iDestruct 1 as "[-> ->]".
eauto.
Qed.
|
open import Common.Prelude
_test_test_ : Nat → Nat → Nat → Nat
m test_test n = λ i → m + i + n
|
!! Demonstrate Fortran calling C.
!!
!! Normally BIND(C) should be used after the function name in the interface block.
!!
!! There are various de facto compiler implementations, using no, one, or double underscores pre- or post-pended
!! to the C function name.
!! New code should use `BIND (C)` after the procedure name instead!
program FortranCallC
use, intrinsic :: iso_c_binding, only: dp=>c_double, c_int
implicit none
interface
subroutine timestwo(x, x2, N) bind (c)
!! bind (c) is omitted for legacy code; requires manually adding underscore to C function name
import
integer(c_int), value :: N
real(dp) :: x(N), x2(N)
end subroutine timestwo
end interface
integer(c_int) :: N, i
real(dp), allocatable :: x(:), x2(:)
N = 3
allocate(x(N), x2(N))
do i=1,N
x(i) = i
enddo
print '(A,100F7.3)','in: ',x
call timestwo(x, x2, N)
print '(A,100F7.3)','out: ',x2
end program
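!! A matching C implementation would look roughly like this (hypothetical sketch,
!! not part of this file); N arrives by value, x and x2 as pointers to arrays of length N:
!!   void timestwo(double *x, double *x2, int N) {
!!     for (int i = 0; i < N; i++) x2[i] = 2.0 * x[i];
!!   }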
|
total twoPlusTwoNotFive : 2 + 2 = 5 -> Void
twoPlusTwoNotFive Refl impossible
partial loop : Void
loop = loop
|
module Internal.CTimespec
-- Local Variables:
-- idris-load-packages: ("contrib")
-- End:
import CFFI.Types
import CFFI.Memory
import Data.Bits
%include c "time.h"
-- struct tm {
-- int tm_sec; /* seconds, range 0 to 59 */
-- int tm_min; /* minutes, range 0 to 59 */
-- int tm_hour; /* hours, range 0 to 23 */
-- int tm_mday; /* day of the month, range 1 to 31 */
-- int tm_mon; /* month, range 0 to 11 */
-- int tm_year; /* The number of years since 1900 */
-- int tm_wday; /* day of the week, range 0 to 6 */
-- int tm_yday; /* day in the year, range 0 to 365 */
-- int tm_isdst; /* daylight saving time */
-- };
tm_spec : Composite
tm_spec = STRUCT [
mkComposite I8, --sec
mkComposite I8, --min
mkComposite I8, --hour
mkComposite I8, --mday
mkComposite I8, --mon
mkComposite I16, --year
mkComposite I8, --wday
mkComposite I8, --yday
mkComposite I8 -- dst
]
-- struct timespec {
-- time_t tv_sec; /* seconds */
-- long tv_nsec; /* nanoseconds */
-- };
time_spec : Composite
time_spec = STRUCT [
mkComposite I64, --
mkComposite I64 --min
]
public export
record CTimeSpec where
constructor MkCTimeSpec
sec : Integer
nsec : Integer
export
Show CTimeSpec where
show (MkCTimeSpec sec nsec) = show sec ++ "," ++ show nsec
get_time : Int -> Ptr -> IO Int
get_time clockId ptr = foreign FFI_C "clock_gettime" (Int -> Ptr -> IO Int) clockId ptr
ClockRealTime : IO Int
ClockRealTime = foreign FFI_C "#CLOCK_REALTIME" (IO Int)
export
clock_gettime : IO CTimeSpec
clock_gettime = do
mem <- alloc time_spec
result <- foreign FFI_C "clock_gettime" (Int -> Ptr -> IO Int) !ClockRealTime mem
let (CPt x1 y) = field time_spec 0 mem
let (CPt x2 y) = field time_spec 1 mem
pure $ MkCTimeSpec (prim__zextB64_BigInt !(peek I64 x1)) (prim__zextB64_BigInt !(peek I64 x2))
export
clock_getres : IO CTimeSpec
clock_getres = do
mem <- alloc time_spec
result <- foreign FFI_C "clock_getres" (Int -> Ptr -> IO Int) !ClockRealTime mem
let (CPt x1 y) = field time_spec 0 mem
let (CPt x2 y) = field time_spec 1 mem
pure $ MkCTimeSpec (prim__zextB64_BigInt !(peek I64 x1)) (prim__zextB64_BigInt !(peek I64 x2))
|
Require Export euclidean__axioms.
Require Export euclidean__defs.
Require Export euclidean__tactics.
Require Export lemma__8__2.
Require Export lemma__8__7.
Require Export lemma__NCdistinct.
Require Export lemma__collinearorder.
Require Export lemma__collinearright.
Require Export lemma__inequalitysymmetric.
Require Export lemma__rightangleNC.
Require Export logic.
Definition lemma__rectangleparallelogram : forall A B C D, (euclidean__defs.RE A B C D) -> (euclidean__defs.PG A B C D).
Proof.
intro A.
intro B.
intro C.
intro D.
intro H.
assert (* Cut *) ((euclidean__defs.Per D A B) /\ ((euclidean__defs.Per A B C) /\ ((euclidean__defs.Per B C D) /\ ((euclidean__defs.Per C D A) /\ (euclidean__defs.CR A C B D))))) as H0.
- assert ((euclidean__defs.Per D A B) /\ ((euclidean__defs.Per A B C) /\ ((euclidean__defs.Per B C D) /\ ((euclidean__defs.Per C D A) /\ (euclidean__defs.CR A C B D))))) as H0 by exact H.
assert ((euclidean__defs.Per D A B) /\ ((euclidean__defs.Per A B C) /\ ((euclidean__defs.Per B C D) /\ ((euclidean__defs.Per C D A) /\ (euclidean__defs.CR A C B D))))) as __TmpHyp by exact H0.
destruct __TmpHyp as [H1 H2].
destruct H2 as [H3 H4].
destruct H4 as [H5 H6].
destruct H6 as [H7 H8].
split.
-- exact H1.
-- split.
--- exact H3.
--- split.
---- exact H5.
---- split.
----- exact H7.
----- exact H8.
- assert (* Cut *) (euclidean__axioms.nCol D A B) as H1.
-- destruct H0 as [H1 H2].
destruct H2 as [H3 H4].
destruct H4 as [H5 H6].
destruct H6 as [H7 H8].
apply (@lemma__rightangleNC.lemma__rightangleNC D A B H1).
-- assert (* Cut *) (euclidean__axioms.nCol A B C) as H2.
--- destruct H0 as [H2 H3].
destruct H3 as [H4 H5].
destruct H5 as [H6 H7].
destruct H7 as [H8 H9].
apply (@lemma__rightangleNC.lemma__rightangleNC A B C H4).
--- assert (* Cut *) (euclidean__axioms.nCol C D A) as H3.
---- destruct H0 as [H3 H4].
destruct H4 as [H5 H6].
destruct H6 as [H7 H8].
destruct H8 as [H9 H10].
apply (@lemma__rightangleNC.lemma__rightangleNC C D A H9).
---- assert (* Cut *) (exists M, (euclidean__axioms.BetS A M C) /\ (euclidean__axioms.BetS B M D)) as H4.
----- destruct H0 as [H4 H5].
destruct H5 as [H6 H7].
destruct H7 as [H8 H9].
destruct H9 as [H10 H11].
exact H11.
----- destruct H4 as [M H5].
destruct H5 as [H6 H7].
destruct H0 as [H8 H9].
destruct H9 as [H10 H11].
destruct H11 as [H12 H13].
destruct H13 as [H14 H15].
assert (* Cut *) (~(euclidean__defs.Meet A B C D)) as H16.
------ intro H16.
assert (exists P, (euclidean__axioms.neq A B) /\ ((euclidean__axioms.neq C D) /\ ((euclidean__axioms.Col A B P) /\ (euclidean__axioms.Col C D P)))) as H17 by exact H16.
destruct H17 as [P H18].
destruct H18 as [H19 H20].
destruct H20 as [H21 H22].
destruct H22 as [H23 H24].
assert (* Cut *) (~(A = P)) as H25.
------- intro H25.
assert (* Cut *) (euclidean__axioms.Col C D A) as H26.
-------- apply (@eq__ind__r euclidean__axioms.Point P (fun A0 => (euclidean__defs.RE A0 B C D) -> ((euclidean__defs.Per D A0 B) -> ((euclidean__defs.Per A0 B C) -> ((euclidean__defs.Per C D A0) -> ((euclidean__defs.CR A0 C B D) -> ((euclidean__axioms.nCol D A0 B) -> ((euclidean__axioms.nCol A0 B C) -> ((euclidean__axioms.nCol C D A0) -> ((euclidean__axioms.BetS A0 M C) -> ((euclidean__defs.Meet A0 B C D) -> ((euclidean__axioms.neq A0 B) -> ((euclidean__axioms.Col A0 B P) -> (euclidean__axioms.Col C D A0)))))))))))))) with (x := A).
---------intro H26.
intro H27.
intro H28.
intro H29.
intro H30.
intro H31.
intro H32.
intro H33.
intro H34.
intro H35.
intro H36.
intro H37.
exact H24.
--------- exact H25.
--------- exact H.
--------- exact H8.
--------- exact H10.
--------- exact H14.
--------- exact H15.
--------- exact H1.
--------- exact H2.
--------- exact H3.
--------- exact H6.
--------- exact H16.
--------- exact H19.
--------- exact H23.
-------- apply (@euclidean__tactics.Col__nCol__False C D A H3 H26).
------- assert (* Cut *) (~(D = P)) as H26.
-------- intro H26.
assert (* Cut *) (euclidean__axioms.Col A B D) as H27.
--------- apply (@eq__ind__r euclidean__axioms.Point P (fun D0 => (euclidean__defs.RE A B C D0) -> ((euclidean__defs.Per D0 A B) -> ((euclidean__defs.Per B C D0) -> ((euclidean__defs.Per C D0 A) -> ((euclidean__defs.CR A C B D0) -> ((euclidean__axioms.nCol D0 A B) -> ((euclidean__axioms.nCol C D0 A) -> ((euclidean__axioms.BetS B M D0) -> ((euclidean__defs.Meet A B C D0) -> ((euclidean__axioms.neq C D0) -> ((euclidean__axioms.Col C D0 P) -> (euclidean__axioms.Col A B D0))))))))))))) with (x := D).
----------intro H27.
intro H28.
intro H29.
intro H30.
intro H31.
intro H32.
intro H33.
intro H34.
intro H35.
intro H36.
intro H37.
exact H23.
---------- exact H26.
---------- exact H.
---------- exact H8.
---------- exact H12.
---------- exact H14.
---------- exact H15.
---------- exact H1.
---------- exact H3.
---------- exact H7.
---------- exact H16.
---------- exact H21.
---------- exact H24.
--------- assert (* Cut *) (euclidean__axioms.Col D A B) as H28.
---------- assert (* Cut *) ((euclidean__axioms.Col B A D) /\ ((euclidean__axioms.Col B D A) /\ ((euclidean__axioms.Col D A B) /\ ((euclidean__axioms.Col A D B) /\ (euclidean__axioms.Col D B A))))) as H28.
----------- apply (@lemma__collinearorder.lemma__collinearorder A B D H27).
----------- destruct H28 as [H29 H30].
destruct H30 as [H31 H32].
destruct H32 as [H33 H34].
destruct H34 as [H35 H36].
exact H33.
---------- apply (@euclidean__tactics.Col__nCol__False C D A H3).
-----------apply (@euclidean__tactics.not__nCol__Col C D A).
------------intro H29.
apply (@euclidean__tactics.Col__nCol__False D A B H1 H28).
-------- assert (* Cut *) (euclidean__defs.Per B A D) as H27.
--------- apply (@lemma__8__2.lemma__8__2 D A B H8).
--------- assert (* Cut *) (euclidean__axioms.Col B A P) as H28.
---------- assert (* Cut *) ((euclidean__axioms.Col B A P) /\ ((euclidean__axioms.Col B P A) /\ ((euclidean__axioms.Col P A B) /\ ((euclidean__axioms.Col A P B) /\ (euclidean__axioms.Col P B A))))) as H28.
----------- apply (@lemma__collinearorder.lemma__collinearorder A B P H23).
----------- destruct H28 as [H29 H30].
destruct H30 as [H31 H32].
destruct H32 as [H33 H34].
destruct H34 as [H35 H36].
exact H29.
---------- assert (* Cut *) (euclidean__axioms.neq P A) as H29.
----------- apply (@lemma__inequalitysymmetric.lemma__inequalitysymmetric A P H25).
----------- assert (* Cut *) (euclidean__defs.Per P A D) as H30.
------------ apply (@lemma__collinearright.lemma__collinearright B A P D H27 H28 H29).
------------ assert (* Cut *) (euclidean__axioms.neq P D) as H31.
------------- apply (@lemma__inequalitysymmetric.lemma__inequalitysymmetric D P H26).
------------- assert (* Cut *) (euclidean__defs.Per P D A) as H32.
-------------- apply (@lemma__collinearright.lemma__collinearright C D P A H14 H24 H31).
-------------- assert (* Cut *) (euclidean__defs.Per A D P) as H33.
--------------- apply (@lemma__8__2.lemma__8__2 P D A H32).
--------------- assert (* Cut *) (~(euclidean__defs.Per P A D)) as H34.
---------------- apply (@lemma__8__7.lemma__8__7 P D A H33).
---------------- apply (@H34 H30).
------ assert (* Cut *) (euclidean__axioms.neq A B) as H17.
------- assert (* Cut *) ((euclidean__axioms.neq A B) /\ ((euclidean__axioms.neq B C) /\ ((euclidean__axioms.neq A C) /\ ((euclidean__axioms.neq B A) /\ ((euclidean__axioms.neq C B) /\ (euclidean__axioms.neq C A)))))) as H17.
-------- apply (@lemma__NCdistinct.lemma__NCdistinct A B C H2).
-------- destruct H17 as [H18 H19].
destruct H19 as [H20 H21].
destruct H21 as [H22 H23].
destruct H23 as [H24 H25].
destruct H25 as [H26 H27].
exact H18.
------- assert (* Cut *) (euclidean__axioms.neq C D) as H18.
-------- assert (* Cut *) ((euclidean__axioms.neq C D) /\ ((euclidean__axioms.neq D A) /\ ((euclidean__axioms.neq C A) /\ ((euclidean__axioms.neq D C) /\ ((euclidean__axioms.neq A D) /\ (euclidean__axioms.neq A C)))))) as H18.
--------- apply (@lemma__NCdistinct.lemma__NCdistinct C D A H3).
--------- destruct H18 as [H19 H20].
destruct H20 as [H21 H22].
destruct H22 as [H23 H24].
destruct H24 as [H25 H26].
destruct H26 as [H27 H28].
exact H19.
-------- assert (* Cut *) (euclidean__axioms.neq D C) as H19.
--------- assert (* Cut *) ((euclidean__axioms.neq C D) /\ ((euclidean__axioms.neq D A) /\ ((euclidean__axioms.neq C A) /\ ((euclidean__axioms.neq D C) /\ ((euclidean__axioms.neq A D) /\ (euclidean__axioms.neq A C)))))) as H19.
---------- apply (@lemma__NCdistinct.lemma__NCdistinct C D A H3).
---------- destruct H19 as [H20 H21].
destruct H21 as [H22 H23].
destruct H23 as [H24 H25].
destruct H25 as [H26 H27].
destruct H27 as [H28 H29].
exact H26.
--------- assert (* Cut *) (A = A) as H20.
---------- apply (@logic.eq__refl Point A).
---------- assert (* Cut *) (euclidean__axioms.Col A B A) as H21.
----------- right.
left.
exact H20.
----------- assert (* Cut *) (B = B) as H22.
------------ apply (@logic.eq__refl Point B).
------------ assert (* Cut *) (euclidean__axioms.Col A B B) as H23.
------------- right.
right.
left.
exact H22.
------------- assert (* Cut *) (C = C) as H24.
-------------- apply (@logic.eq__refl Point C).
-------------- assert (* Cut *) (euclidean__axioms.Col C D C) as H25.
--------------- right.
left.
exact H24.
--------------- assert (* Cut *) (D = D) as H26.
---------------- apply (@logic.eq__refl Point D).
---------------- assert (* Cut *) (euclidean__axioms.Col C D D) as H27.
----------------- right.
right.
left.
exact H26.
----------------- assert (* Cut *) (euclidean__axioms.BetS D M B) as H28.
------------------ apply (@euclidean__axioms.axiom__betweennesssymmetry B M D H7).
------------------ assert (* Cut *) (euclidean__defs.Par A B C D) as H29.
------------------- exists A.
exists B.
exists D.
exists C.
exists M.
split.
-------------------- exact H17.
-------------------- split.
--------------------- exact H18.
--------------------- split.
---------------------- exact H21.
---------------------- split.
----------------------- exact H23.
----------------------- split.
------------------------ exact H17.
------------------------ split.
------------------------- exact H27.
------------------------- split.
-------------------------- exact H25.
-------------------------- split.
--------------------------- exact H19.
--------------------------- split.
---------------------------- exact H16.
---------------------------- split.
----------------------------- exact H6.
----------------------------- exact H28.
------------------- assert (* Cut *) (~(euclidean__defs.Meet A D B C)) as H30.
-------------------- intro H30.
assert (exists P, (euclidean__axioms.neq A D) /\ ((euclidean__axioms.neq B C) /\ ((euclidean__axioms.Col A D P) /\ (euclidean__axioms.Col B C P)))) as H31 by exact H30.
destruct H31 as [P H32].
destruct H32 as [H33 H34].
destruct H34 as [H35 H36].
destruct H36 as [H37 H38].
assert (* Cut *) (~(A = P)) as H39.
--------------------- intro H39.
assert (* Cut *) (euclidean__axioms.Col B C A) as H40.
---------------------- apply (@eq__ind__r euclidean__axioms.Point P (fun A0 => (euclidean__defs.RE A0 B C D) -> ((euclidean__defs.Per D A0 B) -> ((euclidean__defs.Per A0 B C) -> ((euclidean__defs.Per C D A0) -> ((euclidean__defs.CR A0 C B D) -> ((euclidean__axioms.nCol D A0 B) -> ((euclidean__axioms.nCol A0 B C) -> ((euclidean__axioms.nCol C D A0) -> ((euclidean__axioms.BetS A0 M C) -> ((~(euclidean__defs.Meet A0 B C D)) -> ((euclidean__axioms.neq A0 B) -> ((A0 = A0) -> ((euclidean__axioms.Col A0 B A0) -> ((euclidean__axioms.Col A0 B B) -> ((euclidean__defs.Par A0 B C D) -> ((euclidean__defs.Meet A0 D B C) -> ((euclidean__axioms.neq A0 D) -> ((euclidean__axioms.Col A0 D P) -> (euclidean__axioms.Col B C A0)))))))))))))))))))) with (x := A).
-----------------------intro H40.
intro H41.
intro H42.
intro H43.
intro H44.
intro H45.
intro H46.
intro H47.
intro H48.
intro H49.
intro H50.
intro H51.
intro H52.
intro H53.
intro H54.
intro H55.
intro H56.
intro H57.
exact H38.
----------------------- exact H39.
----------------------- exact H.
----------------------- exact H8.
----------------------- exact H10.
----------------------- exact H14.
----------------------- exact H15.
----------------------- exact H1.
----------------------- exact H2.
----------------------- exact H3.
----------------------- exact H6.
----------------------- exact H16.
----------------------- exact H17.
----------------------- exact H20.
----------------------- exact H21.
----------------------- exact H23.
----------------------- exact H29.
----------------------- exact H30.
----------------------- exact H33.
----------------------- exact H37.
---------------------- assert (* Cut *) (euclidean__axioms.Col A B C) as H41.
----------------------- assert (* Cut *) ((euclidean__axioms.Col C B A) /\ ((euclidean__axioms.Col C A B) /\ ((euclidean__axioms.Col A B C) /\ ((euclidean__axioms.Col B A C) /\ (euclidean__axioms.Col A C B))))) as H41.
------------------------ apply (@lemma__collinearorder.lemma__collinearorder B C A H40).
------------------------ destruct H41 as [H42 H43].
destruct H43 as [H44 H45].
destruct H45 as [H46 H47].
destruct H47 as [H48 H49].
exact H46.
----------------------- apply (@euclidean__tactics.Col__nCol__False C D A H3).
------------------------apply (@euclidean__tactics.not__nCol__Col C D A).
-------------------------intro H42.
apply (@euclidean__tactics.Col__nCol__False A B C H2 H41).
--------------------- assert (* Cut *) (~(B = P)) as H40.
---------------------- intro H40.
assert (* Cut *) (euclidean__axioms.Col A D B) as H41.
----------------------- apply (@eq__ind__r euclidean__axioms.Point P (fun B0 => (euclidean__defs.RE A B0 C D) -> ((euclidean__defs.Per D A B0) -> ((euclidean__defs.Per A B0 C) -> ((euclidean__defs.Per B0 C D) -> ((euclidean__defs.CR A C B0 D) -> ((euclidean__axioms.nCol D A B0) -> ((euclidean__axioms.nCol A B0 C) -> ((euclidean__axioms.BetS B0 M D) -> ((~(euclidean__defs.Meet A B0 C D)) -> ((euclidean__axioms.neq A B0) -> ((euclidean__axioms.Col A B0 A) -> ((B0 = B0) -> ((euclidean__axioms.Col A B0 B0) -> ((euclidean__axioms.BetS D M B0) -> ((euclidean__defs.Par A B0 C D) -> ((euclidean__defs.Meet A D B0 C) -> ((euclidean__axioms.neq B0 C) -> ((euclidean__axioms.Col B0 C P) -> (euclidean__axioms.Col A D B0)))))))))))))))))))) with (x := B).
------------------------intro H41.
intro H42.
intro H43.
intro H44.
intro H45.
intro H46.
intro H47.
intro H48.
intro H49.
intro H50.
intro H51.
intro H52.
intro H53.
intro H54.
intro H55.
intro H56.
intro H57.
intro H58.
exact H37.
------------------------ exact H40.
------------------------ exact H.
------------------------ exact H8.
------------------------ exact H10.
------------------------ exact H12.
------------------------ exact H15.
------------------------ exact H1.
------------------------ exact H2.
------------------------ exact H7.
------------------------ exact H16.
------------------------ exact H17.
------------------------ exact H21.
------------------------ exact H22.
------------------------ exact H23.
------------------------ exact H28.
------------------------ exact H29.
------------------------ exact H30.
------------------------ exact H35.
------------------------ exact H38.
----------------------- assert (* Cut *) (euclidean__axioms.Col D A B) as H42.
------------------------ assert (* Cut *) ((euclidean__axioms.Col D A B) /\ ((euclidean__axioms.Col D B A) /\ ((euclidean__axioms.Col B A D) /\ ((euclidean__axioms.Col A B D) /\ (euclidean__axioms.Col B D A))))) as H42.
------------------------- apply (@lemma__collinearorder.lemma__collinearorder A D B H41).
------------------------- destruct H42 as [H43 H44].
destruct H44 as [H45 H46].
destruct H46 as [H47 H48].
destruct H48 as [H49 H50].
exact H43.
------------------------ apply (@euclidean__tactics.Col__nCol__False C D A H3).
-------------------------apply (@euclidean__tactics.not__nCol__Col C D A).
--------------------------intro H43.
apply (@euclidean__tactics.Col__nCol__False D A B H1 H42).
---------------------- assert (* Cut *) (euclidean__axioms.neq P A) as H41.
----------------------- apply (@lemma__inequalitysymmetric.lemma__inequalitysymmetric A P H39).
----------------------- assert (* Cut *) (euclidean__axioms.Col D A P) as H42.
------------------------ assert (* Cut *) ((euclidean__axioms.Col D A P) /\ ((euclidean__axioms.Col D P A) /\ ((euclidean__axioms.Col P A D) /\ ((euclidean__axioms.Col A P D) /\ (euclidean__axioms.Col P D A))))) as H42.
------------------------- apply (@lemma__collinearorder.lemma__collinearorder A D P H37).
------------------------- destruct H42 as [H43 H44].
destruct H44 as [H45 H46].
destruct H46 as [H47 H48].
destruct H48 as [H49 H50].
exact H43.
------------------------ assert (* Cut *) (euclidean__defs.Per P A B) as H43.
------------------------- apply (@lemma__collinearright.lemma__collinearright D A P B H8 H42 H41).
------------------------- assert (* Cut *) (euclidean__defs.Per C B A) as H44.
-------------------------- apply (@lemma__8__2.lemma__8__2 A B C H10).
-------------------------- assert (* Cut *) (euclidean__axioms.Col C B P) as H45.
--------------------------- assert (* Cut *) ((euclidean__axioms.Col C B P) /\ ((euclidean__axioms.Col C P B) /\ ((euclidean__axioms.Col P B C) /\ ((euclidean__axioms.Col B P C) /\ (euclidean__axioms.Col P C B))))) as H45.
---------------------------- apply (@lemma__collinearorder.lemma__collinearorder B C P H38).
---------------------------- destruct H45 as [H46 H47].
destruct H47 as [H48 H49].
destruct H49 as [H50 H51].
destruct H51 as [H52 H53].
exact H46.
--------------------------- assert (* Cut *) (euclidean__axioms.neq P B) as H46.
---------------------------- apply (@lemma__inequalitysymmetric.lemma__inequalitysymmetric B P H40).
---------------------------- assert (* Cut *) (euclidean__defs.Per P B A) as H47.
----------------------------- apply (@lemma__collinearright.lemma__collinearright C B P A H44 H45 H46).
----------------------------- assert (* Cut *) (euclidean__defs.Per B A P) as H48.
------------------------------ apply (@lemma__8__2.lemma__8__2 P A B H43).
------------------------------ assert (* Cut *) (~(euclidean__defs.Per P B A)) as H49.
------------------------------- apply (@lemma__8__7.lemma__8__7 P A B H48).
------------------------------- apply (@H49 H47).
-------------------- assert (* Cut *) (euclidean__axioms.neq A D) as H31.
--------------------- assert (* Cut *) ((euclidean__axioms.neq C D) /\ ((euclidean__axioms.neq D A) /\ ((euclidean__axioms.neq C A) /\ ((euclidean__axioms.neq D C) /\ ((euclidean__axioms.neq A D) /\ (euclidean__axioms.neq A C)))))) as H31.
---------------------- apply (@lemma__NCdistinct.lemma__NCdistinct C D A H3).
---------------------- destruct H31 as [H32 H33].
destruct H33 as [H34 H35].
destruct H35 as [H36 H37].
destruct H37 as [H38 H39].
destruct H39 as [H40 H41].
exact H40.
--------------------- assert (* Cut *) (euclidean__axioms.neq B C) as H32.
---------------------- assert (* Cut *) ((euclidean__axioms.neq A B) /\ ((euclidean__axioms.neq B C) /\ ((euclidean__axioms.neq A C) /\ ((euclidean__axioms.neq B A) /\ ((euclidean__axioms.neq C B) /\ (euclidean__axioms.neq C A)))))) as H32.
----------------------- apply (@lemma__NCdistinct.lemma__NCdistinct A B C H2).
----------------------- destruct H32 as [H33 H34].
destruct H34 as [H35 H36].
destruct H36 as [H37 H38].
destruct H38 as [H39 H40].
destruct H40 as [H41 H42].
exact H35.
---------------------- assert (D = D) as H33 by exact H26.
assert (* Cut *) (euclidean__axioms.Col A D A) as H34.
----------------------- right.
left.
exact H20.
----------------------- assert (* Cut *) (euclidean__axioms.Col A D D) as H35.
------------------------ right.
right.
left.
exact H33.
------------------------ assert (* Cut *) (euclidean__axioms.Col B C B) as H36.
------------------------- right.
left.
exact H22.
------------------------- assert (* Cut *) (euclidean__axioms.Col B C C) as H37.
-------------------------- right.
right.
left.
exact H24.
-------------------------- assert (* Cut *) (euclidean__defs.Par A D B C) as H38.
--------------------------- exists A.
exists D.
exists B.
exists C.
exists M.
split.
---------------------------- exact H31.
---------------------------- split.
----------------------------- exact H32.
----------------------------- split.
------------------------------ exact H34.
------------------------------ split.
------------------------------- exact H35.
------------------------------- split.
-------------------------------- exact H31.
-------------------------------- split.
--------------------------------- exact H36.
--------------------------------- split.
---------------------------------- exact H37.
---------------------------------- split.
----------------------------------- exact H32.
----------------------------------- split.
------------------------------------ exact H30.
------------------------------------ split.
------------------------------------- exact H6.
------------------------------------- exact H7.
--------------------------- assert (* Cut *) (euclidean__defs.PG A B C D) as H39.
---------------------------- split.
----------------------------- exact H29.
----------------------------- exact H38.
---------------------------- exact H39.
Qed.
|
lemma Im_divide_numeral [simp]: "Im (z / numeral w) = Im z / numeral w" |
State Before: R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : q ∣ p
⊢ p / q ∣ p State After: case pos
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : q ∣ p
hq : q = 0
⊢ p / q ∣ p
case neg
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : q ∣ p
hq : ¬q = 0
⊢ p / q ∣ p Tactic: by_cases hq : q = 0 State Before: case neg
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : q ∣ p
hq : ¬q = 0
⊢ p / q ∣ p State After: case neg
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : q ∣ p
hq : ¬q = 0
⊢ p = p / q * q Tactic: use q State Before: case neg
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : q ∣ p
hq : ¬q = 0
⊢ p = p / q * q State After: no goals Tactic: rw [mul_comm, ← EuclideanDomain.mul_div_assoc _ hpq, mul_comm,
EuclideanDomain.mul_div_cancel _ hq] State Before: case pos
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : q ∣ p
hq : q = 0
⊢ p / q ∣ p State After: case pos
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : p = 0
hq : q = 0
⊢ p / q ∣ p Tactic: rw [hq, zero_dvd_iff] at hpq State Before: case pos
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : p = 0
hq : q = 0
⊢ p / q ∣ p State After: case pos
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : p = 0
hq : q = 0
⊢ 0 / q ∣ 0 Tactic: rw [hpq] State Before: case pos
R : Type u
inst✝ : EuclideanDomain R
p q : R
hpq : p = 0
hq : q = 0
⊢ 0 / q ∣ 0 State After: no goals Tactic: exact dvd_zero _
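Read in order, the tactic states above assemble into a single proof script. A minimal Lean sketch follows, written as an anonymous example because the trace does not record the lemma's name (the statement corresponds to a div_dvd_of_dvd-style lemma; that name and the Mathlib import are assumptions).

import Mathlib

-- Assembled from the tactic trace above; `example` is used since the
-- original lemma name is not shown in the trace.
example {R : Type*} [EuclideanDomain R] {p q : R} (hpq : q ∣ p) : p / q ∣ p := by
  by_cases hq : q = 0
  · -- q = 0: then p = 0, and everything divides 0
    rw [hq, zero_dvd_iff] at hpq
    rw [hpq]
    exact dvd_zero _
  · -- q ≠ 0: exhibit the witness q and cancel it
    use q
    rw [mul_comm, ← EuclideanDomain.mul_div_assoc _ hpq, mul_comm,
      EuclideanDomain.mul_div_cancel _ hq]
|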
! PR libgomp/59467
! { dg-do compile }
! { dg-options "-fopenmp" }
FUNCTION t()
INTEGER :: a, b, t
a = 0
b = 0
!$OMP PARALLEL REDUCTION(+:b)
!$OMP SINGLE ! { dg-error "is not threadprivate or private in outer context" }
!$OMP ATOMIC WRITE
a = 6
!$OMP END SINGLE COPYPRIVATE (a)
b = a
!$OMP END PARALLEL
t = b
b = 0
!$OMP PARALLEL REDUCTION(+:b)
!$OMP SINGLE
!$OMP ATOMIC WRITE
b = 6
!$OMP END SINGLE COPYPRIVATE (b)
!$OMP END PARALLEL
t = t + b
END FUNCTION
|
Baw Baw NP - Photo: Kelby Douglas.
Covering a substantial part of the Baw Baw Plateau and sections of the Thomson and Aberfeldy River valleys, Baw Baw National Park offers colourful wildflowers in early summer and open grassy plains with Snow Gum woodlands.
Mount St Gwinear, Mount Erica and the Baw Baw Alpine Village are ideal bases for bushwalking.
The Thomson River downstream of the Thomson Dam offers some of the best white water rafting in Victoria. Several tour companies provide rafting tours all year round.
In winter, the park is ideal for ski touring and has a variety of marked trails to suit the skills of skiers. Trails can be followed from the St Gwinear car park on to the plateau or further to the Baw Baw Alpine Resort.
Parks Victoria acknowledges the Aboriginal Traditional Owners of Victoria - including its parks and reserves. Through their cultural traditions, the Gunaikurnai identify the Baw Baw National Park as their Traditional Country.
The park is situated approximately 120 kilometres east of Melbourne and 50 kilometres north of the Latrobe Valley.
The main access to the plateau is from the Mount Erica and St Gwinear carparks along signposted roads off the Thomson Valley Road, north of Erica. The Baw Baw Alpine Resort which adjoins the park can be reached from the Princes Highway at Drouin by following the Mount Baw Baw Tourist Road.
Rockclimbing and abseiling: There is the opportunity for rockclimbing and abseiling at Mushroom Rocks.
!***********************************************************************
SUBROUTINE lodcslmpiGG (nfile, ncore, jblock)
! An MPI container of lodcsh2 which loads CSL list of the current block
! into memory. It forwards the call together with the same set of
! parameters to lodcsh2 and then broadcasts the results to all nodes.
!
! Note: Memories have been allocated/deallocated each block outside.
! This subroutine calls lodcsh2 on node-0 to generate the data for the
! block; and then broadcasts to all other nodes. A new MPI data type
! of 4 byte-long is created to handle 64-bit machines whose MPI
! implementation does not support 4-byte integers. If jblock=-119,
! then ALL blocks will be loaded instead of just one. This is
! implemented in lodcsh2.
!
! Currently used by rcimpivu, mcpmpi, rscfmpivu
!
! Xinghong He 98-08-06
!
!***********************************************************************
!************************************************************************
!...Translated by Pacific-Sierra Research 77to90 4.3E 14:04:58 1/ 3/07
!...Modified by Charlotte Froese Fischer
! Gediminas Gaigalas 10/05/17
!-----------------------------------------------
! M o d u l e s
!-----------------------------------------------
USE vast_kind_param, ONLY: BYTE
USE parameter_def, ONLY: NNNW
USE ORB_C, ONLY: NCF, IQA
USE syma_C, ONLY: JPGG, nblk0
use mpi_C
IMPLICIT NONE
!-----------------------------------------------
! D u m m y A r g u m e n t s
!-----------------------------------------------
INTEGER :: nfile, ncore, jblock
!-----------------------------------------------
! L o c a l V a r i a b l e s
!-----------------------------------------------
INTEGER(BYTE) :: MPIX_INT1
!-----------------------------------------------------------------------
!
IF (myid == 0) THEN
!GG CALL lodcsh2 ((nfile), (ncore), (jblock))
CALL lodcsh2GG ((nfile), (ncore), (jblock))
ENDIF
! Construct mpi data type for Integer*1 and then broadcast.
!cjb mpix_bytes
!cjb if your compiler or your MPI does not accept type MPIX_INT1
!cjb try standard MPI type MPI_INTEGER1
CALL mpix_bytes (1, MPIX_INT1, ierr)
CALL MPI_Bcast (IQA(:,:),NNNW*NCF,MPIX_INT1,0,MPI_COMM_WORLD,ierr)
!cjb CALL MPI_Bcast (IQA(:,:),NNNW*NCF,MPI_INTEGER1,0,MPI_COMM_WORLD,ierr)
CALL MPI_Bcast (JPGG(:),nblk0,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
!GG CALL MPI_Bcast &
!GG (JQSA(:,:,:),3*NNNW*NCF,MPIX_INT1,0,MPI_COMM_WORLD,ierr)
!GG CALL MPI_Bcast &
!GG (JCUPA(:,:), NNNW*NCF,MPIX_INT1,0,MPI_COMM_WORLD,ierr)
RETURN
END SUBROUTINE lodcslmpiGG
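For comparison, a minimal C sketch of the same one-byte broadcast pattern (rank 0 owns the data, every rank receives). The function name and sizes are illustrative only; MPI_INT8_T plays the role that the custom MPIX_INT1 type built by mpix_bytes plays above.

#include <mpi.h>
#include <stdint.h>

/* Broadcast a 1-byte-per-element array from rank 0 to all ranks,
   mirroring the IQA broadcast above. */
void bcast_bytes(int8_t *buf, int count)
{
    /* MPI_INT8_T (standard since MPI 2.2) avoids hand-building a 1-byte
       datatype; mpix_bytes serves that purpose for implementations
       lacking native support for 1-byte integers. */
    MPI_Bcast(buf, count, MPI_INT8_T, 0, MPI_COMM_WORLD);
}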
|
import pickle
import numpy as np
from SecNet import SecNet, ReconnectionPolicy
from plots_sis import beta_plot, density_plot
from matplotlib.pyplot import figure, show
import matplotlib.pyplot as plt
g = pickle.load(open('graphs/new.pickle', 'rb'))
def replicate_density(niter=100,
                      beta=0.6,
                      default_delay=2,
                      policy=ReconnectionPolicy.SOFT,
                      file_plot='images/Replicate_density/beta_0.4'):
    dens = []
    fig = figure()
    plot = fig.add_subplot(111)
    for i in range(niter):
        sn = SecNet(g, mu=0.2,
                    beta=beta,
                    reconnection_policy=policy,
                    default_delay=default_delay,
                    weight_transfer=False)
        sn.run(400, variation_coeff=10e-5)
        plot.plot(sn.defaulted_density)
        dens.append(sn.defaulted_density[-1])  # final defaulted density of this run
    fig.suptitle('Simulations with n = %s' % niter)
    plt.xlabel('Iterations')
    plt.ylabel('Density')
    file = file_plot + str(policy) + str(beta) + str(default_delay) + '.png'
    fig.savefig(file)
    show()
    return dens
def pct_density(dens, niter=100):
    # Fraction of runs whose final defaulted density exceeds 0.1
    count = 0
    for i in range(niter):
        if dens[i] > 0.1:
            count += 1
    return count / niter
|
Our school invites outstanding resource persons and facilitators from diverse backgrounds to conduct sessions and interactive talks with our pupils. This enables and promotes collaboration and knowledge sharing. We ensure that this connection engages both our faculty and learners.
This session offers a general introduction to a range of scientific and development challenges of the 21st century. It has been designed and conducted for our post-secondary learners and addresses issues of climate change, conservation and urbanization. Each student is challenged to adopt methods (scientific or non-scientific) appropriate to the questions posed in the plenary session.
Our school organizes the annual Maths seminar that aims to generate interest in mathematics among primary and secondary students. The theme of this is “Math Alive!” where the use of mathematics in science and everyday life is highlighted.
A mathematics project competition is given to all schools who participate in this endeavour.
This convention prepares older pupils for the skills they need to cope with student life at college, such as study skills, time management, self-motivation (yes, you have to go to your morning lecture!), managing finances, and maintaining a healthy lifestyle on a budget, as well as writing perfect personal statements.
Be more than a boss, be a leader! We show pupils how to walk the tightrope of diplomacy, negotiation and conflict resolution in our fast-paced world.
This nurtures the budding entrepreneur in our learners by testing their problem-solving and initiative skills. It shows them how to turn an idea into a business.
This is mainly a science oriented symposium in which our institution collaborates with national scientific laboratories. Various teams construct a complex machine (the Amazing Machine) that performs a seemingly simple task in as many steps as possible. Teams are usually judged on their creativity and the incorporation of scientific concepts.
This is a student-initiated event that aims to raise awareness of social, economic, political and environmental issues revolving around the central theme of sustainable development. This convention brings together many secondary participants from various schools and provides a platform to raise their concerns and share their insights. Participants are often exposed and encouraged to ponder about the interplay between scientific developments and public policy.
It provides innovating and exciting science activities for primary learners. Our aim is to excite and engage children with the wonder of science through hands-on science investigations and demonstrations. This allows all pupils to engage in scientific investigations to stimulate questioning and kinaesthetic learning.
Times have changed. Our young people now face an increasingly competitive job market. Whatever they choose to do with their future, we want them to succeed. So we have collaborated with industry-leading business men and women to create lifelong learning sessions. These help learners become the candidates employers want through our practical facilitation on employability skills. They learn to write eye-catching CVs and develop interview techniques, communication and leadership skills.
[STATEMENT]
lemma PseudoHA_Events [simp]:
"HAEvents (PseudoHA SA D) = SAEvents SA"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. HAEvents (PseudoHA SA D) = SAEvents SA
[PROOF STEP]
by (unfold PseudoHA_def HAEvents_def, simp add: Abs_hierauto_inverse) |
program ppraxis_crt
implicit none
integer :: i
do i = 1, 1716
if (mod(i,13) == 12 .and. mod(i,12) == 4 .and. mod(i,11) == 10) then
write(*,*) i
stop
end if
end do
stop
end program ppraxis_crt
|