# From https://github.com/vchuravy/GPUifyLoops.jl/blob/b4d43198ec4aee75e6bcfdda7165ed5e469d2df5/src/loopinfo.jl
module LoopInfo

const HAS_LOOPINFO_EXPR = VERSION >= v"1.2.0-DEV.462"

export @unroll

##
# Uses the loopinfo expr node to attach LLVM loopinfo to loops.
# The full list of supported metadata nodes is available at
# https://llvm.org/docs/LangRef.html#llvm-loop
#
# TODO: Figure out how to deal with compile-time constants in `@unroll(N, expr)`,
# i.e. constants that come from `Val{N}` and are therefore not parse-time
# constants. Most likely this will require changes to base Julia.
##
module MD
    unroll_count(n) = (Symbol("llvm.loop.unroll.count"), convert(Int, n))
    unroll_disable() = (Symbol("llvm.loop.unroll.disable"), 1)
    unroll_enable() = (Symbol("llvm.loop.unroll.enable"), 1)
    unroll_full() = (Symbol("llvm.loop.unroll.full"), 1)
end

function loopinfo(expr, nodes...)
    if expr.head != :for
        error("Syntax error: loopinfo needs a for loop")
    end
    if HAS_LOOPINFO_EXPR
        push!(expr.args[2].args, Expr(:loopinfo, nodes...))
    end
    return expr
end

"""
    @unroll expr

Takes a for loop as `expr` and informs the LLVM unroller to fully unroll it,
if it is safe to do so and the loop count is known.
"""
macro unroll(expr)
    expr = loopinfo(expr, MD.unroll_full())
    return esc(expr)
end

"""
    @unroll N expr

Takes a for loop as `expr` and informs the LLVM unroller to unroll it `N`
times, if it is safe to do so.
"""
macro unroll(N, expr)
    if !(N isa Integer)
        error("Syntax error: `@unroll N expr` needs a constant integer N")
    end
    expr = loopinfo(expr, MD.unroll_count(N))
    return esc(expr)
end

end #module
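# A minimal usage sketch (added; not part of the original file). `@unroll`
# is applied directly to a `for` loop; full unrolling only takes effect when
# LLVM can prove the trip count, as in the hypothetical function below.
using .LoopInfo

function sumsq(xs::NTuple{4,Float64})
    acc = 0.0
    @unroll for i in 1:4          # parse-time constant bounds
        acc += xs[i] * xs[i]
    end
    return acc
end

# To request an explicit unroll factor instead: `@unroll 2 for i in 1:8 ... end`.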
|
lemma open_path_connected_component: fixes S :: "'a :: real_normed_vector set" shows "open S \<Longrightarrow> path_component S x = connected_component S x" |
Formal statement is: lemma filterlim_at_infinity: fixes f :: "_ \<Rightarrow> 'a::real_normed_vector" assumes "0 \<le> c" shows "(LIM x F. f x :> at_infinity) \<longleftrightarrow> (\<forall>r>c. eventually (\<lambda>x. r \<le> norm (f x)) F)" Informal statement is: If $0 \leq c$, then $f$ tends to infinity along the filter $F$ if and only if for every $r > c$, eventually $r \leq \lVert f(x) \rVert$ holds in $F$. |
Require Import ExtLib.Data.HList.
Require Import ExtLib.Core.RelDec.
Require Import Charge.Logics.ILogic.
Require Import Charge.ModularFunc.ListType.
Require Import Charge.ModularFunc.BaseType.
Require Import Charge.ModularFunc.ListFunc.
Require Import Charge.ModularFunc.BaseFunc.
Require Import Charge.ModularFunc.SemiEqDecTyp.
Require Import Charge.Tactics.Base.DenotationTacs.
Require Import Charge.Tactics.Base.MirrorCoreTacs.
Require Import ExtLib.Tactics.
Require Import MirrorCore.TypesI.
Require Import MirrorCore.RTac.Core.
Require Import MirrorCore.Lambda.AppN.
Require Import MirrorCore.Lambda.Red.
Require Import MirrorCore.Lambda.RedAll.
Require Import MirrorCore.Lambda.Expr.
Section Length.
Context {typ func : Type} {RType_typ : RType typ} {RSym_func : RSym func}
{BT : BaseType typ} {BTD : BaseTypeD BT}
{LT : ListType typ} {LTD: ListTypeD LT}.
Context {BF : BaseFunc typ func} {LF : ListFunc typ func}.
Context {RelDec_eq : RelDec (@eq typ)} {RelDecOk_eq : RelDec_Correct RelDec_eq}.
Context {Heqd : SemiEqDecTyp typ} {HeqdOk : SemiEqDecTypOk Heqd}.
Context {EU : ExprUVar (expr typ func)}.
Context {RType_typOk : RTypeOk} {RsymOk_func : RSymOk RSym_func}.
Context {Typ0_tyProp : Typ0 _ Prop}.
Context {Typ2_tyArr : Typ2 _ Fun}.
Context {Typ0Ok_tyProp : Typ0Ok Typ0_tyProp}.
Context {Typ2Ok_tyArr : Typ2Ok Typ2_tyArr}.
Context {BFOk : BaseFuncOk typ func } {LFOk : ListFuncOk typ func}.
Let tyArr : typ -> typ -> typ := @typ2 _ _ _ _.
Let tyProp := @typ0 typ RType_typ Prop Typ0_tyProp.
(* This function will return None unless lst eventually reaches nil. This means that we cannot partially
   evaluate the length of a list with an unknown tail (e.g. length (1::2::3::lst) = 3 + length lst).
   To be able to do this, we would need a language that supports arithmetic operations (at least +)
   on natural numbers. This is a perfectly natural thing to have, but it is not implemented at the moment. *)
Fixpoint lengthTacAux (t : typ) (lst : expr typ func) : option nat :=
match lst with
| App (App f x) xs =>
match listS f with
| Some (pCons _) =>
match lengthTacAux t xs with
| Some x => Some (S x)
| None => None
end
| _ => None
end
| _ => None
end.
Definition lengthTac (_ : list (option (expr typ func))) (e : expr typ func) (args : list (expr typ func)) : expr typ func :=
match listS e, args with
| Some (pLength t), lst::nil =>
match baseS lst with
| Some (pConst u c) =>
match type_cast (tyList t) u with
| Some pf => mkNat (natR (length (listD (eq_rect_r typD c pf))))
| None => apps e args
end
| _ =>
match lengthTacAux t lst with
| Some l => mkNat (natR l)
| None => apps e args
end
end
| _, _ => apps e args
end.
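(* Note (added): a usage sketch of the definition above. Applied to an
   application of [pLength t] to a single list argument, [lengthTac]
   rewrites a constant list (via its denotation) or a list it can fully
   traverse (via [lengthTacAux]) to a numeral built with [mkNat]; any
   other argument is left untouched as [apps e args]. *)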
Existing Instance Expr_expr.
Existing Instance ExprOk_expr.
End Length. |
module Addition.Absorb
import Data.Vect
import Common.Util
import Common.Interfaces
import Specifications.DiscreteOrderedGroup
import Specifications.OrderedRing
import Proofs.GroupTheory
import Addition.Carry
import Addition.Scaling
import Addition.AbsorptionLemmas
import Addition.Adhoc
%default total
%access export
||| This is a proof-friendly semantics function. Consider a tail-recursive
||| variation for run-time use.
public export
phi : Ringops s => (radix : s) -> (lsdf : Vect n s) -> (msc : Carry) -> s
phi radix (x :: xs) c = x + radix * phi radix xs c
phi radix [] c = value c
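-- Note (added): unfolding the recursion gives the usual radix expansion,
--   phi r [x0, x1, ..., x(n-1)] c = x0 + x1*r + ... + x(n-1)*r^(n-1) + (value c)*r^n
-- so the carry is absorbed as the most significant digit.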
||| The result of absorbing carry digits:
|||
||| in1 in2 in3
||| ou1 ou2 pen + unk
||| msc abs abs unk
|||
||| unk = the least significant carry is still unknown
||| pen = output of reduction before absorbing the unknown carry
||| msc = most significant carry
||| abs = carry already absorbed in the corresponding output
data Absorption :
(k : Nat) ->
(constraints : s -> Vect k s -> Type) ->
(semantics : Vect (S k) s -> Carry -> s) ->
(inputs : Vect (S k) s) -> Type
where MkAbsorption :
(msc : Carry) ->
(pending : s) ->
(outputs : Vect k s) ->
(constraints pending outputs) ->
(semantics inputs O = semantics (pending :: outputs) msc) ->
Absorption k constraints semantics inputs
outputs : Absorption {s} k _ _ _ -> (Carry, Vect (S k) s)
outputs (MkAbsorption c p o _ _) = (c, reverse (p :: o))
absorptionBase : Ringops s =>
DiscreteOrderedRingSpec {s} (+) Zero Ng (*) leq One ->
(radix : s) ->
(red : Reduction (+) Zero Ng leq One u radix) ->
Absorption Z (Ranges leq Ng u (u + Ng One)) (phi radix) [input red]
absorptionBase spec radix (MkReduction i c o invariant outRange) =
MkAbsorption c o [] (MkRanges outRange []) o3
where
o1 : o + radix * value c = i
o1 = rewriteInvariant (unitalRing spec) radix i o c invariant
o2 : i = i + radix * value O
o2 = adhocIdentity2 (ring (unitalRing spec)) i radix
o3 : phi radix [i] O = phi radix [o] c
o3 = sym (o1 === o2)
arithLemma : Ringops s => UnitalRingSpec {s} (+) Zero Ng (*) One ->
(msc : Carry) ->
(pending : s) ->
(outputs : Vect k s) ->
(inputs : Vect (S k) s) ->
(red : Reduction {s} (+) Zero Ng _ One u radix) ->
(ih : phi radix inputs O = phi radix (pending :: outputs) msc) ->
phi radix (input red :: inputs) O =
phi radix (output red :: (value (carry red) + pending) :: outputs) msc
arithLemma {s} {radix} spec msc pending outputs inputs
(MkReduction i c o invariant _) inductionHypothesis =
let
adhoc = adhocIdentity1 (ring spec) pending o radix (value c) o2
shift = radix * phi radix inputs O
shifted = cong {f = (+ shift)} o1
in shifted @== adhoc
where
o1 : o + radix * value c = i
o1 = rewriteInvariant spec radix i o c invariant
o2 : phi radix inputs O = pending + radix * phi radix outputs msc
o2 = inductionHypothesis
absorptionStep : Ringops s =>
DiscreteOrderedRingSpec {s} (+) Zero Ng (*) leq One ->
(radix : s) ->
(red : Reduction (+) Zero Ng leq One u radix) ->
Absorption k (Ranges leq Ng u (u + Ng One)) (phi radix) inputs ->
Absorption (S k) (Ranges leq Ng u (u + Ng One))
(phi radix) (input red :: inputs)
absorptionStep spec radix red@(MkReduction _ _ _ _ reducedRange)
(MkAbsorption {inputs} msc pending outputs ranges invariant) =
let absorb = value (carry red) + pending
in MkAbsorption msc (output red) (absorb :: outputs)
(rangeLemma (discreteOrderedGroup spec) ranges reducedRange (carry red))
(arithLemma (unitalRing spec) msc pending outputs inputs red invariant)
|
(* *********************************************************************)
(* *)
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
(* under the terms of the GNU General Public License as published by *)
(* the Free Software Foundation, either version 2 of the License, or *)
(* (at your option) any later version. This file is also distributed *)
(* under the terms of the INRIA Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
Require Import Coqlib.
Require Import Integers.
Require Import AST.
Require Import Values.
Require Import Memory.
Require Import Globalenvs.
Require Import Events.
Inductive builtin_function: Type :=
| EF_vload (chunk: memory_chunk)
(** A volatile read operation. If the address given as first argument
points within a volatile global variable, generate an
event and return the value found in this event. Otherwise,
produce no event and behave like a regular memory load. *)
| EF_vstore (chunk: memory_chunk)
(** A volatile store operation. If the address given as first argument
points within a volatile global variable, generate an event.
Otherwise, produce no event and behave like a regular memory store. *)
| EF_vload_global (chunk: memory_chunk) (id: ident) (ofs: int)
(** A volatile load operation from a global variable.
Specialized version of [EF_vload]. *)
| EF_vstore_global (chunk: memory_chunk) (id: ident) (ofs: int)
(** A volatile store operation in a global variable.
Specialized version of [EF_vstore]. *)
| EF_memcpy (sz: Z) (al: Z)
(** Block copy, of [sz] bytes, between addresses that are [al]-aligned. *)
| EF_annot (text: ident) (targs: list annot_arg)
(** A programmer-supplied annotation. Takes zero, one or several arguments,
produces an event carrying the text and the values of these arguments,
and returns no value. *)
| EF_annot_val (text: ident) (targ: typ)
(** Another form of annotation that takes one argument, produces
an event carrying the text and the value of this argument,
and returns the value of the argument. *)
| EF_inline_asm (text: ident).
(** Inline [asm] statements. Semantically, treated like an
annotation with no parameters ([EF_annot text nil]). To be
used with caution, as it can invalidate the semantic
preservation theorem. Generated only if [-finline-asm] is
given. *)
Definition bf_sig (bf: builtin_function): signature :=
match bf with
| EF_vload chunk => mksignature (Tint :: nil) (Some (type_of_chunk chunk))
| EF_vstore chunk => mksignature (Tint :: type_of_chunk chunk :: nil) None
| EF_vload_global chunk _ _ => mksignature nil (Some (type_of_chunk chunk))
| EF_vstore_global chunk _ _ => mksignature (type_of_chunk chunk :: nil) None
| EF_memcpy sz al => mksignature (Tint :: Tint :: nil) None
| EF_annot text targs => mksignature (annot_args_typ targs) None
| EF_annot_val text targ => mksignature (targ :: nil) (Some targ)
| EF_inline_asm text => mksignature nil None
end.
Definition bf_reloads (bf: builtin_function) : bool :=
match bf with
| EF_annot text targs => false
| _ => true
end.
Global Instance bf_ops: ExtFunOps builtin_function := {
ef_sig := bf_sig;
ef_reloads := bf_reloads
}.
Global Instance bf_proof: ExternalFunctions builtin_function := {
}.
(** ** Semantics of volatile loads *)
Section WITHMEM.
Context `{Hcc: CompilerConfiguration}.
Set Implicit Arguments.
Inductive volatile_load_sem (chunk: memory_chunk) (F V: Type) (ge: Genv.t F V):
list val -> mem -> trace -> val -> mem -> Prop :=
| volatile_load_sem_intro: forall b ofs m t v,
volatile_load ge chunk m b ofs t v ->
volatile_load_sem chunk ge (Vptr b ofs :: nil) m t v m.
Lemma volatile_load_preserved:
forall F1 V1 (ge1: Genv.t F1 V1) F2 V2 (ge2: Genv.t F2 V2) chunk m b ofs t v,
(forall id, Genv.find_symbol ge2 id = Genv.find_symbol ge1 id) ->
(forall b, block_is_volatile ge2 b = block_is_volatile ge1 b) ->
volatile_load ge1 chunk m b ofs t v ->
volatile_load ge2 chunk m b ofs t v.
Proof.
intros. inv H1; constructor; auto.
rewrite H0; auto.
rewrite H; auto.
eapply eventval_match_preserved; eauto.
rewrite H0; auto.
Qed.
Lemma volatile_load_extends:
forall F V (ge: Genv.t F V) chunk m b ofs t v m',
volatile_load ge chunk m b ofs t v ->
Mem.extends m m' ->
exists v', volatile_load ge chunk m' b ofs t v' /\ Val.lessdef v v'.
Proof.
intros. inv H.
econstructor; split; eauto. econstructor; eauto.
exploit Mem.load_extends; eauto. intros [v' [A B]]. exists v'; split; auto. constructor; auto.
Qed.
Remark meminj_preserves_block_is_volatile:
forall F V (ge: Genv.t F V) f b1 b2 delta,
meminj_preserves_globals ge f ->
f b1 = Some (b2, delta) ->
block_is_volatile ge b2 = block_is_volatile ge b1.
Proof.
intros. destruct H as [A [B C]]. unfold block_is_volatile.
case_eq (Genv.find_var_info ge b1); intros.
exploit B; eauto. intro EQ; rewrite H0 in EQ; inv EQ. rewrite H; auto.
case_eq (Genv.find_var_info ge b2); intros.
exploit C; eauto. intro EQ. congruence.
auto.
Qed.
Lemma volatile_load_inject:
forall F V (ge: Genv.t F V) f chunk m b ofs t v b' ofs' m',
meminj_preserves_globals ge f ->
volatile_load ge chunk m b ofs t v ->
val_inject f (Vptr b ofs) (Vptr b' ofs') ->
Mem.inject f m m' ->
exists v', volatile_load ge chunk m' b' ofs' t v' /\ val_inject f v v'.
Proof.
intros. inv H0.
inv H1. exploit (proj1 H); eauto. intros EQ; rewrite H8 in EQ; inv EQ.
rewrite Int.add_zero. exists (Val.load_result chunk v0); split.
constructor; auto.
apply val_load_result_inject. eapply eventval_match_inject_2; eauto.
exploit Mem.loadv_inject; eauto. simpl; eauto. simpl; intros [v' [A B]]. exists v'; split; auto.
constructor; auto. rewrite <- H3. inv H1. eapply meminj_preserves_block_is_volatile; eauto.
Qed.
Lemma volatile_load_receptive:
forall F V (ge: Genv.t F V) chunk m b ofs t1 t2 v1,
volatile_load ge chunk m b ofs t1 v1 -> match_traces ge t1 t2 ->
exists v2, volatile_load ge chunk m b ofs t2 v2.
Proof.
intros. inv H; inv H0.
exploit eventval_match_valid; eauto. intros [A B].
exploit eventval_valid_match. eexact H9. rewrite <- H10; eauto.
intros [v' EVM]. exists (Val.load_result chunk v'). constructor; auto.
exists v1; constructor; auto.
Qed.
Lemma volatile_load_ok:
forall chunk,
extcall_properties (volatile_load_sem chunk)
(mksignature (Tint :: nil) (Some (type_of_chunk chunk))).
Proof.
intros; constructor; intros.
(* well typed *)
unfold proj_sig_res; simpl. inv H. inv H0.
destruct chunk; destruct v; simpl; constructor.
eapply Mem.load_type; eauto.
(* arity *)
inv H; inv H0; auto.
(* symbols *)
inv H1. constructor. eapply volatile_load_preserved; eauto.
(* valid blocks *)
inv H; auto.
(* max perms *)
inv H; auto.
(* readonly *)
inv H; auto.
(* mem extends *)
inv H. inv H1. inv H6. inv H4.
exploit volatile_load_extends; eauto. intros [v' [A B]].
exists v'; exists m1'; intuition. constructor; auto. red; auto.
(* mem injects *)
inv H0. inv H2. inv H7. inversion H5; subst.
exploit volatile_load_inject; eauto. intros [v' [A B]].
exists f; exists v'; exists m1'; intuition. constructor; auto.
red; auto. red; auto. red; intros. congruence.
(* trace length *)
inv H; inv H0; simpl; omega.
(* receptive *)
inv H. exploit volatile_load_receptive; eauto. intros [v2 A].
exists v2; exists m1; constructor; auto.
(* determ *)
inv H; inv H0. inv H1; inv H7; try congruence.
assert (id = id0) by (eapply Genv.genv_vars_inj; eauto). subst id0.
exploit eventval_match_valid. eexact H2. intros [V1 T1].
exploit eventval_match_valid. eexact H4. intros [V2 T2].
split. constructor; auto. congruence.
intros EQ; inv EQ.
assert (v = v0) by (eapply eventval_match_determ_1; eauto). subst v0.
auto.
split. constructor. intuition congruence.
Qed.
Inductive volatile_load_global_sem (chunk: memory_chunk) (id: ident) (ofs: int)
(F V: Type) (ge: Genv.t F V):
list val -> mem -> trace -> val -> mem -> Prop :=
| volatile_load_global_sem_intro: forall b t v m,
Genv.find_symbol ge id = Some b ->
volatile_load ge chunk m b ofs t v ->
volatile_load_global_sem chunk id ofs ge nil m t v m.
Remark volatile_load_global_charact:
forall chunk id ofs (F V: Type) (ge: Genv.t F V) vargs m t vres m',
volatile_load_global_sem chunk id ofs ge vargs m t vres m' <->
exists b, Genv.find_symbol ge id = Some b /\ volatile_load_sem chunk ge (Vptr b ofs :: vargs) m t vres m'.
Proof.
intros; split.
intros. inv H. exists b; split; auto. constructor; auto.
intros [b [P Q]]. inv Q. econstructor; eauto.
Qed.
Lemma volatile_load_global_ok:
forall chunk id ofs,
extcall_properties (volatile_load_global_sem chunk id ofs)
(mksignature nil (Some (type_of_chunk chunk))).
Proof.
intros; constructor; intros.
(* well typed *)
unfold proj_sig_res; simpl. inv H. inv H1.
destruct chunk; destruct v; simpl; constructor.
eapply Mem.load_type; eauto.
(* arity *)
inv H; inv H1; auto.
(* symbols *)
inv H1. econstructor. rewrite H; eauto. eapply volatile_load_preserved; eauto.
(* valid blocks *)
inv H; auto.
(* max perm *)
inv H; auto.
(* readonly *)
inv H; auto.
(* extends *)
inv H. inv H1. exploit volatile_load_extends; eauto. intros [v' [A B]].
exists v'; exists m1'; intuition. econstructor; eauto. red; auto.
(* inject *)
inv H0. inv H2.
assert (val_inject f (Vptr b ofs) (Vptr b ofs)).
exploit (proj1 H); eauto. intros EQ. econstructor. eauto. rewrite Int.add_zero; auto.
exploit volatile_load_inject; eauto. intros [v' [A B]].
exists f; exists v'; exists m1'; intuition. econstructor; eauto.
red; auto. red; auto. red; intros; congruence.
(* trace length *)
inv H; inv H1; simpl; omega.
(* receptive *)
inv H. exploit volatile_load_receptive; eauto. intros [v2 A].
exists v2; exists m1; econstructor; eauto.
(* determ *)
rewrite volatile_load_global_charact in *.
destruct H as [b1 [A1 B1]]. destruct H0 as [b2 [A2 B2]].
rewrite A1 in A2; inv A2.
eapply ec_determ. eapply volatile_load_ok. eauto. eauto.
Qed.
(** ** Semantics of volatile stores *)
Inductive volatile_store_sem (chunk: memory_chunk) (F V: Type) (ge: Genv.t F V):
list val -> mem -> trace -> val -> mem -> Prop :=
| volatile_store_sem_intro: forall b ofs m1 v t m2,
volatile_store ge chunk m1 b ofs v t m2 ->
volatile_store_sem chunk ge (Vptr b ofs :: v :: nil) m1 t Vundef m2.
Lemma volatile_store_preserved:
forall F1 V1 (ge1: Genv.t F1 V1) F2 V2 (ge2: Genv.t F2 V2) chunk m1 b ofs v t m2,
(forall id, Genv.find_symbol ge2 id = Genv.find_symbol ge1 id) ->
(forall b, block_is_volatile ge2 b = block_is_volatile ge1 b) ->
volatile_store ge1 chunk m1 b ofs v t m2 ->
volatile_store ge2 chunk m1 b ofs v t m2.
Proof.
intros. inv H1; constructor; auto.
rewrite H0; auto.
rewrite H; auto.
eapply eventval_match_preserved; eauto.
rewrite H0; auto.
Qed.
Lemma volatile_store_readonly:
forall F V (ge: Genv.t F V) chunk1 m1 b1 ofs1 v t m2 chunk ofs b,
volatile_store ge chunk1 m1 b1 ofs1 v t m2 ->
Mem.valid_block m1 b ->
(forall ofs', ofs <= ofs' < ofs + size_chunk chunk ->
~(Mem.perm m1 b ofs' Max Writable)) ->
Mem.load chunk m2 b ofs = Mem.load chunk m1 b ofs.
Proof.
intros. inv H.
auto.
eapply Mem.load_store_other; eauto.
destruct (eq_block b b1); auto. subst b1. right.
apply (Intv.range_disjoint' (ofs, ofs + size_chunk chunk)
(Int.unsigned ofs1, Int.unsigned ofs1 + size_chunk chunk1)).
red; intros; red; intros.
elim (H1 x); auto.
exploit Mem.store_valid_access_3; eauto. intros [A B].
apply Mem.perm_cur_max. apply A. auto.
simpl. generalize (size_chunk_pos chunk); omega.
simpl. generalize (size_chunk_pos chunk1); omega.
Qed.
Lemma volatile_store_extends:
forall F V (ge: Genv.t F V) chunk m1 b ofs v t m2 m1' v',
volatile_store ge chunk m1 b ofs v t m2 ->
Mem.extends m1 m1' ->
Val.lessdef v v' ->
exists m2',
volatile_store ge chunk m1' b ofs v' t m2'
/\ Mem.extends m2 m2'
/\ mem_unchanged_on (loc_out_of_bounds m1) m1' m2'.
Proof.
intros. inv H.
econstructor; split. econstructor; eauto. eapply eventval_match_lessdef; eauto.
split. auto. red; auto.
exploit Mem.store_within_extends; eauto. intros [m2' [A B]].
exists m2'; intuition. econstructor; eauto.
red; split; intros.
eapply Mem.perm_store_1; eauto.
rewrite <- H4. eapply Mem.load_store_other; eauto.
destruct (eq_block b0 b); auto. subst b0; right.
apply (Intv.range_disjoint' (ofs0, ofs0 + size_chunk chunk0)
(Int.unsigned ofs, Int.unsigned ofs + size_chunk chunk)).
red; intros; red; intros.
exploit (H x H5). exploit Mem.store_valid_access_3. eexact H3. intros [E G].
apply Mem.perm_cur_max. apply Mem.perm_implies with Writable; auto with mem.
auto.
simpl. generalize (size_chunk_pos chunk0). omega.
simpl. generalize (size_chunk_pos chunk). omega.
Qed.
Lemma volatile_store_inject:
forall F V (ge: Genv.t F V) f chunk m1 b ofs v t m2 m1' b' ofs' v',
meminj_preserves_globals ge f ->
volatile_store ge chunk m1 b ofs v t m2 ->
val_inject f (Vptr b ofs) (Vptr b' ofs') ->
val_inject f v v' ->
Mem.inject f m1 m1' ->
exists m2',
volatile_store ge chunk m1' b' ofs' v' t m2'
/\ Mem.inject f m2 m2'
/\ mem_unchanged_on (loc_unmapped f) m1 m2
/\ mem_unchanged_on (loc_out_of_reach f m1) m1' m2'.
Proof.
intros. inv H0.
inv H1. exploit (proj1 H); eauto. intros EQ; rewrite H9 in EQ; inv EQ.
rewrite Int.add_zero. exists m1'.
split. constructor; auto. eapply eventval_match_inject; eauto.
split. auto. split. red; auto. red; auto.
assert (Mem.storev chunk m1 (Vptr b ofs) v = Some m2). simpl; auto.
exploit Mem.storev_mapped_inject; eauto. intros [m2' [A B]].
inv H1. exists m2'; intuition.
constructor; auto. rewrite <- H4. eapply meminj_preserves_block_is_volatile; eauto.
split; intros. eapply Mem.perm_store_1; eauto.
rewrite <- H6. eapply Mem.load_store_other; eauto.
left. exploit (H1 ofs0). generalize (size_chunk_pos chunk0). omega.
unfold loc_unmapped. congruence.
split; intros. eapply Mem.perm_store_1; eauto.
rewrite <- H6. eapply Mem.load_store_other; eauto.
destruct (eq_block b0 b'); auto. subst b0; right.
assert (EQ: Int.unsigned (Int.add ofs (Int.repr delta)) = Int.unsigned ofs + delta).
eapply Mem.address_inject; eauto with mem.
unfold Mem.storev in A. rewrite EQ in A. rewrite EQ.
apply (Intv.range_disjoint' (ofs0, ofs0 + size_chunk chunk0)
(Int.unsigned ofs + delta, Int.unsigned ofs + delta + size_chunk chunk)).
red; intros; red; intros. exploit (H1 x H7). eauto.
exploit Mem.store_valid_access_3. eexact H0. intros [C D].
apply Mem.perm_cur_max. apply Mem.perm_implies with Writable; auto with mem.
apply C. red in H8; simpl in H8. omega.
auto.
simpl. generalize (size_chunk_pos chunk0). omega.
simpl. generalize (size_chunk_pos chunk). omega.
Qed.
Lemma volatile_store_receptive:
forall F V (ge: Genv.t F V) chunk m b ofs v t1 m1 t2,
volatile_store ge chunk m b ofs v t1 m1 -> match_traces ge t1 t2 -> t1 = t2.
Proof.
intros. inv H; inv H0; auto.
Qed.
Lemma volatile_store_ok:
forall chunk,
extcall_properties (volatile_store_sem chunk)
(mksignature (Tint :: type_of_chunk chunk :: nil) None).
Proof.
intros; constructor; intros.
(* well typed *)
unfold proj_sig_res; simpl. inv H; constructor.
(* arity *)
inv H; simpl; auto.
(* symbols preserved *)
inv H1. constructor. eapply volatile_store_preserved; eauto.
(* valid block *)
inv H. inv H1. auto. eauto with mem.
(* perms *)
inv H. inv H2. auto. eauto with mem.
(* readonly *)
inv H. eapply volatile_store_readonly; eauto.
(* mem extends*)
inv H. inv H1. inv H6. inv H7. inv H4.
exploit volatile_store_extends; eauto. intros [m2' [A [B C]]].
exists Vundef; exists m2'; intuition. constructor; auto.
(* mem inject *)
inv H0. inv H2. inv H7. inv H8. inversion H5; subst.
exploit volatile_store_inject; eauto. intros [m2' [A [B [C D]]]].
exists f; exists Vundef; exists m2'; intuition. constructor; auto. red; intros; congruence.
(* trace length *)
inv H; inv H0; simpl; omega.
(* receptive *)
assert (t1 = t2). inv H. eapply volatile_store_receptive; eauto.
subst t2; exists vres1; exists m1; auto.
(* determ *)
inv H; inv H0. inv H1; inv H8; try congruence.
assert (id = id0) by (eapply Genv.genv_vars_inj; eauto). subst id0.
assert (ev = ev0) by (eapply eventval_match_determ_2; eauto). subst ev0.
split. constructor. auto.
split. constructor. intuition congruence.
Qed.
Inductive volatile_store_global_sem (chunk: memory_chunk) (id: ident) (ofs: int)
(F V: Type) (ge: Genv.t F V):
list val -> mem -> trace -> val -> mem -> Prop :=
| volatile_store_global_sem_intro: forall b m1 v t m2,
Genv.find_symbol ge id = Some b ->
volatile_store ge chunk m1 b ofs v t m2 ->
volatile_store_global_sem chunk id ofs ge (v :: nil) m1 t Vundef m2.
Remark volatile_store_global_charact:
forall chunk id ofs (F V: Type) (ge: Genv.t F V) vargs m t vres m',
volatile_store_global_sem chunk id ofs ge vargs m t vres m' <->
exists b, Genv.find_symbol ge id = Some b /\ volatile_store_sem chunk ge (Vptr b ofs :: vargs) m t vres m'.
Proof.
intros; split.
intros. inv H; exists b; split; auto; econstructor; eauto.
intros [b [P Q]]. inv Q. econstructor; eauto.
Qed.
Lemma volatile_store_global_ok:
forall chunk id ofs,
extcall_properties (volatile_store_global_sem chunk id ofs)
(mksignature (type_of_chunk chunk :: nil) None).
Proof.
intros; constructor; intros.
(* well typed *)
unfold proj_sig_res; simpl. inv H; constructor.
(* arity *)
inv H; simpl; auto.
(* symbols preserved *)
inv H1. econstructor. rewrite H; eauto. eapply volatile_store_preserved; eauto.
(* valid block *)
inv H. inv H2. auto. eauto with mem.
(* perms *)
inv H. inv H3. auto. eauto with mem.
(* readonly *)
inv H. eapply volatile_store_readonly; eauto.
(* mem extends*)
rewrite volatile_store_global_charact in H. destruct H as [b [P Q]].
exploit ec_mem_extends. eapply volatile_store_ok. eexact Q. eauto. eauto.
intros [vres' [m2' [A [B [C D]]]]].
exists vres'; exists m2'; intuition. rewrite volatile_store_global_charact. exists b; auto.
(* mem inject *)
rewrite volatile_store_global_charact in H0. destruct H0 as [b [P Q]].
exploit (proj1 H). eauto. intros EQ.
assert (val_inject f (Vptr b ofs) (Vptr b ofs)). econstructor; eauto. rewrite Int.add_zero; auto.
exploit ec_mem_inject. eapply volatile_store_ok. eauto. eexact Q. eauto. eauto.
intros [f' [vres' [m2' [A [B [C [D [E G]]]]]]]].
exists f'; exists vres'; exists m2'; intuition.
rewrite volatile_store_global_charact. exists b; auto.
(* trace length *)
inv H. inv H1; simpl; omega.
(* receptive *)
assert (t1 = t2). inv H. eapply volatile_store_receptive; eauto. subst t2.
exists vres1; exists m1; congruence.
(* determ *)
rewrite volatile_store_global_charact in *.
destruct H as [b1 [A1 B1]]. destruct H0 as [b2 [A2 B2]]. rewrite A1 in A2; inv A2.
eapply ec_determ. eapply volatile_store_ok. eauto. eauto.
Qed.
(** ** Semantics of annotations. *)
Fixpoint annot_eventvals (targs: list annot_arg) (vargs: list eventval) : list eventval :=
match targs, vargs with
| AA_arg ty :: targs', varg :: vargs' => varg :: annot_eventvals targs' vargs'
| AA_int n :: targs', _ => EVint n :: annot_eventvals targs' vargs
| AA_float n :: targs', _ => EVfloat n :: annot_eventvals targs' vargs
| _, _ => vargs
end.
Inductive extcall_annot_sem (text: ident) (targs: list annot_arg) (F V: Type) (ge: Genv.t F V):
list val -> mem -> trace -> val -> mem -> Prop :=
| extcall_annot_sem_intro: forall vargs m args,
eventval_list_match ge args (annot_args_typ targs) vargs ->
extcall_annot_sem text targs ge vargs m
(Event_annot text (annot_eventvals targs args) :: E0) Vundef m.
Lemma extcall_annot_ok:
forall text targs,
extcall_properties (extcall_annot_sem text targs) (mksignature (annot_args_typ targs) None).
Proof.
intros; constructor; intros.
(* well typed *)
inv H. simpl. auto.
(* arity *)
inv H. simpl. eapply eventval_list_match_length; eauto.
(* symbols *)
inv H1. econstructor; eauto.
eapply eventval_list_match_preserved; eauto.
(* valid blocks *)
inv H; auto.
(* perms *)
inv H; auto.
(* readonly *)
inv H; auto.
(* mem extends *)
inv H.
exists Vundef; exists m1'; intuition.
econstructor; eauto.
eapply eventval_list_match_lessdef; eauto.
red; auto.
(* mem injects *)
inv H0.
exists f; exists Vundef; exists m1'; intuition.
econstructor; eauto.
eapply eventval_list_match_inject; eauto.
red; auto.
red; auto.
red; intros; congruence.
(* trace length *)
inv H; simpl; omega.
(* receptive *)
assert (t1 = t2). inv H; inv H0; auto.
exists vres1; exists m1; congruence.
(* determ *)
inv H; inv H0.
assert (args = args0). eapply eventval_list_match_determ_2; eauto. subst args0.
split. constructor. auto.
Qed.
Inductive extcall_annot_val_sem (text: ident) (targ: typ) (F V: Type) (ge: Genv.t F V):
list val -> mem -> trace -> val -> mem -> Prop :=
| extcall_annot_val_sem_intro: forall varg m arg,
eventval_match ge arg targ varg ->
extcall_annot_val_sem text targ ge (varg :: nil) m (Event_annot text (arg :: nil) :: E0) varg m.
Lemma extcall_annot_val_ok:
forall text targ,
extcall_properties (extcall_annot_val_sem text targ) (mksignature (targ :: nil) (Some targ)).
Proof.
intros; constructor; intros.
inv H. unfold proj_sig_res; simpl. eapply eventval_match_type; eauto.
inv H. auto.
inv H1. econstructor; eauto.
eapply eventval_match_preserved; eauto.
inv H; auto.
inv H; auto.
inv H; auto.
inv H. inv H1. inv H6.
exists v2; exists m1'; intuition.
econstructor; eauto.
eapply eventval_match_lessdef; eauto.
red; auto.
inv H0. inv H2. inv H7.
exists f; exists v'; exists m1'; intuition.
econstructor; eauto.
eapply eventval_match_inject; eauto.
red; auto.
red; auto.
red; intros; congruence.
inv H; simpl; omega.
assert (t1 = t2). inv H; inv H0; auto. subst t2.
exists vres1; exists m1; auto.
inv H; inv H0.
assert (arg = arg0). eapply eventval_match_determ_2; eauto. subst arg0.
split. constructor. auto.
Qed.
(** ** Semantics of [memcpy] operations. *)
Inductive extcall_memcpy_sem (sz al: Z) (F V: Type) (ge: Genv.t F V): list val -> mem -> trace -> val -> mem -> Prop :=
| extcall_memcpy_sem_intro: forall bdst odst bsrc osrc m bytes m',
al = 1 \/ al = 2 \/ al = 4 \/ al = 8 -> sz > 0 ->
(al | sz) -> (al | Int.unsigned osrc) -> (al | Int.unsigned odst) ->
bsrc <> bdst \/ Int.unsigned osrc = Int.unsigned odst
\/ Int.unsigned osrc + sz <= Int.unsigned odst
\/ Int.unsigned odst + sz <= Int.unsigned osrc ->
Mem.loadbytes m bsrc (Int.unsigned osrc) sz = Some bytes ->
Mem.storebytes m bdst (Int.unsigned odst) bytes = Some m' ->
extcall_memcpy_sem sz al ge (Vptr bdst odst :: Vptr bsrc osrc :: nil) m E0 Vundef m'.
Lemma extcall_memcpy_ok:
forall sz al,
extcall_properties (extcall_memcpy_sem sz al) (mksignature (Tint :: Tint :: nil) None).
Proof.
intros. constructor.
(* return type *)
intros. inv H. constructor.
(* arity *)
intros. inv H. auto.
(* change of globalenv *)
intros. inv H1. econstructor; eauto.
(* valid blocks *)
intros. inv H. eauto with mem.
(* perms *)
intros. inv H. eapply Mem.perm_storebytes_2; eauto.
(* readonly *)
intros. inv H. eapply Mem.load_storebytes_other; eauto.
destruct (eq_block b bdst); auto. subst b. right.
apply (Intv.range_disjoint'
(ofs, ofs + size_chunk chunk)
(Int.unsigned odst, Int.unsigned odst + Z_of_nat (length bytes))).
red; intros; red; intros. elim (H1 x); auto.
apply Mem.perm_cur_max.
eapply Mem.storebytes_range_perm; eauto.
simpl. generalize (size_chunk_pos chunk); omega.
simpl. rewrite (Mem.loadbytes_length _ _ _ _ _ H8). rewrite nat_of_Z_eq.
omega. omega.
(* extensions *)
intros. inv H.
inv H1. inv H13. inv H14. inv H10. inv H11.
exploit Mem.loadbytes_length; eauto. intros LEN.
exploit Mem.loadbytes_extends; eauto. intros [bytes2 [A B]].
exploit Mem.storebytes_within_extends; eauto. intros [m2' [C D]].
exists Vundef; exists m2'.
split. econstructor; eauto.
split. constructor.
split. auto.
red; split; intros.
eauto with mem.
exploit Mem.loadbytes_length. eexact H8. intros.
rewrite <- H1. eapply Mem.load_storebytes_other; eauto.
destruct (eq_block b bdst); auto. subst b; right.
exploit list_forall2_length; eauto. intros R.
apply (Intv.range_disjoint' (ofs, ofs + size_chunk chunk)
(Int.unsigned odst, Int.unsigned odst + Z_of_nat (length bytes2))); simpl.
red; unfold Intv.In; simpl; intros; red; intros.
eapply (H x H11).
apply Mem.perm_cur_max. apply Mem.perm_implies with Writable; auto with mem.
eapply Mem.storebytes_range_perm. eexact H9.
rewrite R. auto.
generalize (size_chunk_pos chunk). omega.
rewrite <- R. rewrite H10. rewrite nat_of_Z_eq. omega. omega.
(* injections *)
intros. inv H0. inv H2. inv H14. inv H15. inv H11. inv H12.
exploit Mem.loadbytes_length; eauto. intros LEN.
assert (RPSRC: Mem.range_perm m1 bsrc (Int.unsigned osrc) (Int.unsigned osrc + sz) Cur Nonempty).
eapply Mem.range_perm_implies. eapply Mem.loadbytes_range_perm; eauto. auto with mem.
assert (RPDST: Mem.range_perm m1 bdst (Int.unsigned odst) (Int.unsigned odst + sz) Cur Nonempty).
replace sz with (Z_of_nat (length bytes)).
eapply Mem.range_perm_implies. eapply Mem.storebytes_range_perm; eauto. auto with mem.
rewrite LEN. apply nat_of_Z_eq. omega.
assert (PSRC: Mem.perm m1 bsrc (Int.unsigned osrc) Cur Nonempty).
apply RPSRC. omega.
assert (PDST: Mem.perm m1 bdst (Int.unsigned odst) Cur Nonempty).
apply RPDST. omega.
exploit Mem.address_inject. eauto. eexact PSRC. eauto. intros EQ1.
exploit Mem.address_inject. eauto. eexact PDST. eauto. intros EQ2.
exploit Mem.loadbytes_inject; eauto. intros [bytes2 [A B]].
exploit Mem.storebytes_mapped_inject; eauto. intros [m2' [C D]].
exists f; exists Vundef; exists m2'.
split. econstructor; try rewrite EQ1; try rewrite EQ2; eauto.
eapply Mem.aligned_area_inject with (m := m1); eauto.
eapply Mem.aligned_area_inject with (m := m1); eauto.
eapply Mem.disjoint_or_equal_inject with (m := m1); eauto.
apply Mem.range_perm_max with Cur; auto.
apply Mem.range_perm_max with Cur; auto.
split. constructor.
split. auto.
split. red; split; intros. eauto with mem.
rewrite <- H2. eapply Mem.load_storebytes_other; eauto.
destruct (eq_block b bdst); auto. subst b.
assert (loc_unmapped f bdst ofs). apply H0. generalize (size_chunk_pos chunk); omega.
red in H12. congruence.
split. red; split; intros. eauto with mem.
rewrite <- H2. eapply Mem.load_storebytes_other; eauto.
destruct (eq_block b b0); auto. subst b0; right.
rewrite <- (list_forall2_length B). rewrite LEN. rewrite nat_of_Z_eq; try omega.
apply (Intv.range_disjoint' (ofs, ofs + size_chunk chunk)
(Int.unsigned odst + delta0, Int.unsigned odst + delta0 + sz)); simpl.
red; unfold Intv.In; simpl; intros; red; intros.
eapply (H0 x H12). eauto. apply Mem.perm_cur_max. apply RPDST. omega.
generalize (size_chunk_pos chunk); omega.
omega.
split. apply inject_incr_refl.
red; intros; congruence.
(* trace length *)
intros; inv H. simpl; omega.
(* receptive *)
intros.
assert (t1 = t2). inv H; inv H0; auto. subst t2.
exists vres1; exists m1; auto.
(* determ *)
intros; inv H; inv H0. split. constructor. intros; split; congruence.
Qed.
Definition builtin_call (bf: builtin_function): extcall_sem :=
match bf with
| EF_vload chunk => volatile_load_sem chunk
| EF_vstore chunk => volatile_store_sem chunk
| EF_vload_global chunk id ofs => volatile_load_global_sem chunk id ofs
| EF_vstore_global chunk id ofs => volatile_store_global_sem chunk id ofs
| EF_memcpy sz al => extcall_memcpy_sem sz al
| EF_annot txt targs => extcall_annot_sem txt targs
| EF_annot_val txt targ=> extcall_annot_val_sem txt targ
| EF_inline_asm txt => extcall_annot_sem txt nil
end.
Theorem builtin_call_ok:
forall ef,
extcall_properties (builtin_call ef) (ef_sig ef).
Proof.
intros. unfold external_call, ef_sig. destruct ef.
apply volatile_load_ok.
apply volatile_store_ok.
apply volatile_load_global_ok.
apply volatile_store_global_ok.
apply extcall_memcpy_ok.
apply extcall_annot_ok.
apply extcall_annot_val_ok.
apply extcall_annot_ok.
Qed.
Global Instance bc_ops: ExtCallOps mem builtin_function := {
external_call := builtin_call
}.
Global Instance bc_proof: ExternalCalls mem builtin_function := {
external_call_spec := builtin_call_ok
}.
End WITHMEM.
|
{-# OPTIONS --cubical --safe --postfix-projections #-}
-- This module defines a data type for balanced strings of parentheses,
-- which is isomorphic to forests of rose trees (see prog-iso below).
module Data.Dyck.Rose where
open import Prelude
open import Data.Nat using (_+_)
open import Data.Vec.Iterated using (Vec; _∷_; []; foldlN; head)
open import Data.Tree.Rose
open import Data.List
private
variable
n : ℕ
--------------------------------------------------------------------------------
-- Programs: definition and associated functions
--------------------------------------------------------------------------------
data Prog (A : Type a) : ℕ → Type a where
halt : Prog A 1
pull : Prog A (1 + n) → Prog A n
push : A → Prog A (1 + n) → Prog A (2 + n)
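-- Note (added): a `Prog A n` can be read as a stack-machine program that,
-- run against a stack of n forests, yields a single forest (see prog→tree⊙
-- below): `pull` pushes an empty forest, and `push v` pops two forests
-- t₁, t₂ and pushes ((v & t₂) ∷ t₁).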
--------------------------------------------------------------------------------
-- Conversion from a Prog to a Tree
--------------------------------------------------------------------------------
prog→tree⊙ : Prog A n → Vec (Forest A) n → Forest A
prog→tree⊙ halt (v ∷ []) = v
prog→tree⊙ (pull is) st = prog→tree⊙ is ([] ∷ st)
prog→tree⊙ (push v is) (t₁ ∷ t₂ ∷ st) = prog→tree⊙ is (((v & t₂) ∷ t₁) ∷ st)
prog→tree : Prog A zero → Forest A
prog→tree ds = prog→tree⊙ ds []
--------------------------------------------------------------------------------
-- Conversion from a Tree to a Prog
--------------------------------------------------------------------------------
tree→prog⊙ : Forest A → Prog A (suc n) → Prog A n
tree→prog⊙ [] = pull
tree→prog⊙ ((t & ts) ∷ xs) = tree→prog⊙ ts ∘ tree→prog⊙ xs ∘ push t
tree→prog : Forest A → Prog A zero
tree→prog tr = tree→prog⊙ tr halt
--------------------------------------------------------------------------------
-- Proof of isomorphism
--------------------------------------------------------------------------------
tree→prog→tree⊙ : (e : Forest A) (is : Prog A (1 + n)) (st : Vec (Forest A) n) →
prog→tree⊙ (tree→prog⊙ e is) st ≡ prog→tree⊙ is (e ∷ st)
tree→prog→tree⊙ [] is st = refl
tree→prog→tree⊙ ((t & ts) ∷ xs) is st =
tree→prog→tree⊙ ts _ st ; tree→prog→tree⊙ xs (push t is) (ts ∷ st)
tree→prog→tree : (e : Forest A) → prog→tree (tree→prog e) ≡ e
tree→prog→tree e = tree→prog→tree⊙ e halt []
prog→tree→prog⊙ : (is : Prog A n) (st : Vec (Forest A) n) →
tree→prog (prog→tree⊙ is st) ≡ foldlN (Prog A) tree→prog⊙ is st
prog→tree→prog⊙ halt st = refl
prog→tree→prog⊙ (pull is) st = prog→tree→prog⊙ is ([] ∷ st)
prog→tree→prog⊙ (push x is) (t₁ ∷ t₂ ∷ st) =
prog→tree→prog⊙ is (((x & t₂) ∷ t₁) ∷ st)
prog→tree→prog : (is : Prog A 0) → tree→prog (prog→tree is) ≡ is
prog→tree→prog is = prog→tree→prog⊙ is []
prog-iso : Prog A zero ⇔ Forest A
prog-iso .fun = prog→tree
prog-iso .inv = tree→prog
prog-iso .rightInv = tree→prog→tree
prog-iso .leftInv = prog→tree→prog
|
[STATEMENT]
lemma extensional_push_forward: "extensional0 dest.diff_fun_space (push_forward X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. extensional0 dest.diff_fun_space (push_forward X)
[PROOF STEP]
by (auto simp: push_forward_def) |
-- ------------------------------------------------------------ [ Sequence.idr ]
-- Module : UML.Sequence
-- Description : Sequence diagrams.
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
module UML.Sequence
import public UML.Sequence.Model
import public UML.Sequence.Parser
-- --------------------------------------------------------------------- [ EOF ]
|
Eight FAB subtypes were proposed in 1976.
|
-- Jesper, 2016-11-04
-- Absurd patterns should *not* be counted as a UnusedArg.
open import Common.Equality
data ⊥ : Set where
data Bool : Set where
true false : Bool
abort : (A : Set) → ⊥ → A
abort A ()
test : (x y : ⊥) → abort Bool x ≡ abort Bool y
test x y = refl
|
Formal statement is: lemma real_lim_sequentially: fixes l::complex shows "(f \<longlongrightarrow> l) sequentially \<Longrightarrow> (\<exists>N. \<forall>n\<ge>N. f n \<in> \<real>) \<Longrightarrow> l \<in> \<real>" Informal statement is: If $f$ converges to $l$ and $f$ is eventually real, then $l$ is real. |
=== 2000s ===
|
[STATEMENT]
lemma bound0_qf: "bound0 p \<Longrightarrow> qfree p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bound0 p \<Longrightarrow> qfree p
[PROOF STEP]
by (induct p) simp_all |
module Semantics where
open import Data.Nat hiding (_⊔_; _⊓_)
open import Data.Product
open import Data.Sum
open import Data.String using (String)
open import Data.Unit hiding (_≟_)
open import Data.Empty
open import Relation.Nullary
import Relation.Binary.PropositionalEquality as Eq
open Eq using (_≡_;_≢_; refl)
open Eq.≡-Reasoning
open import Level hiding (_⊔_) renaming (zero to lzero; suc to lsuc)
{- TODO:
* subtyping of refinement types
* union types
* intersection types
-}
Id = String
variable
x y : Id
ℓ : Level
data Expr : Set where
Nat : ℕ → Expr
Var : Id → Expr
Lam : Id → Expr → Expr
App : Expr → Expr → Expr
Pair : Expr → Expr → Expr
Fst Snd : Expr → Expr
Inl Inr : Expr → Expr
Case : Expr → Id → Expr → Id → Expr → Expr
data RawType : Set where
Nat : RawType
_⇒_ _⋆_ _⊹_ : RawType → RawType → RawType
ss⇒tt : ∀ {S S₁ T T₁ : RawType} → (S ⇒ S₁) ≡ (T ⇒ T₁) → (S ≡ T × S₁ ≡ T₁)
ss⇒tt refl = refl , refl
ss⋆tt : ∀ {S S₁ T T₁ : RawType} → (S ⋆ S₁) ≡ (T ⋆ T₁) → (S ≡ T × S₁ ≡ T₁)
ss⋆tt refl = refl , refl
ss⊹tt : ∀ {S S₁ T T₁ : RawType} → (S ⊹ S₁) ≡ (T ⊹ T₁) → (S ≡ T × S₁ ≡ T₁)
ss⊹tt refl = refl , refl
data Type : Set₁ where
Base : (P : ℕ → Set) → Type -- refinement
Nat : Type
_⇒_ : Type → Type → Type
_⋆_ : Type → Type → Type
_⊹_ : Type → Type → Type
T-Nat = Base (λ n → ⊤) -- all natural numbers
data ne : Type → Set where
ne-base : ∀ {P} → (∃P : Σ ℕ P) → ne (Base P)
ne-nat : ne Nat
ne-⇒ : ∀ {S T} → ne S → ne T → ne (S ⇒ T)
ne-⋆ : ∀ {S T} → ne S → ne T → ne (S ⋆ T)
ne-⊹L : ∀ {S T} → ne S → ne (S ⊹ T)
ne-⊹R : ∀ {S T} → ne T → ne (S ⊹ T)
data Env (A : Set ℓ) : Set ℓ where
· : Env A
_,_⦂_ : Env A → (x : Id) → (a : A) → Env A
∥_∥ : Type → RawType
∥ Base P ∥ = Nat
∥ Nat ∥ = Nat
∥ S ⇒ S₁ ∥ = ∥ S ∥ ⇒ ∥ S₁ ∥
∥ S ⋆ S₁ ∥ = ∥ S ∥ ⋆ ∥ S₁ ∥
∥ S ⊹ S₁ ∥ = ∥ S ∥ ⊹ ∥ S₁ ∥
_∨_ : (P Q : ℕ → Set) → ℕ → Set
P ∨ Q = λ n → P n ⊎ Q n
_∧_ : (P Q : ℕ → Set) → ℕ → Set
P ∧ Q = λ n → P n × Q n
implies : ∀ {P Q : ℕ → Set} → (n : ℕ) → P n → (P n ⊎ Q n)
implies n Pn = inj₁ Pn
p*q->p : ∀ {P Q : ℕ → Set} → (n : ℕ) → (P n × Q n) → P n
p*q->p n (Pn , Qn) = Pn
_⊔_ _⊓_ : (S T : Type) {r : ∥ S ∥ ≡ ∥ T ∥} → Type
(Base P ⊔ Base P₁) {refl} = Base (P ∨ P₁)
(Base P ⊔ Nat) = Nat
(Nat ⊔ Base P) = Nat
(Nat ⊔ Nat) = Nat
((S ⇒ S₁) ⊔ (T ⇒ T₁)) {r} with ss⇒tt r
... | sss , ttt = (S ⊓ T){sss} ⇒ (S₁ ⊔ T₁){ttt}
((S ⋆ S₁) ⊔ (T ⋆ T₁)) {r} with ss⋆tt r
... | sss , ttt = (S ⊔ T){sss} ⋆ (S₁ ⊔ T₁){ttt}
((S ⊹ S₁) ⊔ (T ⊹ T₁)) {r} with ss⊹tt r
... | sss , ttt = (S ⊔ T){sss} ⊹ (S₁ ⊔ T₁){ttt}
Base P ⊓ Base P₁ = Base (P ∧ P₁)
Base P ⊓ Nat = Base P
Nat ⊓ Base P = Base P
Nat ⊓ Nat = Nat
((S ⇒ S₁) ⊓ (T ⇒ T₁)){r} with ss⇒tt r
... | sss , ttt = (S ⊔ T){sss} ⇒ (S₁ ⊓ T₁){ttt}
((S ⋆ S₁) ⊓ (T ⋆ T₁)){r} with ss⋆tt r
... | sss , ttt = (S ⊓ T){sss} ⋆ (S₁ ⊓ T₁){ttt}
((S ⊹ S₁) ⊓ (T ⊹ T₁)){r} with ss⊹tt r
... | sss , ttt = (S ⊓ T){sss} ⊹ (S₁ ⊓ T₁){ttt}
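-- Note (added): _⊔_ and _⊓_ compute the join and meet of two types that
-- share the same raw shape. In the function cases the domain is combined
-- with the dual operator, reflecting contravariance: (S ⇒ S₁) ⊔ (T ⇒ T₁)
-- has domain (S ⊓ T), and dually for _⊓_.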
variable
S T U S′ T′ U′ U″ : Type
Γ Γ₁ Γ₂ : Env Type
L M N : Expr
n : ℕ
P : ℕ → Set
data Split {A : Set ℓ} : Env A → Env A → Env A → Set ℓ where
nil : Split · · ·
lft : ∀ {a : A}{Γ Γ₁ Γ₂ : Env A} → Split Γ Γ₁ Γ₂ → Split (Γ , x ⦂ a) (Γ₁ , x ⦂ a) Γ₂
rgt : ∀ {a : A}{Γ Γ₁ Γ₂ : Env A} → Split Γ Γ₁ Γ₂ → Split (Γ , x ⦂ a) Γ₁ (Γ₂ , x ⦂ a)
data _⦂_∈_ {A : Set ℓ} : Id → A → Env A → Set ℓ where
found : ∀ {a : A}{E : Env A} →
x ⦂ a ∈ (E , x ⦂ a)
there : ∀ {a a' : A}{E : Env A} →
x ⦂ a ∈ E →
-- x ≢ y →
x ⦂ a ∈ (E , y ⦂ a')
data _<:_ : Type → Type → Set where
<:-refl :
T <: T
<:-base :
(P Q : ℕ → Set) →
(p→q : ∀ n → P n → Q n) →
Base P <: Base Q
<:-base-nat :
Base P <: Nat
<:-⇒ :
S′ <: S →
T <: T′ →
(S ⇒ T) <: (S′ ⇒ T′)
<:-⋆ :
S <: S′ →
T <: T′ →
(S ⋆ T) <: (S′ ⋆ T′)
<:-⊹ :
S <: S′ →
T <: T′ →
(S ⊹ T) <: (S′ ⊹ T′)
-- subtyping is compatible with raw types
<:-raw : S <: T → ∥ S ∥ ≡ ∥ T ∥
<:-raw <:-refl = refl
<:-raw (<:-base P Q p→q) = refl
<:-raw <:-base-nat = refl
<:-raw (<:-⇒ s<:t s<:t₁) = Eq.cong₂ _⇒_ (Eq.sym (<:-raw s<:t)) (<:-raw s<:t₁)
<:-raw (<:-⋆ s<:t s<:t₁) = Eq.cong₂ _⋆_ (<:-raw s<:t) (<:-raw s<:t₁)
<:-raw (<:-⊹ s<:t s<:t₁) = Eq.cong₂ _⊹_ (<:-raw s<:t) (<:-raw s<:t₁)
<:-⊔ : ∀ S T → {c : ∥ S ∥ ≡ ∥ T ∥} → S <: (S ⊔ T){c}
<:-⊓ : ∀ S T → {c : ∥ S ∥ ≡ ∥ T ∥} → (S ⊓ T){c} <: S
<:-⊔ (Base P) (Base P₁) {refl} = <:-base P (P ∨ P₁) implies
<:-⊔ (Base P) Nat = <:-base-nat
<:-⊔ Nat (Base P) = <:-refl
<:-⊔ Nat Nat = <:-refl
<:-⊔ (S ⇒ S₁) (T ⇒ T₁) {c} with ss⇒tt c
... | c1 , c2 = <:-⇒ (<:-⊓ S T) (<:-⊔ S₁ T₁)
<:-⊔ (S ⋆ S₁) (T ⋆ T₁) {c} with ss⋆tt c
... | c1 , c2 = <:-⋆ (<:-⊔ S T) (<:-⊔ S₁ T₁)
<:-⊔ (S ⊹ S₁) (T ⊹ T₁) {c} with ss⊹tt c
... | c1 , c2 = <:-⊹ (<:-⊔ S T) (<:-⊔ S₁ T₁)
<:-⊓ (Base P) (Base P₁) {refl} = <:-base (P ∧ P₁) P p*q->p
<:-⊓ (Base P) Nat = <:-refl
<:-⊓ Nat (Base P) = <:-base-nat
<:-⊓ Nat Nat = <:-refl
<:-⊓ (S ⇒ S₁) (T ⇒ T₁) {c} with ss⇒tt c
... | c1 , c2 = <:-⇒ (<:-⊔ S T) (<:-⊓ S₁ T₁)
<:-⊓ (S ⋆ S₁) (T ⋆ T₁) {c} with ss⋆tt c
... | c1 , c2 = <:-⋆ (<:-⊓ S T) (<:-⊓ S₁ T₁)
<:-⊓ (S ⊹ S₁) (T ⊹ T₁) {c} with ss⊹tt c
... | c1 , c2 = <:-⊹ (<:-⊓ S T) (<:-⊓ S₁ T₁)
-- should be in terms of RawType for evaluation
data _⊢_⦂_ : Env Type → Expr → Type → Set₁ where
nat' :
Γ ⊢ Nat n ⦂ Base (_≡_ n)
var :
(x∈ : x ⦂ T ∈ Γ) →
--------------------
Γ ⊢ Var x ⦂ T
lam :
(Γ , x ⦂ S) ⊢ M ⦂ T →
--------------------
Γ ⊢ Lam x M ⦂ (S ⇒ T)
app :
Γ ⊢ M ⦂ (S ⇒ T) →
Γ ⊢ N ⦂ S →
--------------------
Γ ⊢ App M N ⦂ T
pair :
Γ ⊢ M ⦂ S →
Γ ⊢ N ⦂ T →
--------------------
Γ ⊢ Pair M N ⦂ (S ⋆ T)
pair-E1 :
Γ ⊢ M ⦂ (S ⋆ T) →
--------------------
Γ ⊢ Fst M ⦂ S
pair-E2 :
Γ ⊢ M ⦂ (S ⋆ T) →
--------------------
Γ ⊢ Snd M ⦂ T
sum-I1 :
Γ ⊢ M ⦂ S →
--------------------
Γ ⊢ Inl M ⦂ (S ⊹ T)
sum-I2 :
Γ ⊢ N ⦂ T →
--------------------
Γ ⊢ Inr N ⦂ (S ⊹ T)
sum-E :
Γ ⊢ L ⦂ (S ⊹ T) →
(Γ , x ⦂ S) ⊢ M ⦂ U →
(Γ , y ⦂ T) ⊢ N ⦂ U →
--------------------
Γ ⊢ Case L x M y N ⦂ U
split-sym : Split Γ Γ₁ Γ₂ → Split Γ Γ₂ Γ₁
split-sym nil = nil
split-sym (lft sp) = rgt (split-sym sp)
split-sym (rgt sp) = lft (split-sym sp)
weaken-∈ : Split Γ Γ₁ Γ₂ → x ⦂ T ∈ Γ₁ → x ⦂ T ∈ Γ
weaken-∈ (lft sp) found = found
weaken-∈ (rgt sp) found = there (weaken-∈ sp found)
weaken-∈ (lft sp) (there x∈) = there (weaken-∈ sp x∈)
weaken-∈ (rgt sp) (there x∈) = there (weaken-∈ sp (there x∈))
weaken : Split Γ Γ₁ Γ₂ → Γ₁ ⊢ M ⦂ T → Γ ⊢ M ⦂ T
weaken sp (nat') = nat'
weaken sp (var x∈) = var (weaken-∈ sp x∈)
weaken sp (lam ⊢M) = lam (weaken (lft sp) ⊢M)
weaken sp (app ⊢M ⊢N) = app (weaken sp ⊢M) (weaken sp ⊢N)
weaken sp (pair ⊢M ⊢N) = pair (weaken sp ⊢M) (weaken sp ⊢N)
weaken sp (pair-E1 ⊢M) = pair-E1 (weaken sp ⊢M)
weaken sp (pair-E2 ⊢M) = pair-E2 (weaken sp ⊢M)
weaken sp (sum-I1 ⊢M) = sum-I1 (weaken sp ⊢M)
weaken sp (sum-I2 ⊢N) = sum-I2 (weaken sp ⊢N)
weaken sp (sum-E ⊢L ⊢M ⊢N) = sum-E (weaken sp ⊢L) (weaken (lft sp) ⊢M) (weaken (lft sp) ⊢N)
-- incorrectness typing
P=n : ℕ → ℕ → Set
P=n = λ n x → n ≡ x
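-- Note (added): Γ ⊢ M ÷ T is the incorrectness-style judgment. Its intended
-- reading, made precise by `lave` below, is backward: every value t of
-- T⟦ T ⟧ is reachable, i.e. some environment γ makes M evaluate to t.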
data _⊢_÷_ : Env Type → Expr → Type → Set₁ where
nat' :
--------------------
· ⊢ Nat n ÷ Base (_≡_ n)
var1 :
( · , x ⦂ T) ⊢ Var x ÷ T
{-
var :
x ⦂ T ∈ Γ →
--------------------
Γ ⊢ Var x ÷ T
-}
lam :
(· , x ⦂ S) ⊢ M ÷ T →
--------------------
· ⊢ Lam x M ÷ (S ⇒ T)
pair :
Split Γ Γ₁ Γ₂ →
Γ₁ ⊢ M ÷ S →
Γ₂ ⊢ N ÷ T →
--------------------
Γ ⊢ Pair M N ÷ (S ⋆ T)
pair-E1 :
Γ ⊢ M ÷ (S ⋆ T) →
--------------------
Γ ⊢ Fst M ÷ S
pair-E2 :
Γ ⊢ M ÷ (S ⋆ T) →
--------------------
Γ ⊢ Snd M ÷ T
sum-E :
Split Γ Γ₁ Γ₂ →
Γ₁ ⊢ L ÷ (S ⊹ T) →
(Γ₂ , x ⦂ S) ⊢ M ÷ U →
(Γ₂ , y ⦂ T) ⊢ N ÷ U →
--------------------
Γ ⊢ Case L x M y N ÷ U
sum-E′ : ∀ {ru′=ru″} →
Split Γ Γ₁ Γ₂ →
Γ₁ ⊢ L ÷ (S ⊹ T) →
(Γ₂ , x ⦂ S) ⊢ M ÷ U′ →
(Γ₂ , y ⦂ T) ⊢ N ÷ U″ →
U ≡ (U′ ⊔ U″){ru′=ru″} →
--------------------
Γ ⊢ Case L x M y N ÷ U
{-
`sub` :
Γ ⊢ M ÷ S →
T <: S →
--------------------
Γ ⊢ M ÷ T
-}
record _←_ (A B : Set) : Set where
field
func : A → B
back : ∀ (b : B) → ∃ λ (a : A) → func a ≡ b
open _←_
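-- Note (added): a record A ← B packages a function func : A → B together
-- with a proof that it is surjective: every b : B has a preimage under func.
-- It is used below to give the backward interpretation T'⟦ S ⇒ T ⟧.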
T⟦_⟧ : Type → Set
T⟦ Base P ⟧ = Σ ℕ P
T⟦ Nat ⟧ = ℕ
T⟦ S ⇒ T ⟧ = T⟦ S ⟧ → T⟦ T ⟧
T⟦ S ⋆ T ⟧ = T⟦ S ⟧ × T⟦ T ⟧
T⟦ S ⊹ T ⟧ = T⟦ S ⟧ ⊎ T⟦ T ⟧
T'⟦_⟧ : Type → Set
T'⟦ Base P ⟧ = Σ ℕ P
T'⟦ Nat ⟧ = ℕ
T'⟦ S ⇒ T ⟧ = T'⟦ S ⟧ ← T'⟦ T ⟧
T'⟦ S ⋆ T ⟧ = T'⟦ S ⟧ × T'⟦ T ⟧
T'⟦ S ⊹ T ⟧ = T'⟦ S ⟧ ⊎ T'⟦ T ⟧
E⟦_⟧ : Env Type → Env Set
E⟦ · ⟧ = ·
E⟦ Γ , x ⦂ T ⟧ = E⟦ Γ ⟧ , x ⦂ T⟦ T ⟧
data iEnv : Env Set → Set where
· : iEnv ·
_,_⦂_ : ∀ {E}{A} → iEnv E → (x : Id) → (a : A) → iEnv (E , x ⦂ A)
lookup : (x ⦂ T ∈ Γ) → iEnv E⟦ Γ ⟧ → T⟦ T ⟧
lookup found (γ , _ ⦂ a) = a
lookup (there x∈) (γ , _ ⦂ a) = lookup x∈ γ
eval : Γ ⊢ M ⦂ T → iEnv E⟦ Γ ⟧ → T⟦ T ⟧
eval (nat'{n = n}) γ = n , refl
eval (var x∈) γ = lookup x∈ γ
eval (lam ⊢M) γ = λ s → eval ⊢M (γ , _ ⦂ s)
eval (app ⊢M ⊢N) γ = eval ⊢M γ (eval ⊢N γ)
eval (pair ⊢M ⊢N) γ = (eval ⊢M γ) , (eval ⊢N γ)
eval (pair-E1 ⊢M) γ = proj₁ (eval ⊢M γ)
eval (pair-E2 ⊢M) γ = proj₂ (eval ⊢M γ)
eval (sum-I1 ⊢M) γ = inj₁ (eval ⊢M γ)
eval (sum-I2 ⊢N) γ = inj₂ (eval ⊢N γ)
eval (sum-E{S = S}{T = T}{U = U} ⊢L ⊢M ⊢N) γ =
[ (λ s → eval ⊢M (γ , _ ⦂ s)) , (λ t → eval ⊢N (γ , _ ⦂ t)) ] (eval ⊢L γ)
corr : Γ ⊢ M ÷ T → Γ ⊢ M ⦂ T
corr (nat') = nat'
corr var1 = var found
-- corr (var x) = var x
corr (lam ⊢M) = lam (corr ⊢M)
corr (pair-E1 ÷M) = pair-E1 (corr ÷M)
corr (pair-E2 ÷M) = pair-E2 (corr ÷M)
corr (pair sp ÷M ÷N) = pair (weaken sp (corr ÷M)) (weaken (split-sym sp) (corr ÷N))
corr (sum-E sp ÷L ÷M ÷N) = sum-E (weaken sp (corr ÷L)) (weaken (lft (split-sym sp)) (corr ÷M)) (weaken (lft (split-sym sp)) (corr ÷N))
corr (sum-E′ sp ÷L ÷M ÷N U≡U′⊔U″) =
sum-E (weaken sp (corr ÷L)) (weaken (lft (split-sym sp)) (corr {!!})) (weaken (lft (split-sym sp)) (corr {!!}))
{-
corr (`sub` ÷M T<S) = {!÷M!}
-}
-- pick one element of a type to demonstrate non-emptiness
one : ∀ (T : Type) {net : ne T} → T⟦ T ⟧
one (Base P) {ne-base ∃P} = ∃P
one Nat = zero
one (T ⇒ T₁) {ne-⇒ ne-T ne-T₁} = λ x → one T₁ {ne-T₁}
one (T ⋆ T₁) {ne-⋆ ne-T ne-T₁} = (one T {ne-T}) , (one T₁ {ne-T₁})
one (T ⊹ T₁) {ne-⊹L ne-T} = inj₁ (one T {ne-T})
one (T ⊹ T₁) {ne-⊹R ne-T₁} = inj₂ (one T₁ {ne-T₁})
{- not needed
many : iEnv E⟦ Γ ⟧
many {·} = ·
many {Γ , x ⦂ T} = many , x ⦂ one T
gen : (x∈ : x ⦂ T ∈ Γ) (t : T⟦ T ⟧) → iEnv E⟦ Γ ⟧
gen found t = many , _ ⦂ t
gen (there x∈) t = (gen x∈ t) , _ ⦂ one {!!}
lookup-gen : (x∈ : x ⦂ T ∈ Γ) (t : T⟦ T ⟧) → lookup x∈ (gen x∈ t) ≡ t
lookup-gen found t = refl
lookup-gen (there x∈) t = lookup-gen x∈ t
-}
open Eq.≡-Reasoning
postulate
ext : ∀ {A B : Set}{f g : A → B} → (∀ x → f x ≡ g x) → f ≡ g
unsplit-env : Split Γ Γ₁ Γ₂ → iEnv E⟦ Γ₁ ⟧ → iEnv E⟦ Γ₂ ⟧ → iEnv E⟦ Γ ⟧
unsplit-env nil γ₁ γ₂ = ·
unsplit-env (lft sp) (γ₁ , _ ⦂ a) γ₂ = (unsplit-env sp γ₁ γ₂) , _ ⦂ a
unsplit-env (rgt sp) γ₁ (γ₂ , _ ⦂ a) = (unsplit-env sp γ₁ γ₂) , _ ⦂ a
unsplit-split : (sp : Split Γ Γ₁ Γ₂) (γ₁ : iEnv E⟦ Γ₁ ⟧) (γ₂ : iEnv E⟦ Γ₂ ⟧) →
unsplit-env sp γ₁ γ₂ ≡ unsplit-env (split-sym sp) γ₂ γ₁
unsplit-split nil γ₁ γ₂ = refl
unsplit-split (lft sp) (γ₁ , _ ⦂ a) γ₂ rewrite unsplit-split sp γ₁ γ₂ = refl
unsplit-split (rgt sp) γ₁ (γ₂ , _ ⦂ a) rewrite unsplit-split sp γ₁ γ₂ = refl
lookup-unsplit : (sp : Split Γ Γ₁ Γ₂) (γ₁ : iEnv E⟦ Γ₁ ⟧) (γ₂ : iEnv E⟦ Γ₂ ⟧) →
(x∈ : x ⦂ T ∈ Γ₁) →
lookup (weaken-∈ sp x∈) (unsplit-env sp γ₁ γ₂) ≡ lookup x∈ γ₁
lookup-unsplit (lft sp) (γ₁ , _ ⦂ a) γ₂ found = refl
lookup-unsplit (rgt sp) γ₁ (γ₂ , _ ⦂ a) found = lookup-unsplit sp γ₁ γ₂ found
lookup-unsplit (lft sp) (γ₁ , _ ⦂ a) γ₂ (there x∈) = lookup-unsplit sp γ₁ γ₂ x∈
lookup-unsplit (rgt sp) γ₁ (γ₂ , _ ⦂ a) (there x∈) = lookup-unsplit sp γ₁ γ₂ (there x∈)
eval-unsplit : (sp : Split Γ Γ₁ Γ₂) (γ₁ : iEnv E⟦ Γ₁ ⟧) (γ₂ : iEnv E⟦ Γ₂ ⟧) →
(⊢M : Γ₁ ⊢ M ⦂ T) →
eval (weaken sp ⊢M) (unsplit-env sp γ₁ γ₂) ≡ eval ⊢M γ₁
eval-unsplit sp γ₁ γ₂ (nat')= refl
eval-unsplit sp γ₁ γ₂ (var x∈) = lookup-unsplit sp γ₁ γ₂ x∈
eval-unsplit sp γ₁ γ₂ (lam ⊢M) = ext (λ s → eval-unsplit (lft sp) (γ₁ , _ ⦂ s) γ₂ ⊢M)
eval-unsplit sp γ₁ γ₂ (app ⊢M ⊢M₁)
rewrite eval-unsplit sp γ₁ γ₂ ⊢M | eval-unsplit sp γ₁ γ₂ ⊢M₁ = refl
eval-unsplit sp γ₁ γ₂ (pair ⊢M ⊢M₁)
rewrite eval-unsplit sp γ₁ γ₂ ⊢M | eval-unsplit sp γ₁ γ₂ ⊢M₁ = refl
eval-unsplit sp γ₁ γ₂ (pair-E1 ⊢M)
rewrite eval-unsplit sp γ₁ γ₂ ⊢M = refl
eval-unsplit sp γ₁ γ₂ (pair-E2 ⊢M)
rewrite eval-unsplit sp γ₁ γ₂ ⊢M = refl
eval-unsplit sp γ₁ γ₂ (sum-I1 ⊢M)
rewrite eval-unsplit sp γ₁ γ₂ ⊢M = refl
eval-unsplit sp γ₁ γ₂ (sum-I2 ⊢N)
rewrite eval-unsplit sp γ₁ γ₂ ⊢N = refl
eval-unsplit sp γ₁ γ₂ (sum-E ⊢L ⊢M ⊢N)
rewrite eval-unsplit sp γ₁ γ₂ ⊢L
| ext (λ s → eval-unsplit (lft sp) (γ₁ , _ ⦂ s) γ₂ ⊢M)
| ext (λ t → eval-unsplit (lft sp) (γ₁ , _ ⦂ t) γ₂ ⊢N)
= refl
-- soundness of the incorrectness rules
lave :
(÷M : Γ ⊢ M ÷ T) →
∀ (t : T⟦ T ⟧) →
∃ λ (γ : iEnv E⟦ Γ ⟧) →
eval (corr ÷M) γ ≡ t
lave nat' (n , refl) = · , refl
lave var1 t = (· , _ ⦂ t) , refl
-- lave (var x∈) t = (gen x∈ t) , lookup-gen x∈ t
lave (lam{x = x}{S = S} ÷M) t = · , ext aux
where
aux : (s : T⟦ S ⟧) → eval (corr ÷M) (· , x ⦂ s) ≡ t s
aux s with lave ÷M (t s)
... | (· , .x ⦂ a) , snd = {!!} -- impossible to complete!
lave (pair-E1 ÷M) t with lave ÷M (t , one {!!})
... | γ , ih = γ , Eq.cong proj₁ ih
lave (pair-E2 ÷M) t with lave ÷M (one {!!} , t)
... | γ , ih = γ , Eq.cong proj₂ ih
lave (pair sp ÷M ÷N) (s , t) with lave ÷M s | lave ÷N t
... | γ₁ , ih-M | γ₂ , ih-N =
unsplit-env sp γ₁ γ₂ ,
Eq.cong₂ _,_ (Eq.trans (eval-unsplit sp γ₁ γ₂ (corr ÷M)) ih-M)
(begin eval (weaken (split-sym sp) (corr ÷N)) (unsplit-env sp γ₁ γ₂)
≡⟨ Eq.cong (eval (weaken (split-sym sp) (corr ÷N))) (unsplit-split sp γ₁ γ₂) ⟩
eval (weaken (split-sym sp) (corr ÷N)) (unsplit-env (split-sym sp) γ₂ γ₁)
≡⟨ eval-unsplit (split-sym sp) γ₂ γ₁ (corr ÷N) ⟩
ih-N)
-- works, but unsatisfactory!
-- this proof uses only one branch of the case
-- this choice is possible because both branches ÷M and ÷N have the same type
-- in general, U could be the union of the types of ÷M and ÷N
lave (sum-E{S = S}{T = T}{U = U} sp ÷L ÷M ÷N) u
with lave ÷M u | lave ÷N u
... | (γ₁ , x ⦂ s) , ih-M | (γ₂ , y ⦂ t) , ih-N
with lave ÷L (inj₁ s)
... | γ₀ , ih-L
=
unsplit-env sp γ₀ γ₁ ,
(begin [
(λ s₁ →
eval (weaken (lft (split-sym sp)) (corr ÷M))
(unsplit-env sp γ₀ γ₁ , x ⦂ s₁))
,
(λ t₁ →
eval (weaken (lft (split-sym sp)) (corr ÷N))
(unsplit-env sp γ₀ γ₁ , y ⦂ t₁))
]
(eval (weaken sp (corr ÷L)) (unsplit-env sp γ₀ γ₁))
≡⟨ Eq.cong [
(λ s₁ →
eval (weaken (lft (split-sym sp)) (corr ÷M))
(unsplit-env sp γ₀ γ₁ , x ⦂ s₁))
,
(λ t₁ →
eval (weaken (lft (split-sym sp)) (corr ÷N))
(unsplit-env sp γ₀ γ₁ , y ⦂ t₁))
] (eval-unsplit sp γ₀ γ₁ (corr ÷L)) ⟩
[
(λ s₁ →
eval (weaken (lft (split-sym sp)) (corr ÷M))
(unsplit-env sp γ₀ γ₁ , x ⦂ s₁))
,
(λ t₁ →
eval (weaken (lft (split-sym sp)) (corr ÷N))
(unsplit-env sp γ₀ γ₁ , y ⦂ t₁))
]
(eval (corr ÷L) γ₀)
≡⟨ Eq.cong
[
(λ s₁ →
eval (weaken (lft (split-sym sp)) (corr ÷M))
(unsplit-env sp γ₀ γ₁ , x ⦂ s₁))
,
(λ t₁ →
eval (weaken (lft (split-sym sp)) (corr ÷N))
(unsplit-env sp γ₀ γ₁ , y ⦂ t₁))
]
ih-L ⟩
eval (weaken (lft (split-sym sp)) (corr ÷M)) (unsplit-env sp γ₀ γ₁ , x ⦂ s)
≡⟨ Eq.cong (λ γ → eval (weaken (lft (split-sym sp)) (corr ÷M)) (γ , x ⦂ s)) (unsplit-split sp γ₀ γ₁) ⟩
eval (weaken (lft (split-sym sp)) (corr ÷M)) (unsplit-env (split-sym sp) γ₁ γ₀ , x ⦂ s)
≡⟨⟩
eval (weaken (lft (split-sym sp)) (corr ÷M)) (unsplit-env (lft (split-sym sp)) (γ₁ , x ⦂ s) γ₀)
≡⟨ eval-unsplit (lft (split-sym sp)) (γ₁ , x ⦂ s) γ₀ (corr ÷M) ⟩
ih-M)
lave (sum-E′{S = S}{T = T}{U = U} sp ÷L ÷M ÷N uuu) u = {!!}
|
"""
Kalman filters, log-likelihood and reasonable priors for a "Celerite"
process, as defined in [Foreman-Mackey et
al. (2017)](https://arxiv.org/abs/1703.09710).
A Celerite process is a sum of AR(1) and CARMA(2,0) processes; an
alternative description is as a sum of damped random walks and SHOs
driven by white noise. It is, in fact, a re-parameterization of a
CARMA process, but a convenient one for stably finding mode
frequencies, damping rates, and correlation timescales, because it is
a sum of low-order terms.
"""
module Celerite
using Ensemble
mutable struct CeleriteKalmanFilter
mu::Float64 # Mean
x::Array{ComplexF64, 1} # State mean
Vx::Array{ComplexF64, 2} # State variance
K::Array{ComplexF64, 1} # Kalman Gain
lambda::Array{ComplexF64, 1} # Evolution factors exp(roots*dt)
b::Array{ComplexF64, 2} # Rotated observation vector
roots::Array{ComplexF64, 1} # Eigenvalues of the ODEs
V::Array{ComplexF64, 2} # Stationary covariance
Vtemp::Array{ComplexF64, 2} # Storage for matrix ops
drw_rms::Array{Float64, 1}
drw_rates::Array{Float64, 1}
osc_rms::Array{Float64, 1}
osc_freqs::Array{Float64, 1}
osc_Qs::Array{Float64, 1}
end
function reset!(filt::CeleriteKalmanFilter)
p = size(filt.x, 1)
filt.x = zeros(ComplexF64, p)
filt.Vx = copy(filt.V)
filt.K = zeros(ComplexF64, p)
filt
end
function osc_roots(osc_freqs::Array{Float64, 1}, osc_Qs::Array{Float64, 1})
nosc = size(osc_freqs, 1)
oscroots = zeros(ComplexF64, nosc)
for i in 1:nosc
omega0 = 2.0*pi*osc_freqs[i]
alpha = omega0/(2.0*osc_Qs[i])
oscroots[i] = -alpha + omega0*1im
end
oscroots
end
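# Usage sketch (hypothetical values, not part of the module API): an SHO root
# has real part -omega0/(2Q), so for any positive frequency and Q it lies in
# the left half-plane and the corresponding term is stationary.
function _example_osc_roots()
    r = osc_roots([0.1], [3.0])[1]
    real(r) < 0.0 # true: the mode decays rather than grows
end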
function CeleriteKalmanFilter(mu::Float64, drw_rms::Array{Float64, 1}, drw_rates::Array{Float64, 1}, osc_rms::Array{Float64, 1}, osc_freqs::Array{Float64, 1}, osc_Qs::Array{Float64, 1})
ndrw = size(drw_rms,1)
nosc = size(osc_rms,1)
dim = ndrw + 2*nosc
oscroots = osc_roots(osc_freqs, osc_Qs)
roots = zeros(ComplexF64, dim)
ii = 1
for i in 1:ndrw
roots[ii] = -drw_rates[i]
ii+=1
end
for i in 1:nosc
roots[ii] = oscroots[i]
roots[ii+1] = conj(oscroots[i])
ii += 2
end
b = zeros(ComplexF64, dim)
iobs = 1
for i in 1:ndrw
b[iobs] = 1.0
iobs += 1
end
for i in 1:nosc
b[iobs] = 1.0
iobs += 2 # skip the derivative term
end
b = b'
U = zeros(ComplexF64, (dim,dim))
ii = 1
for i in 1:ndrw
U[ii,ii] = 1.0
ii += 1
end
for i in 1:nosc
U[ii,ii] = 1.0
U[ii, ii+1] = 1.0
U[ii+1,ii] = oscroots[i]
U[ii+1,ii+1] = conj(oscroots[i])
ii += 2
end
b = b*U # Rotated observation vector
e = zeros(ComplexF64, dim)
ii = 1
for i in 1:ndrw
e[ii] = 1.0
ii += 1
end
for i in 1:nosc
e[ii+1] = 1.0
ii += 2
end
J = U \ e
V = zeros(ComplexF64, (dim,dim))
ii = 1
for i in 1:ndrw
V[ii,ii] = -J[ii]*conj(J[ii])/(roots[ii] + conj(roots[ii]))
ii += 1
end
for i in 1:nosc
for j in 0:1
for k in 0:1
V[ii+j, ii+k] = -J[ii+j]*conj(J[ii+k])/(roots[ii+j] + conj(roots[ii+k]))
end
end
ii += 2
end
ii = 1
for i in 1:ndrw
s2 = b[1,ii]*V[ii,ii]*conj(b[1,ii])
s2 = s2[1]
V[ii,ii] *= drw_rms[i]*drw_rms[i]/s2
ii += 1
end
for i in 1:nosc
s2 = b[:,ii:ii+1]*V[ii:ii+1,ii:ii+1]*b[:,ii:ii+1]'
s2 = s2[1]
V[ii:ii+1, ii:ii+1] *= osc_rms[i]*osc_rms[i]/s2
ii += 2
end
CeleriteKalmanFilter(mu, zeros(ComplexF64, dim), V, zeros(ComplexF64, dim), zeros(ComplexF64, dim), b, roots, copy(V), zeros(ComplexF64, (dim,dim)), drw_rms, drw_rates, osc_rms, osc_freqs, osc_Qs)
end
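# Construction sketch (hypothetical parameter values): one damped random walk
# (rms 1.0, damping rate 0.5) plus one SHO term (rms 0.3, frequency 0.1, Q = 4)
# gives a state space of dimension 1 + 2 = 3.
function _example_filter()
    CeleriteKalmanFilter(0.0, [1.0], [0.5], [0.3], [0.1], [4.0])
end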
@inbounds function advance!(filt::CeleriteKalmanFilter, dt::Float64)
p = size(filt.x, 1)
for i in 1:p
x = filt.roots[i]*dt
filt.lambda[i] = exp(x)
end
lam = filt.lambda
for i in 1:p
filt.x[i] = lam[i]*filt.x[i]
end
for j in 1:p
for i in 1:p
a::ComplexF64 = lam[i]*conj(lam[j])
b::ComplexF64 = a*(filt.Vx[i,j] - filt.V[i,j])
filt.Vx[i,j] = b + filt.V[i,j]
end
end
filt
end
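# Consistency sketch: advancing from the stationary state leaves the covariance
# unchanged, because Vx == V makes the lam[i]*conj(lam[j])*(Vx - V) term vanish
# elementwise in the update above.
function _example_stationary(dt=2.3)
    filt = CeleriteKalmanFilter(0.0, [1.0], [0.5], Float64[], Float64[], Float64[])
    reset!(filt)
    advance!(filt, dt)
    maximum(abs.(filt.Vx .- filt.V)) < 1e-12 # expected: true
end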
@inbounds function observe!(filt::CeleriteKalmanFilter, y::Float64, dy::Float64)
p = size(filt.x, 1)
ey, vy = predict(filt)
vy += dy*dy
for i in 1:p
filt.K[i] = zero(filt.K[i])
for j in 1:p
filt.K[i] += filt.Vx[i,j]*conj(filt.b[1,j])/vy
end
end
for i in 1:p
filt.x[i] = filt.x[i] + (y - ey)*filt.K[i]
end
for j in 1:p
for i in 1:p
a::ComplexF64 = vy*filt.K[i]
b::ComplexF64 = a*conj(filt.K[j])
filt.Vx[i,j] = filt.Vx[i,j] - b
end
end
filt
end
@inbounds function predict(filt::CeleriteKalmanFilter)
p = size(filt.x,1)
yp = filt.mu
for i in 1:p
yp += real(filt.b[1,i]*filt.x[i])
end
vyp = 0.0
for i in 1:p
for j in 1:p
a::ComplexF64 = filt.b[1,i]*filt.Vx[i,j]
b::ComplexF64 = a*conj(filt.b[1,j])
vyp = vyp + real(b)
end
end
yp, vyp
end
function whiten(filt::CeleriteKalmanFilter, ts, ys, dys)
n = size(ts, 1)
reset!(filt)
zs = zeros(n)
for i in 1:n
yp, vyp = predict(filt)
zs[i] = (ys[i] - yp)/sqrt(vyp + dys[i]*dys[i])
observe!(filt, ys[i], dys[i])
if i < n
advance!(filt, ts[i+1]-ts[i])
end
end
zs
end
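# Usage sketch (synthetic data; `generate` is defined further below in this
# module): residuals whitened by the correct model should be approximately
# zero-mean with unit variance.
function _example_whiten(n=200)
    ts = sort(10.0 .* rand(n))
    dys = fill(0.1, n)
    filt = _example_filter()
    ys = generate(filt, ts, dys)
    whiten(filt, ts, ys, dys) # mean ~ 0 and std ~ 1 up to sampling noise
end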
function draw_and_collapse!(filt::CeleriteKalmanFilter)
nd = size(filt.x, 1)
try
for i in 1:nd
filt.Vx[i,i] = real(filt.Vx[i,i]) # Fix a roundoff error problem?
end
L = cholesky(Hermitian(filt.Vx)).L
filt.x = filt.x + L*randn(nd)
filt.Vx = zeros(ComplexF64, (nd, nd))
catch e
if isa(e, LinearAlgebra.PosDefException)
@warn "Current variance matrix not pos. def.---may be roundoff problem in generation."
F = eigen(filt.Vx)
for i in eachindex(F.values)
l = real(F.values[i])
v = F.vectors[:,i]
if l < 0.0
l = 0.0
end
filt.x = filt.x + sqrt(l)*randn()*v
end
filt.Vx = zeros(ComplexF64, (nd, nd))
else
rethrow()
end
end
end
function generate(filt::CeleriteKalmanFilter, ts, dys)
n = size(ts, 1)
nd = size(filt.x, 1)
ys = zeros(n)
reset!(filt)
for i in 1:n
# Draw a new state
draw_and_collapse!(filt)
y, _ = predict(filt)
ys[i] = y + dys[i]*randn()
if i < n
advance!(filt, ts[i+1]-ts[i])
end
end
ys
end
function log_likelihood(filt, ts, ys, dys)
n = size(ts, 1)
ll = -0.5*n*log(2.0*pi)
reset!(filt)
for i in 1:n
yp, vyp = predict(filt)
if vyp < 0.0
warn("Kalman filter has gone unstable!")
return -Inf
end
dy = ys[i] - yp
vy = vyp + dys[i]*dys[i]
ll -= 0.5*log(vy)
ll -= 0.5*dy*dy/vy
observe!(filt, ys[i], dys[i])
if i < n
advance!(filt, ts[i+1]-ts[i])
end
end
ll
end
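# Usage sketch: draw synthetic data from the model and score it; the likelihood
# evaluated at the generating parameters should typically beat values at nearby
# mis-specified parameters.
function _example_loglike(n=100)
    ts = sort(10.0 .* rand(n))
    dys = fill(0.1, n)
    filt = _example_filter()
    ys = generate(filt, ts, dys)
    log_likelihood(filt, ts, ys, dys)
end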
function residuals(filt, ts, ys, dys)
n = size(ts, 1)
resid = zeros(n)
dresid = zeros(n)
reset!(filt)
for i in 1:n
yp, vyp = predict(filt)
if vyp < 0.0
warn("Kalman filter has gone unstable!")
return resid, dresid
end
resid[i] = ys[i] - yp
dresid[i] = sqrt(vyp + dys[i]*dys[i])
observe!(filt, ys[i], dys[i])
if i < n
advance!(filt, ts[i+1]-ts[i])
end
end
resid, dresid
end
function raw_covariance(ts::Array{Float64, 1}, dys::Array{Float64, 1}, drw_rms::Array{Float64, 1}, drw_rates::Array{Float64, 1}, osc_rms::Array{Float64, 1}, osc_freqs::Array{Float64, 1}, osc_Qs::Array{Float64,1})
N = size(ts, 1)
ndrw = size(drw_rms,1)
nosc = size(osc_rms,1)
cov = zeros((N,N))
dts = zeros((N,N))
oscroots = osc_roots(osc_freqs, osc_Qs)
for j in 1:N
for i in 1:N
dts[i,j] = abs(ts[i] - ts[j])
end
end
for i in 1:ndrw
cov += drw_rms[i]*drw_rms[i]*exp.(-drw_rates[i].*dts)
end
for i in 1:nosc
A = 1.0 / (-4.0*real(oscroots[i])*(conj(oscroots[i]) - oscroots[i])*oscroots[i])
B = 1.0 / (-4.0*real(oscroots[i])*(oscroots[i] - conj(oscroots[i]))*conj(oscroots[i]))
s2 = osc_rms[i]*osc_rms[i] / (A+B)
cov = cov .+ real.(s2.*(A.*exp.(oscroots[i].*dts) .+ B.*exp.(conj(oscroots[i]).*dts)))
end
for i in 1:N
cov[i,i] += dys[i]*dys[i]
end
cov
end
function psd_drw(rms_amp, damp_rate, fs)
4.0*damp_rate*rms_amp*rms_amp./abs2.(2.0*pi*1im.*fs .+ damp_rate)
end
function psd_osc(rms_amp, freq, Q, fs)
r1 = osc_roots([freq], [Q])[1]
r2 = conj(r1)
norm = 1.0/real(2.0*r1*(r1-r2)*(r1+r2))
rms_amp*rms_amp/norm./abs2.((2.0*pi*1im.*fs .- r1).*(2.0*pi*1im.*fs .- r2))
end
function psd(filt::CeleriteKalmanFilter, fs::Array{Float64, 1})
Pfs = zeros(size(fs,1))
for i in 1:size(filt.drw_rms,1)
Pfs .= Pfs .+ psd_drw(filt.drw_rms[i], filt.drw_rates[i], fs)
end
for i in 1:size(filt.osc_rms,1)
Pfs .= Pfs .+ psd_osc(filt.osc_rms[i], filt.osc_freqs[i], filt.osc_Qs[i], fs)
end
Pfs
end
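# Usage sketch: evaluate the model PSD on a log-spaced frequency grid. Each DRW
# term contributes a Lorentzian 4*gamma*sigma^2/((2*pi*f)^2 + gamma^2), as
# implemented in psd_drw above.
function _example_psd()
    filt = _example_filter()
    fs = collect(10.0 .^ LinRange(-3.0, 1.0, 100))
    psd(filt, fs)
end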
function predict(filt::CeleriteKalmanFilter, ts, ys, dys, tsp)
# The algorithm here is to run the filter twice over the union of
# the data and prediction times. The first run is "forward" in
# time, and predicts at each time *before* observing the output at
# data times, and the second run is "backward" in time, and
# predicts at each time *after* observing the output at data
# times. Weighted-averaging of the two runs together gives the
# full internal state prediction incorporating all the data at the
# given times.
allts = vcat(ts, tsp)
obsflag = convert(Array{Bool, 1}, vcat(trues(size(ts, 1)), falses(size(tsp, 1))))
inds = sortperm(allts)
rinds = reverse(inds)
yspforward = zeros(size(allts, 1))
vyspforward = zeros(size(allts, 1))
reset!(filt)
for i in eachindex(allts)
yspforward[inds[i]], vyspforward[inds[i]] = predict(filt)
if obsflag[inds[i]]
observe!(filt, ys[inds[i]], dys[inds[i]])
end
if i < size(allts, 1)
advance!(filt, allts[inds[i+1]]-allts[inds[i]])
end
end
yspbackward = zeros(size(allts, 1))
vyspbackward = zeros(size(allts, 1))
reset!(filt)
for i in eachindex(allts)
if obsflag[rinds[i]]
observe!(filt, ys[rinds[i]], dys[rinds[i]])
end
yspbackward[rinds[i]], vyspbackward[rinds[i]] = predict(filt)
if i < size(allts, 1)
advance!(filt, allts[rinds[i]] - allts[rinds[i+1]])
end
end
ysp = (yspforward.*vyspbackward .+ yspbackward.*vyspforward) ./ (vyspbackward .+ vyspforward)
vysp = 1.0./(1.0./vyspforward .+ 1.0./vyspbackward)
ysp[size(ts,1)+1:end], vysp[size(ts, 1)+1:end]
end
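# Usage sketch: condition on observations (ts, ys, dys) and predict the
# de-noised process on a uniform grid spanning the data, returning the
# posterior mean and standard deviation at each grid point.
function _example_predict(ts, ys, dys)
    filt = _example_filter()
    tsp = collect(LinRange(minimum(ts), maximum(ts), 200))
    ysp, vysp = predict(filt, ts, ys, dys, tsp)
    ysp, sqrt.(vysp)
end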
struct CeleriteKalmanPosterior
ts::Array{Float64, 1}
ys::Array{Float64, 1}
dys::Array{Float64, 1}
ndrw::Int
nosc::Int
mumin::Float64
mumax::Float64
fmin::Float64
fmax::Float64
ratemin::Float64
ratemax::Float64
Qmin::Float64
Qmax::Float64
drw_rms_min::Float64
drw_rms_max::Float64
osc_rms_min::Float64
osc_rms_max::Float64
end
struct CeleriteKalmanParams
mu::Float64
nu::Float64
drw_rms::Array{Float64, 1}
drw_rates::Array{Float64, 1}
osc_rms::Array{Float64, 1}
osc_freqs::Array{Float64, 1}
osc_Qs::Array{Float64, 1}
end
function ndim(post::CeleriteKalmanPosterior)
2 + 2*post.ndrw + 3*post.nosc
end
function to_params(post::CeleriteKalmanPosterior, x::Array{Float64, 1})
nd = post.ndrw
no = post.nosc
mu = Parameterizations.bounded_value(x[1], post.mumin, post.mumax)
nu = Parameterizations.bounded_value(x[2], 0.1, 10.0)
drw_rms = Parameterizations.bounded_value.(x[3:3+nd-1], post.drw_rms_min, post.drw_rms_max)
drw_rates = Parameterizations.bounded_value.(x[3+nd:3+2*nd-1], post.ratemin, post.ratemax)
osc_rms = Parameterizations.bounded_value.(x[3+2*nd:3+2*nd+no-1], post.osc_rms_min, post.osc_rms_max)
osc_freqs = Parameterizations.bounded_value.(x[3+2*nd+no:3+2*nd+2*no-1], post.fmin, post.fmax)
osc_Qs = Parameterizations.bounded_value.(x[3+2*nd+2*no:3+2*nd+3*no-1], post.Qmin, post.Qmax)
CeleriteKalmanParams(mu, nu, drw_rms, drw_rates, osc_rms, osc_freqs, osc_Qs)
end
function to_array(post::CeleriteKalmanPosterior, p::CeleriteKalmanParams)
x = zeros(ndim(post))
nd = post.ndrw
no = post.nosc
x[1] = Parameterizations.bounded_param(p.mu, post.mumin, post.mumax)
x[2] = Parameterizations.bounded_param(p.nu, 0.1, 10)
x[3:3+nd-1] = Parameterizations.bounded_param.(p.drw_rms, post.drw_rms_min, post.drw_rms_max)
x[3+nd:3+2*nd-1] = Parameterizations.bounded_param.(p.drw_rates, post.ratemin, post.ratemax)
x[3+2*nd:3+2*nd+no-1] = Parameterizations.bounded_param.(p.osc_rms, post.osc_rms_min, post.osc_rms_max)
x[3+2*nd+no:3+2*nd+2*no-1] = Parameterizations.bounded_param.(p.osc_freqs, post.fmin, post.fmax)
x[3+2*nd+2*no:3+2*nd+3*no-1] = Parameterizations.bounded_param.(p.osc_Qs, post.Qmin, post.Qmax)
x
end
function log_prior(post::CeleriteKalmanPosterior, x::Array{Float64, 1})
log_prior(post, to_params(post, x), x)
end
function log_prior(post::CeleriteKalmanPosterior, p::CeleriteKalmanParams)
log_prior(post, p, to_array(post, p))
end
function log_prior(post::CeleriteKalmanPosterior, p::CeleriteKalmanParams, x::Array{Float64, 1})
nd = post.ndrw
no = post.nosc
lp = 0.0
# Flat prior on mu
lp += Parameterizations.bounded_logjac(p.mu, x[1], post.mumin, post.mumax)
# Flat-in-log prior on nu
lp += -log(p.nu) + Parameterizations.bounded_logjac(p.nu, x[2], 0.1, 10.0)
# Flat in log prior on rms
lp += -sum(log.(p.drw_rms)) + sum(Parameterizations.bounded_logjac(p.drw_rms, x[3:3+nd-1], post.drw_rms_min, post.drw_rms_max))
# Flat in log prior on rates
lp += -sum(log.(p.drw_rates)) + sum(Parameterizations.bounded_logjac(p.drw_rates, x[3+nd:3+2*nd-1], post.ratemin, post.ratemax))
# Flat in log prior on osc rms
lp += -sum(log.(p.osc_rms)) + sum(Parameterizations.bounded_logjac(p.osc_rms, x[3+2*nd:3+2*nd+no-1], post.osc_rms_min, post.osc_rms_max))
# Flat in log prior on osc freqs
lp += -sum(log.(p.osc_freqs)) + sum(Parameterizations.bounded_logjac(p.osc_freqs, x[3+2*nd+no:3+2*nd+2*no-1], post.fmin, post.fmax))
# Flat in log prior on Q
lp += -sum(log.(p.osc_Qs)) + sum(Parameterizations.bounded_logjac(p.osc_Qs, x[3+2*nd+2*no:3+2*nd+3*no-1], post.Qmin, post.Qmax))
lp
end
function log_likelihood(post::CeleriteKalmanPosterior, x::Array{Float64, 1})
log_likelihood(post, to_params(post, x))
end
function log_likelihood(post::CeleriteKalmanPosterior, p::CeleriteKalmanParams)
filt = CeleriteKalmanFilter(p.mu, p.drw_rms, p.drw_rates, p.osc_rms, p.osc_freqs, p.osc_Qs)
log_likelihood(filt, post.ts, post.ys, p.nu*post.dys)
end
function init(post::CeleriteKalmanPosterior, n::Int)
nd = post.ndrw
no = post.nosc
xs = zeros(ndim(post), n)
for i in 1:n
mu = post.mumin + (post.mumax-post.mumin)*rand()
nu = exp(log(0.1) + (log(10.0) - log(0.1))*rand())
drw_rms = exp.(log(post.drw_rms_min) + log(post.drw_rms_max/post.drw_rms_min)*rand(nd))
drw_rates = exp.(log(post.ratemin) + log(post.ratemax/post.ratemin)*rand(nd))
osc_rms = exp.(log(post.osc_rms_min) + log(post.osc_rms_max/post.osc_rms_min)*rand(no))
osc_freqs = exp.(log(post.fmin) + log(post.fmax/post.fmin)*rand(no))
osc_Qs = exp.(log(post.Qmin) + log(post.Qmax/post.Qmin)*rand(no))
p = CeleriteKalmanParams(mu, nu, drw_rms, drw_rates, osc_rms, osc_freqs, osc_Qs)
xs[:,i] = to_array(post, p)
end
xs
end
end
|
Originally conceived as a domestic hi-fi speaker, the NS-10 was designed by Akira Nakamura and launched in 1978. It was sold at the $400 price point. The speaker was poorly received and its commercial life was short. However, it took five years for its popularity to be established with professional users. As recording engineers came to rely on the NS-10 as a benchmark, it dominated the mixing of pop and rock music throughout the world for at least 20 years.
|
module Monoid
%access public export
||| The proof that some element is identity in the type
total
IsIdentity : (mon : Type) -> ((*) : mon -> mon -> mon) -> (e : mon) -> Type
IsIdentity mon (*) e = (a : mon) -> ((a*e) = a, (e*a) = a)
||| Given a type and a binary operation the type of proofs that the operation is associative
total
Associative : (typ : Type) -> ((*): typ -> typ -> typ) -> Type
Associative typ (*) = (a : typ) -> (b : typ) -> (c : typ) -> ((a * b) * c) = (a * (b * c))
||| Given a type and a binary operation the type of proofs that the operation is commutative
total
Commutative : (typ : Type) -> ((*) : typ -> typ -> typ) -> Type
Commutative typ (*) = (a : typ) -> (b : typ) -> (a * b) = (b * a)
||| Given a type and a binary operation the type of proofs that identity exists
total
IdentityExists : (typ : Type) -> ((*) : typ -> typ -> typ) -> Type
IdentityExists typ (*) = (e : typ ** (IsIdentity typ (*) e))
total
IsMonoid : (mon : Type) -> ((*) : mon -> mon -> mon) -> Type
IsMonoid mon (*) = (Associative mon (*), IdentityExists mon (*))
||| Gives the identity of the monoid
total
Monoid_id : (mon : Type) -> ((*) : mon -> mon -> mon) -> (IsMonoid mon (*))
-> (IdentityExists mon (*))
Monoid_id mon (*) (pfAss, pfId) = pfId
|
\documentclass[executivepaper]{article}
\usepackage{mathtools}
\everymath{\displaystyle}
\usepackage{amssymb}
\usepackage{amsfonts}
\usepackage{commath}
\usepackage{kantlipsum,graphicx}
\usepackage{amsmath}
\usepackage[utf8]{inputenc}
\usepackage{sectsty}
\usepackage{tcolorbox}
\usepackage{geometry}
\usepackage{tikz}
\usetikzlibrary{shapes,snakes}
\usepackage{float}
\setlength\parindent{3pt} % Removes all indentation from paragraphs - comment this line for an assignment with lots of text
\newcommand{\horrule}[1]{\rule{\linewidth}{#1}} % Create horizontal rule command with 1 argument of height
\newtheorem{definition}{Definition}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}{Corollary}[theorem]
\newtheorem{sidenote}{Side Note}
\newcommand{\KP}[1]{%
\begin{tikzpicture}[baseline=-\dimexpr\fontdimen22\textfont2\relax]
#1
\end{tikzpicture}%
}
\newcommand{\KPA}{%
\KP{\filldraw[color=gray, fill=none, thick] circle (0.3);}%
}
\newcommand{\KPB}{%
\KP{
\draw[color=gray,thick] (-0.3,0.3) -- (0.3,-0.3);
\draw[color=gray,thick] (-0.3,-0.3) -- (-0.05,-0.05);
\draw[color=gray,thick] (0.05,0.05) -- (0.3,0.3);
}%
}
\newcommand{\KPC}{%
\KP{%
\draw[color=gray,thick] (-0.3,0.3) .. controls (0,-0.05) .. (0.3,0.3);
\draw[color=gray,thick] (-0.3,-0.3) .. controls (0,0.05) .. (0.3,-0.3);
}%
}
\newcommand{\KPD}{%
\KP{%
\draw[color=gray,thick] (-0.3,-0.3) .. controls (0.05,0) .. (-0.3,0.3);
\draw[color=gray,thick] (0.3,-0.3) .. controls (-0.05,0) .. (0.3,0.3);
}%
}
\begin{document}
\title
{
\vspace*{-40mm}
\normalfont \normalsize
\horrule{0.5pt} \\[0.4cm] % Thin top horizontal rule
\huge Topology Final Exam Study Guide\\ % The assignment title
\horrule{0.5pt} \\[0.5cm] % Thick bottom horizontal rule
}
\author{Brendan Busey} % Your name
\date{\normalsize\today} % Today's date or a custom date
\maketitle
\begin{center}
\section*{Chapter 1}
\end{center}
\subsection*{1.1 Equivalence Relations}
\begin{tcolorbox}
\begin{definition}
\textit{A binary relation $\thicksim$ on a set X is an \textbf{equivalence relation} if and only if for all $x, y, z \in X$ it satisfies}
\begin{enumerate}
\item x $\thicksim$ x (reflexivity)
\item if x $\thicksim$ y, then y $\thicksim$ x (symmetry)
\item if x $\thicksim$ y and y $\thicksim$ z, then x $\thicksim$ z (transitivity)
\end{enumerate}
\end{definition}
\end{tcolorbox}
\section*{1.2 Bijections}
\begin{tcolorbox}
\begin{definition}
\textit{A function $f: X \rightarrow Y$ is an \textbf{injection} (or a \textbf{one-to-one} function) if and only if for any $x_{1}, x_{2} \in X$ we have}
\begin{center}
$f(x_{1})=f(x_{2}) \implies x_{1}=x_{2}$
\end{center}
\textit{The function is a \textbf{surjection} (or an \textbf{onto} function) if and only if for every $y \in Y$, there is $x \in X$ with $f(x)=y$. The function is a \textbf{bijection} if and only if it is an injection and a surjection.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{theorem}
\textit{Consider a function $f: X \rightarrow Y$. Then, f is a bijection if and only if f has an inverse function.}
\end{theorem}
\end{tcolorbox}
\pagebreak
\vspace*{-35mm}
\section*{1.3 Continuous Functions}
\begin{tcolorbox}
\begin{definition}
\textit{A function $f: X \rightarrow Y$ is continuous at $x_{0} \in X$ if and only if for every $\varepsilon > 0$ there is $\delta > 0$ such that $\forall ~ x \in X$, we have the implication that $d(x, x_{0}) < \delta \implies d(f(x), f(x_{0})) < \varepsilon$. A function is continuous if and only if it is continuous at each point of its domain.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{theorem}
\textit{Suppose A and B are regions of $\mathbb{R}^{2}$ that are bounded by polygons. Suppose $f: A \rightarrow Y$ and $g: B \rightarrow Y$ are continuous functions such that $f(x)=g(x) ~ \forall x \in A \cap B$. Then the function $h: A \cup B \rightarrow Y$ defined by}
\begin{center}
\[h(x)= \begin{cases}
f(x) & if ~ x \in A \\
g(x) & if ~ x \in B
\end{cases}
\]
\end{center}
is continuous.
\end{theorem}
\end{tcolorbox}
\subsection*{1.4 Topological Equivalence}
\begin{tcolorbox}
\begin{definition}
\textit{A \textbf{homeomorphism} (or \textbf{topological equivalence}) is a bijection $h: X \rightarrow Y$ such that both $h$ and $h^{-1}$ are continuous. The spaces X and Y are \textbf{homeomorphic} (or \textbf{topologically equivalent}) if and only if there is a homeomorphism from X to Y.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{The \textbf{standard disk} is the set \{$(x,y) \in \mathbb{R}^{2} ~ | ~ x^2+y^2 \leq 1$\}. A \textbf{disk} is any topological space homeomorphic to the standard disk. \\[2ex]
The \textbf{standard n-dimensional ball} (or more simply, the \textbf{standard n-ball}) is the set $\{(x_{1}, x_{2}, \ldots, x_{n}) \in \mathbb{R}^{n} ~ | ~ x_{1}^{2} + x_{2}^{2} + \cdots + x_{n}^{2} \leq 1\}$. An \textbf{n-ball} (or \textbf{n-cell}) is any topological space homeomorphic to the standard n-ball.\\[2ex]
The \textbf{standard} n-\textbf{dimensional sphere} (or more simply, the \textbf{standard} n-\textbf{sphere}) is the set $\{(x_{1}, x_{2}, \ldots, x_{n+1}) \in \mathbb{R}^{n+1} ~ | ~ x_{1}^{2} + x_{2}^{2} + \cdots + x_{n+1}^{2} = 1\}$. An n-\textbf{sphere} is any topological space homeomorphic to the standard n-sphere.}
\end{definition}
\end{tcolorbox}
\pagebreak
\vspace*{-35mm}
\subsection*{1.5 Topological Invariants}
\begin{tcolorbox}
\begin{definition}
\textit{A \textbf{path} in a space X is a continuous function $\alpha: [0,1] \rightarrow X$. Consider the equivalence relation between pairs of points in a set of X defined by $x \thicksim y$ if and only if there is a path $\alpha : [0,1] \rightarrow X$ with $\alpha(0)=x$ and $\alpha(1)=y$. The equivalence classes under this relation are called \textbf{path components} of X. A set such that every two points are joined by a path is said to be \textbf{path-connected}.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{theorem}
\textit{Suppose $\alpha : [0,1] \rightarrow A \cup B$ is a path with $\alpha(0) \in A$ and $\alpha(1) \in B$. Then, there is a sequence of points of A that converges to a point of B or else there is a sequence of points of B that converges to a point in A.}
\end{theorem}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{corollary}
\textit{A homeomorphism $h: X \rightarrow Y$ induces a bijection $h_{*}: P(X) \rightarrow P(Y)$. In particular, the number of path components of a space is topologically invariant.}
\end{corollary}
\end{tcolorbox}
\subsection*{1.6 Isotopy}
\begin{tcolorbox}
\begin{definition}
\textit{Suppose A and B are two subsets of a space X. An \textbf{ambient isotopy} from A to B in X is a continuous function $h : X \times [0,1] \rightarrow X$ that satisfies the following three conditions. We denote $h(x,t)$ by $h_{t}(x)$.}
\begin{center}
\begin{enumerate}
\item $h_{t}: X \rightarrow X$ is a homeomorphism for every t $\in$ [0,1]
\item $h_{0} ~ is ~ the ~ identity ~ function ~ on ~ X$
\item $h_{1}(A)=B$
\end{enumerate}
\end{center}
\end{definition}
\end{tcolorbox}
\begin{center}
\section*{Chapter 2}
\end{center}
\subsection*{2.1 Knots, Links, and Equivalences}
\begin{tcolorbox}
\begin{definition}
\textit{A \textbf{knot K} is a simple closed curve in $\mathbb{R}^{3}$ that can be broken into a finite number of straight line segments $e_{1}, e_{2}, \cdots, e_{n}$ such that the intersection of any segment $e_{k}$ with the other segments is exactly one endpoint of $e_{k}$ intersecting an endpoint of $e_{k-1}$ (or $e_{n}$ if $k=1$) and the other endpoint of $e_{k}$ intersecting an endpoint of $e_{k+1}$ (or $e_{1}$ if $k=n$).}
\end{definition}
\end{tcolorbox}
\pagebreak
\vspace*{-30mm}
\begin{tcolorbox}
\begin{definition}
\textit{Consider a triangle ABC with side AC matching one of the line segments of a knot K. In the plane determined by the triangle, we require that the region bounded by ABC intersects K only in the edge AC. A \textbf{triangular detour} involves replacing the edge AC of knot K with the two edges AB and BC to produce a new knot L. With the same notation, a \textbf{triangular shortcut} involves replacing the two edges AB and BC of L with the single edge AC to produce knot K. A \textbf{triangular move} is either a triangular detour or a triangular shortcut. Two knots are \textbf{equivalent} if and only if there is a finite sequence of triangular moves that changes the first knot into the second.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{A \textbf{Link} is the nonempty union of a finite number of disjoint knots.}
\end{definition}
\end{tcolorbox}
\subsection*{2.2 Knot Diagrams}
\begin{tcolorbox}
\begin{definition}[General Position Rule of Thumb]
\textit{Suppose two piecewise-linear objects are embedded in general position in $\mathbb{R}^{n}$. Suppose A is a vertex, edge, face, or analogous higher-dimensional part of one object and B is a vertex, edge, face, or analogous higher-dimensional part of the other object. If the intersection $A \cap B$ is nonempty, then}
\begin{center}
dim($A \cap B$)=dim(A)+dim(B)-n
\end{center}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{The orthogonal projection of a knot onto a plane is a \textbf{regular projection} if and only if no vertex projects to the image of another point of the knot and there are no triple points.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{The \textbf{crossing number} of a knot K is the minimum number of crossing points that occur in the knot diagrams of all knots equivalent to K.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{The \textbf{unknotting number} is the minimum number of times the knot must be passed through itself (\textbf{crossing switch}) to untie it.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{A \textbf{trivial knot} is a knot that is equivalent to a triangle. A \textbf{trivial link} is a link that is equivalent to the union of disjoint triangles lying in a plane}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{A knot is \textbf{alternating} if and only if it is equivalent to a knot with a diagram in which underpasses alternate with overpasses as you travel around the knot}
\end{definition}
\end{tcolorbox}
\pagebreak
\vspace*{-30mm}
\subsection*{2.3 Reidmeister Moves}
\begin{figure}[H]
\centering
\includegraphics[scale=0.5]{Reidemeister_move_1.png}
\caption{Type 1}
\end{figure}
\vspace{2mm}
\begin{figure}[H]
\centering
\includegraphics[scale=0.5]{Reidemeister_move_2.png}
\caption{Type 2}
\end{figure}
\pagebreak
\vspace*{-40mm}
\begin{figure}[H]
\centering
\includegraphics[scale=0.5]{Reidemeister_move_3.png}
\caption{Type 3}
\end{figure}
\vspace{2mm}
\begin{tcolorbox}
\begin{theorem}
\textit{If two links are equivalent, then their diagrams, subject to ambient isotopies of the plane, are related by a sequence of Reidemeister moves.}
\end{theorem}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{An \textbf{orientation} of a link is a choice of direction to travel around each component of the link. Consider a crossing in a regular projection of an oriented link. Stand on the overpass and face in the direction of the orientation. The crossing is \textbf{right-handed} if and only if traffic on the underpass goes from right to left; the crossing is \textbf{left-handed} if and only if traffic on the underpass goes from left to right. In a regular projection of an oriented link of two components, assign +1 to right-handed crossings and -1 to left-handed crossings. Add up the numbers assigned to crossings involving both components. One half this sum is the \textbf{linking number} of the two oriented components of the link.}
\end{definition}
\end{tcolorbox}
\subsection*{2.4 Colorings}
\begin{tcolorbox}
\begin{definition}
\textit{The diagram of a knot is \textbf{colorable} if and only if each arc can be assigned one of three colors subject to the two conditions:}
\begin{center}
\begin{enumerate}
\item At least two colors appear
\item At any crossing where two colors appear, all three colors appear
\end{enumerate}
\end{center}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{theorem}
\textit{The colorability of a knot diagram is an invariant property of the knot type.}
\end{theorem}
\end{tcolorbox}
\pagebreak
\vspace*{-20mm}
\begin{tcolorbox}
\begin{definition}
\textit{Let p be an odd number greater than two. A knot is p-colorable if at every crossing:}
\begin{center}
\begin{enumerate}
\item At least two colors appear
\item You can solve $\mathrm{color}_{1} + \mathrm{color}_{2} \equiv 2x \pmod{p}$, where $\mathrm{color}_{1}$ and $\mathrm{color}_{2}$ are the numbers assigned to the arcs passing under, $x$ is the number assigned to the overstrand, and $p$ is the number of colors used
\end{enumerate}
\end{center}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{theorem}
\textit{The colorability of a knot diagram is an invariant property of the knot type.}
\end{theorem}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{theorem}
\textit{The representation of a knot diagram on a wheel with p colors is an invariant property of the knot type.}
\end{theorem}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{The determinant of a knot is the absolute value of its Alexander Polynomial evaluated at $-1$ (simply, plug in $-1$ for $t$)}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{theorem}
\textit{A knot is p-\textbf{colorable} for prime p greater than two if and only if p divides its determinant}
\end{theorem}
\end{tcolorbox}
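\vspace{2mm}
\begin{tcolorbox}
\begin{sidenote}
\textit{Worked example: the trefoil knot has Alexander Polynomial $t^{2}-t+1$, so its determinant is $|(-1)^{2}-(-1)+1|=3$. Since 3 divides 3, the trefoil is 3-colorable.}
\end{sidenote}
\end{tcolorbox}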
\subsection*{2.5 The Alexander Polynomial}
\begin{tcolorbox}
\textit{Steps in computing the Alexander Polynomial:}
\begin{center}
\begin{enumerate}
\item Label the crossings $x_{1}, x_{2}, \cdots, x_{n}$
\item Label the arcs $a_{1}, a_{2}, \cdots, a_{n}$
\item Choose an orientation for the knot
\item As you travel around the knot in the chosen orientation, stand on the overpass of each crossing. Label the overstrand with $1-t$, the left-end of the understrand $t$, and the right-end of the understrand -1
\item Create the arc/crossing matrix
\item Compute the determinant of the matrix, which is the Alexander Polynomial
\end{enumerate}
\end{center}
\end{tcolorbox}
\pagebreak
\vspace*{-30mm}
\begin{tcolorbox}
\begin{definition}
\textit{The projection of an oriented knot divides the plane into a number of regions. The \textbf{index} of one of these regions is the net number of times the projection winds counterclockwise around any point in the region.}
\end{definition}
\end{tcolorbox}
\begin{tcolorbox}
\begin{definition}
\textit{The \textbf{index} of a crossing of a knot diagram is the common value of the index of two of the regions near the crossing.}
\end{definition}
\end{tcolorbox}
\begin{tcolorbox}
\begin{theorem}
\textit{The Alexander Polynomial of an oriented knot is invariant under Reidemeister moves.}
\end{theorem}
\end{tcolorbox}
\subsection*{2.6 Skein Relations}
\begin{tcolorbox}
\textit{Calculating the $\Delta$ polynomial is the same as calculating the Alexander Polynomial}
\end{tcolorbox}
\subsection*{2.7 The Jones Polynomial}
\begin{tcolorbox}
\textbf{Rules for the Bracket Polynomial} \textit{The Kauffman \textbf{Bracket Polynomial} of a regular projection of a link is a polynomial in integer powers of the variable A defined by the following three rules:}
\begin{center}
\begin{enumerate}
%the first rule
\item
$\left\langle\KPA\right\rangle=1$
%the second rule
\item
$\left\langle L \cup \KPA\right\rangle=(-A^{2}-A^{-2})\langle L\rangle$
%the third rule
\item
$\left\langle\KPB\right\rangle=
A\left\langle\KPC\right\rangle + A^{-1} \left\langle \KPD \right\rangle$
\end{enumerate}
\end{center}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{The \textbf{writhe} w(L) of the regular projection L of a link is the number of right-handed crossings minus the number of left-handed crossings.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{The X(L) polynomial is defined as:}
\begin{center}
$X(L)=(-A)^{-3w(L)} \big \langle L \big \rangle$, where $\big \langle L \big \rangle$ is the bracket polynomial for L and $w(L)$ is the writhe of L
\end{center}
\end{definition}
\end{tcolorbox}
\pagebreak
\vspace*{-30mm}
\begin{tcolorbox}
\textit{Steps in calculating the Jones Polynomial:}
\begin{center}
\begin{enumerate}
\item Calculate the Bracket Polynomial
\item Calculate the $X$-polynomial
\item Substitute $t^{-\frac{1}{4}}$ in for every ``A'' in the $X$-polynomial and simplify
\end{enumerate}
\end{center}
\end{tcolorbox}
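\vspace{2mm}
\begin{tcolorbox}
\begin{sidenote}
\textit{Quick check: for the standard unknot diagram (a single circle with no crossings), rule 1 gives $\big \langle L \big \rangle = 1$ and the writhe is $w(L)=0$, so $X(L)=(-A)^{0} \cdot 1 = 1$ and the Jones Polynomial of the unknot is $1$.}
\end{sidenote}
\end{tcolorbox}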
\begin{center}
\section*{Chapter 3 Surfaces}
\end{center}
\subsection*{3.1 Definition and Examples}
\begin{tcolorbox}
\begin{definition}
\textit{In a space with a way of measuring distances between points, a \textbf{neighborhood} of a point is a subset that contains all points within some positive distance of the point}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{A \textbf{surface} (or 2 \textbf{-manifold}) is a space that is homeomorphic to a nonempty subset of finite-dimensional Euclidean space and in which every point has a neighborhood homeomorphic to $\mathbb{R}^{2}$. We sometimes also wish to admit \textbf{boundary} points, which have neighborhoods homeomorphic to the half-plane $\{(x,y) \in \mathbb{R}^{2} ~ | ~ y \geq 0\}$}.
\end{definition}
\end{tcolorbox}
\subsection*{3.2 Cut-and-Paste Techniques}
\begin{tcolorbox}
\begin{definition}
\textit{Let S and T be path-connected surfaces. Remove the interior of a disk from each surface by cutting along the boundaries of the disks. Glue the remaining surfaces together along the newly formed boundary components. The resulting surface is the \textbf{connected sum} of S and T. It is denoted S\texttt{\#}T.}
\end{definition}
\end{tcolorbox}
\subsection*{3.3 The Euler Characteristic and Orientability}
\begin{tcolorbox}
\begin{definition}
\textit{A \textbf{triangulation} of a space is a decomposition of the space into a union of disks, arcs, and points. The disks are called \textbf{faces}, the arcs are called \textbf{edges}, and the points are \textbf{vertices} of the triangulation. A face intersects other components of a triangulation only along its boundary; and the boundary of a face consists of three edges and three vertices. An edge intersects other edges and the vertices only at its endpoints; and both endpoints of an edge are vertices.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{A triangulated space is \textbf{compact} if and only if it consists of a finite number of faces, edges, and vertices.}
\end{definition}
\end{tcolorbox}
\pagebreak
\vspace*{-30mm}
\begin{tcolorbox}
\begin{definition}
\textit{The \textbf{Euler characteristic} of a compact triangulated space S is the number of vertices minus the number of edges plus the number of faces. The Euler characteristic of S is denoted by $\chi(S)$.}
\end{definition}
\end{tcolorbox}
\begin{tcolorbox}
\begin{theorem}
\textit{Every closed, path-connected surface is homeomorphic to exactly one of:}
\begin{center}
\begin{enumerate}
\item A 2-sphere
\item A connected sum of Tori
\item A connected sum of projective planes
\end{enumerate}
\end{center}
\end{theorem}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{sidenote}
\textit{A surface is orientable if it is 2-colorable or does not contain a M\"{o}bius band}
\end{sidenote}
\end{tcolorbox}
\begin{tcolorbox}
\begin{theorem}
\textit{Suppose $A$ and $B$ are triangulated so that $A \cap B$ is also triangulated. Then, $\chi(A \cup B)=\chi(A)+\chi(B)-\chi(A \cap B)$}.
\end{theorem}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{theorem}
\textit{The surface formed by taking the connected sum of g tori and cutting out disks to leave b boundary components has Euler characteristic 2-2g-b. The surface formed by taking the connected sum of n projective planes and cutting out disks to leave b boundary components has Euler characteristic 2-n-b}.
\end{theorem}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{An \textbf{orientation} of a polygonal face of a triangulated surface is the choice of one of the two possible orientations of the boundary curve of the face. A surface is \textbf{orientable} if and only if it is possible to choose orientations of all the faces of a triangulation of the surface so that whenever two faces share a common edge, the orientation of the faces induce opposite orientations on the edge.}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{$\chi(f_{1}\texttt{\#}f_{2})=\chi(f_{1}) + \chi(f_{2}) - 2$}
\end{definition}
\end{tcolorbox}
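\vspace{2mm}
\begin{tcolorbox}
\begin{sidenote}
\textit{Worked example: the torus has $\chi(T)=0$, so $\chi(T\texttt{\#}T)=0+0-2=-2$; the projective plane has $\chi(P)=1$, so $\chi(P\texttt{\#}P)=1+1-2=0$. Both values match the table below.}
\end{sidenote}
\end{tcolorbox}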
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{In the word you obtain for the surface, if some letter appears without its inverse, the surface is not orientable. If every letter appears together with its inverse, it is orientable}
\end{definition}
\end{tcolorbox}
\vspace{2mm}
\begin{tcolorbox}
\begin{definition}
\textit{Just concatenate the two individual words of each surface to get a word for the connected sum of the two surfaces}
\end{definition}
\end{tcolorbox}
\pagebreak
\vspace*{-30mm}
\begin{tcolorbox}
\begin{sidenote}
\textit{The table below helps you figure out what surface is being asked for based on \textbf{Orientability} and the \textbf{Euler characteristic}}
\vspace{2mm}
\begin{tabular}{||c c c||}
\hline
\textit{$\chi$} & \textit{Orientable} & \textit{Non-orientable} \\ [0.5ex]
\hline\hline
2 & Sphere & \\
\hline
1 & & P \\
\hline
0 & T & P \texttt{\#} P \\
\hline
-1 & & P \texttt{\#} P \texttt{\#} P \\
\hline
-2 & T \texttt{\#} T & P \texttt{\#} P \texttt{\#} P \texttt{\#} P \\
\hline
-3 & & P \texttt{\#} P \texttt{\#} P \texttt{\#} P \texttt{\#} P \\
\hline
-4 & T \texttt{\#} T \texttt{\#} T & P \texttt{\#} P \texttt{\#} P \texttt{\#} P \texttt{\#} P \texttt{\# P} \\ [1ex]
\hline
\end{tabular}
\end{sidenote}
\end{tcolorbox}
\end{document} |
The monomial $x^0$ is equal to $1$. |
State Before: 𝕜 : Type u_1
E : Type u_2
F : Type ?u.228490
β : Type ?u.228493
inst✝⁴ : OrderedRing 𝕜
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
h : Convex 𝕜 s
x y : E
hx : x ∈ s
hy : y ∈ s
t : 𝕜
ht : t ∈ Icc 0 1
⊢ x + t • (y - x) ∈ s
State After: 𝕜 : Type u_1
E : Type u_2
F : Type ?u.228490
β : Type ?u.228493
inst✝⁴ : OrderedRing 𝕜
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
h : Convex 𝕜 s
x y : E
hx : x ∈ s
hy : y ∈ s
t : 𝕜
ht : t ∈ Icc 0 1
⊢ t • (y - x) + x ∈ s
Tactic: rw [add_comm]
State Before: 𝕜 : Type u_1
E : Type u_2
F : Type ?u.228490
β : Type ?u.228493
inst✝⁴ : OrderedRing 𝕜
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
h : Convex 𝕜 s
x y : E
hx : x ∈ s
hy : y ∈ s
t : 𝕜
ht : t ∈ Icc 0 1
⊢ t • (y - x) + x ∈ s
State After: no goals
Tactic: exact h.lineMap_mem hx hy ht
(* Author: Tobias Nipkow *)
section "Splay Tree Implementation of Maps"
theory Splay_Map
imports
Splay_Tree
"HOL-Data_Structures.Map_Specs"
begin
function splay :: "'a::linorder \<Rightarrow> ('a*'b) tree \<Rightarrow> ('a*'b) tree" where
"splay x Leaf = Leaf" |
"x = fst a \<Longrightarrow> splay x (Node t1 a t2) = Node t1 a t2" |
"x = fst a \<Longrightarrow> x < fst b \<Longrightarrow> splay x (Node (Node t1 a t2) b t3) = Node t1 a (Node t2 b t3)" |
"x < fst a \<Longrightarrow> splay x (Node Leaf a t) = Node Leaf a t" |
"x < fst a \<Longrightarrow> x < fst b \<Longrightarrow> splay x (Node (Node Leaf a t1) b t2) = Node Leaf a (Node t1 b t2)" |
"x < fst a \<Longrightarrow> x < fst b \<Longrightarrow> t1 \<noteq> Leaf \<Longrightarrow>
splay x (Node (Node t1 a t2) b t3) =
(case splay x t1 of Node t11 y t12 \<Rightarrow> Node t11 y (Node t12 a (Node t2 b t3)))" |
"fst a < x \<Longrightarrow> x < fst b \<Longrightarrow> splay x (Node (Node t1 a Leaf) b t2) = Node t1 a (Node Leaf b t2)" |
"fst a < x \<Longrightarrow> x < fst b \<Longrightarrow> t2 \<noteq> Leaf \<Longrightarrow>
splay x (Node (Node t1 a t2) b t3) =
(case splay x t2 of Node t21 y t22 \<Rightarrow> Node (Node t1 a t21) y (Node t22 b t3))" |
"fst a < x \<Longrightarrow> x = fst b \<Longrightarrow> splay x (Node t1 a (Node t2 b t3)) = Node (Node t1 a t2) b t3" |
"fst a < x \<Longrightarrow> splay x (Node t a Leaf) = Node t a Leaf" |
"fst a < x \<Longrightarrow> x < fst b \<Longrightarrow> t2 \<noteq> Leaf \<Longrightarrow>
splay x (Node t1 a (Node t2 b t3)) =
(case splay x t2 of Node t21 y t22 \<Rightarrow> Node (Node t1 a t21) y (Node t22 b t3))" |
"fst a < x \<Longrightarrow> x < fst b \<Longrightarrow> splay x (Node t1 a (Node Leaf b t2)) = Node (Node t1 a Leaf) b t2" |
"fst a < x \<Longrightarrow> fst b < x \<Longrightarrow> splay x (Node t1 a (Node t2 b Leaf)) = Node (Node t1 a t2) b Leaf" |
"fst a < x \<Longrightarrow> fst b < x \<Longrightarrow> t3 \<noteq> Leaf \<Longrightarrow>
splay x (Node t1 a (Node t2 b t3)) =
(case splay x t3 of Node t31 y t32 \<Rightarrow> Node (Node (Node t1 a t2) b t31) y t32)"
apply(atomize_elim)
apply(auto)
(* 1 subgoal *)
apply (subst (asm) neq_Leaf_iff)
apply(auto)
apply (metis tree.exhaust surj_pair less_linear)+
done
termination splay
by lexicographic_order
lemma splay_code: "splay (x::_::linorder) t = (case t of Leaf \<Rightarrow> Leaf |
Node al a ar \<Rightarrow> (case cmp x (fst a) of
EQ \<Rightarrow> t |
LT \<Rightarrow> (case al of
Leaf \<Rightarrow> t |
Node bl b br \<Rightarrow> (case cmp x (fst b) of
EQ \<Rightarrow> Node bl b (Node br a ar) |
LT \<Rightarrow> if bl = Leaf then Node bl b (Node br a ar)
else case splay x bl of
Node bll y blr \<Rightarrow> Node bll y (Node blr b (Node br a ar)) |
GT \<Rightarrow> if br = Leaf then Node bl b (Node br a ar)
else case splay x br of
Node brl y brr \<Rightarrow> Node (Node bl b brl) y (Node brr a ar))) |
GT \<Rightarrow> (case ar of
Leaf \<Rightarrow> t |
Node bl b br \<Rightarrow> (case cmp x (fst b) of
EQ \<Rightarrow> Node (Node al a bl) b br |
LT \<Rightarrow> if bl = Leaf then Node (Node al a bl) b br
else case splay x bl of
Node bll y blr \<Rightarrow> Node (Node al a bll) y (Node blr b br) |
GT \<Rightarrow> if br=Leaf then Node (Node al a bl) b br
else case splay x br of
Node bll y blr \<Rightarrow> Node (Node (Node al a bl) b bll) y blr))))"
by(auto split!: tree.split)
definition lookup :: "('a*'b)tree \<Rightarrow> 'a::linorder \<Rightarrow> 'b option" where "lookup t x =
(case splay x t of Leaf \<Rightarrow> None | Node _ (a,b) _ \<Rightarrow> if x=a then Some b else None)"
hide_const (open) insert
fun update :: "'a::linorder \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree \<Rightarrow> ('a*'b) tree" where
"update x y t = (if t = Leaf then Node Leaf (x,y) Leaf
else case splay x t of
Node l a r \<Rightarrow> if x = fst a then Node l (x,y) r
else if x < fst a then Node l (x,y) (Node Leaf a r) else Node (Node l a Leaf) (x,y) r)"
definition delete :: "'a::linorder \<Rightarrow> ('a*'b) tree \<Rightarrow> ('a*'b) tree" where
"delete x t = (if t = Leaf then Leaf
else case splay x t of Node l a r \<Rightarrow>
if x = fst a
then if l = Leaf then r else case splay_max l of Node l' m r' \<Rightarrow> Node l' m r
else Node l a r)"
subsection "Functional Correctness Proofs"
lemma splay_Leaf_iff: "(splay x t = Leaf) = (t = Leaf)"
by(induction x t rule: splay.induct) (auto split: tree.splits)
subsubsection "Proofs for lookup"
lemma lookup_eq:
"sorted1(inorder t) \<Longrightarrow> lookup t x = map_of (inorder t) x"
by(auto simp: lookup_def splay_Leaf_iff splay_map_of_inorder split: tree.split)
subsubsection "Proofs for update"
lemma inorder_splay: "inorder(splay x t) = inorder t"
by(induction x t rule: splay.induct)
(auto simp: neq_Leaf_iff split: tree.split)
lemma sorted_splay:
"sorted1(inorder t) \<Longrightarrow> splay x t = Node l a r \<Longrightarrow>
sorted(map fst (inorder l) @ x # map fst (inorder r))"
unfolding inorder_splay[of x t, symmetric]
by(induction x t arbitrary: l a r rule: splay.induct)
(auto simp: sorted_lems sorted_Cons_le sorted_snoc_le splay_Leaf_iff split: tree.splits)
lemma inorder_update_splay:
"sorted1(inorder t) \<Longrightarrow> inorder(update x y t) = upd_list x y (inorder t)"
using inorder_splay[of x t, symmetric] sorted_splay[of t x]
by(auto simp: upd_list_simps upd_list_Cons upd_list_snoc neq_Leaf_iff split: tree.split)
subsubsection "Proofs for delete"
lemma inorder_splay_maxD:
"splay_max t = Node l a r \<Longrightarrow> sorted1(inorder t) \<Longrightarrow>
inorder l @ [a] = inorder t \<and> r = Leaf"
by(induction t arbitrary: l a r rule: splay_max.induct)
(auto simp: sorted_lems split: tree.splits if_splits)
lemma inorder_delete_splay:
"sorted1(inorder t) \<Longrightarrow> inorder(delete x t) = del_list x (inorder t)"
using inorder_splay[of x t, symmetric] sorted_splay[of t x]
by (auto simp: del_list_simps del_list_sorted_app delete_def del_list_notin_Cons inorder_splay_maxD
split: tree.splits)
subsubsection "Overall Correctness"
interpretation Map_by_Ordered
where empty = empty and lookup = lookup and update = update
and delete = delete and inorder = inorder and inv = "\<lambda>_. True"
proof (standard, goal_cases)
case 2 thus ?case by(simp add: lookup_eq)
next
case 3 thus ?case by(simp add: inorder_update_splay del: update.simps)
next
case 4 thus ?case by(simp add: inorder_delete_splay)
qed (auto simp: empty_def)
end
|
State Before: E : Type u
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℝ E
f : E → ℝ
a : E
f' : E →L[ℝ] ℝ
s : Set E
h : IsLocalMaxOn f s a
y : E
hy : y ∈ posTangentConeAt s a
hy' : -y ∈ posTangentConeAt s a
hf : ¬DifferentiableWithinAt ℝ f s a
⊢ ↑(fderivWithin ℝ f s a) y = 0
State After: E : Type u
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℝ E
f : E → ℝ
a : E
f' : E →L[ℝ] ℝ
s : Set E
h : IsLocalMaxOn f s a
y : E
hy : y ∈ posTangentConeAt s a
hy' : -y ∈ posTangentConeAt s a
hf : ¬DifferentiableWithinAt ℝ f s a
⊢ ↑0 y = 0
Tactic: rw [fderivWithin_zero_of_not_differentiableWithinAt hf]
State Before: E : Type u
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℝ E
f : E → ℝ
a : E
f' : E →L[ℝ] ℝ
s : Set E
h : IsLocalMaxOn f s a
y : E
hy : y ∈ posTangentConeAt s a
hy' : -y ∈ posTangentConeAt s a
hf : ¬DifferentiableWithinAt ℝ f s a
⊢ ↑0 y = 0
State After: no goals
Tactic: rfl
{-
- Automatically generate UARel and DUARel instances
-}
{-# OPTIONS --no-exact-split --safe #-}
module Cubical.Displayed.Auto where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Function
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Univalence
open import Cubical.Displayed.Base
open import Cubical.Displayed.Subst
open import Cubical.Displayed.Morphism
open import Cubical.Displayed.Constant
open import Cubical.Displayed.Function
open import Cubical.Displayed.Generic
open import Cubical.Displayed.Sigma
open import Cubical.Displayed.Unit
open import Cubical.Displayed.Universe
open import Cubical.Data.List.Base
open import Cubical.Data.Nat.Base
open import Cubical.Data.Sigma.Base
open import Cubical.Data.Unit.Base
import Agda.Builtin.Reflection as R
open import Cubical.Reflection.Base
-- Descriptor language
mutual
data UARelDesc : ∀ {ℓA ℓ≅A} {A : Type ℓA} (𝒮-A : UARel A ℓ≅A) → Typeω where
generic : ∀ {ℓA} {A : Type ℓA} → UARelDesc (𝒮-generic A)
univ : ∀ ℓU → UARelDesc (𝒮-Univ ℓU)
-- Having a special descriptor for non-dependent × helps to avoid
-- combinatorial explosion. Automation will try to apply this first.
prod : ∀ {ℓA ℓ≅A ℓB ℓ≅B}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A} (dA : UARelDesc 𝒮-A)
{B : Type ℓB} {𝒮-B : UARel B ℓ≅B} (dB : UARelDesc 𝒮-B)
→ UARelDesc (𝒮-A ×𝒮 𝒮-B)
sigma : ∀ {ℓA ℓ≅A ℓB ℓ≅B}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A} (dA : UARelDesc 𝒮-A)
{B : A → Type ℓB} {𝒮ᴰ-B : DUARel 𝒮-A B ℓ≅B} (dB : DUARelDesc 𝒮-A 𝒮ᴰ-B)
→ UARelDesc (∫ 𝒮ᴰ-B)
param : ∀ {ℓA ℓB ℓ≅B} (A : Type ℓA)
{B : Type ℓB} {𝒮-B : UARel B ℓ≅B} (dB : UARelDesc 𝒮-B)
→ UARelDesc (A →𝒮 𝒮-B)
pi : ∀ {ℓA ℓ≅A ℓB ℓ≅B}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A} (dA : UARelDesc 𝒮-A)
{B : A → Type ℓB} {𝒮ᴰ-B : DUARel 𝒮-A B ℓ≅B} (dB : DUARelDesc 𝒮-A 𝒮ᴰ-B)
→ UARelDesc (𝒮-Π 𝒮-A 𝒮ᴰ-B)
unit : UARelDesc 𝒮-Unit
-- Projections from one UARel to another
data UARelReindex : ∀ {ℓA ℓ≅A ℓC ℓ≅C}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{C : Type ℓC} {𝒮-C : UARel C ℓ≅C}
(f : UARelHom 𝒮-A 𝒮-C)
→ Typeω
where
id : ∀ {ℓA ℓ≅A} {A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
→ UARelReindex (𝒮-id 𝒮-A)
∘fst : ∀ {ℓA ℓ≅A ℓB ℓ≅B ℓC ℓ≅C}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : A → Type ℓB} {𝒮ᴰ-B : DUARel 𝒮-A B ℓ≅B}
{C : Type ℓC} {𝒮-C : UARel C ℓ≅C}
{f : UARelHom 𝒮-A 𝒮-C}
→ UARelReindex f
→ UARelReindex (𝒮-∘ f (𝒮-fst {𝒮ᴰ-B = 𝒮ᴰ-B}))
∘snd : ∀ {ℓA ℓ≅A ℓB ℓ≅B ℓC ℓ≅C}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : Type ℓB} {𝒮-B : UARel B ℓ≅B}
{C : Type ℓC} {𝒮-C : UARel C ℓ≅C}
{f : UARelHom 𝒮-B 𝒮-C}
→ UARelReindex f
→ UARelReindex (𝒮-∘ f (𝒮-snd {𝒮-A = 𝒮-A}))
∘app : ∀ {ℓA ℓB ℓ≅B ℓC ℓ≅C}
{A : Type ℓA}
{B : Type ℓB} {𝒮-B : UARel B ℓ≅B}
{C : Type ℓC} {𝒮-C : UARel C ℓ≅C}
{f : UARelHom 𝒮-B 𝒮-C}
→ UARelReindex f
→ (a : A) → UARelReindex (𝒮-∘ f (𝒮-app a))
data SubstRelDesc : ∀ {ℓA ℓ≅A ℓB}
{A : Type ℓA} (𝒮-A : UARel A ℓ≅A)
{B : A → Type ℓB} (𝒮ˢ-B : SubstRel 𝒮-A B) → Typeω
where
generic : ∀ {ℓA ℓ≅A ℓB} {A : Type ℓA} {𝒮-A : UARel A ℓ≅A} {B : A → Type ℓB}
→ SubstRelDesc 𝒮-A (𝒮ˢ-generic 𝒮-A B)
constant : ∀ {ℓA ℓ≅A ℓB}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A} {B : Type ℓB}
→ SubstRelDesc 𝒮-A (𝒮ˢ-const 𝒮-A B)
-- We have an element DUARel over any 𝒮-A with a projection to a universe
-- that can be described with UARelReindex
el : ∀ {ℓA ℓ≅A ℓU} {A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{f : UARelHom 𝒮-A (𝒮-Univ ℓU)}
→ UARelReindex f
→ SubstRelDesc 𝒮-A (𝒮ˢ-reindex f (𝒮ˢ-El ℓU))
prod : ∀ {ℓA ℓ≅A ℓB ℓC}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : A → Type ℓB} {𝒮ˢ-B : SubstRel 𝒮-A B} (dB : SubstRelDesc 𝒮-A 𝒮ˢ-B)
{C : A → Type ℓC} {𝒮ˢ-C : SubstRel 𝒮-A C} (dC : SubstRelDesc 𝒮-A 𝒮ˢ-C)
→ SubstRelDesc 𝒮-A (𝒮ˢ-B ×𝒮ˢ 𝒮ˢ-C)
sigma : ∀ {ℓA ℓ≅A ℓB ℓC}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : A → Type ℓB} {𝒮ˢ-B : SubstRel 𝒮-A B} (dB : SubstRelDesc 𝒮-A 𝒮ˢ-B)
{C : Σ A B → Type ℓC} {𝒮ˢ-C : SubstRel (∫ˢ 𝒮ˢ-B) C} (dC : SubstRelDesc (∫ˢ 𝒮ˢ-B) 𝒮ˢ-C)
→ SubstRelDesc 𝒮-A (𝒮ˢ-Σ 𝒮ˢ-B 𝒮ˢ-C)
pi : ∀ {ℓA ℓ≅A ℓB ℓC}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : A → Type ℓB} {𝒮ˢ-B : SubstRel 𝒮-A B} (dB : SubstRelDesc 𝒮-A 𝒮ˢ-B)
{C : Σ A B → Type ℓC} {𝒮ˢ-C : SubstRel (∫ˢ 𝒮ˢ-B) C} (dC : SubstRelDesc (∫ˢ 𝒮ˢ-B) 𝒮ˢ-C)
→ SubstRelDesc 𝒮-A (𝒮ˢ-Π 𝒮ˢ-B 𝒮ˢ-C)
data DUARelDesc : ∀ {ℓA ℓ≅A ℓB ℓ≅B}
{A : Type ℓA} (𝒮-A : UARel A ℓ≅A)
{B : A → Type ℓB} (𝒮ᴰ-B : DUARel 𝒮-A B ℓ≅B) → Typeω
where
generic : ∀ {ℓA ℓ≅A ℓB} {A : Type ℓA} {𝒮-A : UARel A ℓ≅A} {B : A → Type ℓB}
→ DUARelDesc 𝒮-A (𝒮ᴰ-generic 𝒮-A B)
constant : ∀ {ℓA ℓ≅A ℓB ℓ≅B}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : Type ℓB} {𝒮-B : UARel B ℓ≅B}
→ UARelDesc 𝒮-B
→ DUARelDesc 𝒮-A (𝒮ᴰ-const 𝒮-A 𝒮-B)
el : ∀ {ℓA ℓ≅A ℓU} {A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{f : UARelHom 𝒮-A (𝒮-Univ ℓU)}
→ UARelReindex f
→ DUARelDesc 𝒮-A (𝒮ᴰ-reindex f (𝒮ᴰ-El ℓU))
prod : ∀ {ℓA ℓ≅A ℓB ℓ≅B ℓC ℓ≅C}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : A → Type ℓB} {𝒮ᴰ-B : DUARel 𝒮-A B ℓ≅B} (dB : DUARelDesc 𝒮-A 𝒮ᴰ-B)
{C : A → Type ℓC} {𝒮ᴰ-C : DUARel 𝒮-A C ℓ≅C} (dC : DUARelDesc 𝒮-A 𝒮ᴰ-C)
→ DUARelDesc 𝒮-A (𝒮ᴰ-B ×𝒮ᴰ 𝒮ᴰ-C)
sigma : ∀ {ℓA ℓ≅A ℓB ℓ≅B ℓC ℓ≅C}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : A → Type ℓB} {𝒮ᴰ-B : DUARel 𝒮-A B ℓ≅B} (dB : DUARelDesc 𝒮-A 𝒮ᴰ-B)
{C : Σ A B → Type ℓC} {𝒮ᴰ-C : DUARel (∫ 𝒮ᴰ-B) C ℓ≅C} (dC : DUARelDesc (∫ 𝒮ᴰ-B) 𝒮ᴰ-C)
→ DUARelDesc 𝒮-A (𝒮ᴰ-Σ 𝒮ᴰ-B 𝒮ᴰ-C)
pi : ∀ {ℓA ℓ≅A ℓB ℓ≅B ℓC ℓ≅C}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : A → Type ℓB} {𝒮ᴰ-B : DUARel 𝒮-A B ℓ≅B} (dB : DUARelDesc 𝒮-A 𝒮ᴰ-B)
{C : Σ A B → Type ℓC} {𝒮ᴰ-C : DUARel (∫ 𝒮ᴰ-B) C ℓ≅C} (dC : DUARelDesc (∫ 𝒮ᴰ-B) 𝒮ᴰ-C)
→ DUARelDesc 𝒮-A (𝒮ᴰ-Π 𝒮ᴰ-B 𝒮ᴰ-C)
piˢ : ∀ {ℓA ℓ≅A ℓB ℓC ℓ≅C}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : A → Type ℓB} {𝒮ˢ-B : SubstRel 𝒮-A B} (dB : SubstRelDesc 𝒮-A 𝒮ˢ-B)
{C : Σ A B → Type ℓC} {𝒮ᴰ-C : DUARel (∫ˢ 𝒮ˢ-B) C ℓ≅C} (dC : DUARelDesc (∫ˢ 𝒮ˢ-B) 𝒮ᴰ-C)
→ DUARelDesc 𝒮-A (𝒮ᴰ-Πˢ 𝒮ˢ-B 𝒮ᴰ-C)
private
getUARel : ∀ {ℓA ℓ≅A} {A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
→ UARelDesc 𝒮-A → UARel A ℓ≅A
getUARel {𝒮-A = 𝒮-A} _ = 𝒮-A
getDUARel : ∀ {ℓA ℓ≅A ℓB ℓ≅B}
{A : Type ℓA} {𝒮-A : UARel A ℓ≅A}
{B : A → Type ℓB} {𝒮ᴰ-B : DUARel 𝒮-A B ℓ≅B}
→ DUARelDesc 𝒮-A 𝒮ᴰ-B → DUARel 𝒮-A B ℓ≅B
getDUARel {𝒮ᴰ-B = 𝒮ᴰ-B} _ = 𝒮ᴰ-B
-- Magic number
private
FUEL = 10000
mutual
autoUARelDesc : ℕ → R.Term → R.TC Unit
autoUARelDesc zero hole = R.typeError [ R.strErr "Out of fuel" ]
autoUARelDesc (suc n) hole =
tryUniv <|> tryProd <|> trySigma <|> tryParam <|> tryPi <|> tryUnit <|> useGeneric
where
tryUniv : R.TC Unit
tryUniv = R.unify (R.con (quote UARelDesc.univ) [ varg R.unknown ]) hole
tryBinary : R.Name → R.TC Unit
tryBinary name =
newMeta R.unknown >>= λ hole₁ →
newMeta R.unknown >>= λ hole₂ →
R.unify (R.con name (hole₁ v∷ hole₂ v∷ [])) hole >>
autoUARelDesc n hole₁ >>
autoDUARelDesc n hole₂
tryParam : R.TC Unit
tryParam =
newMeta R.unknown >>= λ paramTy →
newMeta R.unknown >>= λ hole₁ →
R.unify (R.con (quote UARelDesc.param) (paramTy v∷ hole₁ v∷ [])) hole >>
autoUARelDesc n hole₁
tryProd = tryBinary (quote UARelDesc.prod)
trySigma = tryBinary (quote UARelDesc.sigma)
tryPi = tryBinary (quote UARelDesc.pi)
tryUnit : R.TC Unit
tryUnit = R.unify (R.con (quote UARelDesc.unit) []) hole
useGeneric : R.TC Unit
useGeneric = R.unify (R.con (quote UARelDesc.generic) []) hole
autoUARelReindex : ℕ → R.Term → R.TC Unit
autoUARelReindex zero hole = R.typeError [ R.strErr "Out of fuel" ]
autoUARelReindex (suc n) hole =
tryId <|> tryFst <|> trySnd <|> tryApp
where
tryId : R.TC Unit
tryId = R.unify (R.con (quote UARelReindex.id) []) hole
tryUnary : R.Name → R.TC Unit
tryUnary name =
newMeta R.unknown >>= λ hole₁ →
R.unify (R.con name [ varg hole₁ ]) hole >>
autoUARelReindex n hole₁
tryFst = tryUnary (quote UARelReindex.∘fst)
trySnd = tryUnary (quote UARelReindex.∘snd)
tryApp : R.TC Unit
tryApp =
newMeta R.unknown >>= λ hole₁ →
newMeta R.unknown >>= λ param →
R.unify (R.con (quote UARelReindex.∘app) (hole₁ v∷ param v∷ [])) hole >>
autoUARelReindex n hole₁
autoSubstRelDesc : ℕ → R.Term → R.TC Unit
autoSubstRelDesc zero hole = R.typeError [ R.strErr "Out of fuel" ]
autoSubstRelDesc (suc n) hole =
tryConstant <|> tryEl <|> tryProd <|> trySigma <|> tryPi <|> useGeneric
where
tryConstant : R.TC Unit
tryConstant =
R.unify (R.con (quote SubstRelDesc.constant) []) hole
tryEl : R.TC Unit
tryEl =
newMeta R.unknown >>= λ hole₁ →
R.unify (R.con (quote SubstRelDesc.el) [ varg hole₁ ]) hole >>
autoUARelReindex n hole₁
tryBinary : R.Name → R.TC Unit
tryBinary name =
newMeta R.unknown >>= λ hole₁ →
newMeta R.unknown >>= λ hole₂ →
R.unify (R.con name (hole₁ v∷ hole₂ v∷ [])) hole >>
autoSubstRelDesc n hole₁ >>
autoSubstRelDesc n hole₂
tryProd = tryBinary (quote SubstRelDesc.prod)
trySigma = tryBinary (quote SubstRelDesc.sigma)
tryPi = tryBinary (quote SubstRelDesc.pi)
useGeneric : R.TC Unit
useGeneric = R.unify (R.con (quote SubstRelDesc.generic) []) hole
autoDUARelDesc : ℕ → R.Term → R.TC Unit
autoDUARelDesc zero hole = R.typeError [ R.strErr "Out of fuel" ]
autoDUARelDesc (suc n) hole =
tryConstant <|> tryEl <|> tryProd <|> trySigma <|> tryPiˢ <|> tryPi <|> useGeneric
where
tryConstant : R.TC Unit
tryConstant =
newMeta R.unknown >>= λ hole₁ →
R.unify (R.con (quote DUARelDesc.constant) [ varg hole₁ ]) hole >>
autoUARelDesc n hole₁
tryEl : R.TC Unit
tryEl =
newMeta R.unknown >>= λ hole₁ →
R.unify (R.con (quote DUARelDesc.el) [ varg hole₁ ]) hole >>
autoUARelReindex n hole₁
tryBinary : R.Name → R.TC Unit
tryBinary name =
newMeta R.unknown >>= λ hole₁ →
newMeta R.unknown >>= λ hole₂ →
R.unify (R.con name (hole₁ v∷ hole₂ v∷ [])) hole >>
autoDUARelDesc n hole₁ >>
autoDUARelDesc n hole₂
tryProd = tryBinary (quote DUARelDesc.prod)
trySigma = tryBinary (quote DUARelDesc.sigma)
tryPi = tryBinary (quote DUARelDesc.pi)
tryPiˢ : R.TC Unit
tryPiˢ =
newMeta R.unknown >>= λ hole₁ →
newMeta R.unknown >>= λ hole₂ →
R.unify (R.con (quote DUARelDesc.piˢ) (hole₁ v∷ hole₂ v∷ [])) hole >>
autoSubstRelDesc n hole₁ >>
autoDUARelDesc n hole₂
useGeneric : R.TC Unit
useGeneric = R.unify (R.con (quote DUARelDesc.generic) []) hole
module DisplayedAutoMacro where
autoUARel : ∀ {ℓA} (A : Type ℓA) → ℕ → R.Term → R.TC Unit
autoUARel A n hole =
R.quoteTC A >>= λ `A` →
newMeta R.unknown >>= λ desc →
makeAuxiliaryDef "autoUA"
(R.def (quote UARel) (`A` v∷ R.unknown v∷ []))
(R.def (quote getUARel) [ varg desc ])
>>= λ uaTerm →
R.unify hole uaTerm >>
autoUARelDesc n desc
autoDUARel : ∀ {ℓA ℓ≅A ℓB} {A : Type ℓA} (𝒮-A : UARel A ℓ≅A) (B : A → Type ℓB)
→ ℕ → R.Term → R.TC Unit
autoDUARel 𝒮-A B n hole =
R.quoteTC 𝒮-A >>= λ `𝒮-A` →
R.quoteTC B >>= λ `B` →
newMeta R.unknown >>= λ desc →
makeAuxiliaryDef "autoDUA"
(R.def (quote DUARel) (`𝒮-A` v∷ `B` v∷ R.unknown v∷ []))
(R.def (quote getDUARel) [ varg desc ])
>>= λ duaTerm →
R.unify hole duaTerm >>
autoDUARelDesc n desc
macro
autoUARel : ∀ {ℓA} (A : Type ℓA) → R.Term → R.TC Unit
autoUARel A = DisplayedAutoMacro.autoUARel A FUEL
autoDUARel : ∀ {ℓA ℓ≅A ℓB} {A : Type ℓA} (𝒮-A : UARel A ℓ≅A) (B : A → Type ℓB)
→ R.Term → R.TC Unit
autoDUARel 𝒮-A B = DisplayedAutoMacro.autoDUARel 𝒮-A B FUEL
private
module Example (A : Type) (a₀ : A) where
example0 : DUARel (autoUARel Type) (λ X → X → A × X) ℓ-zero
example0 = autoDUARel _ _
example0' : {X Y : Type} (e : X ≃ Y)
(f : X → A × X) (g : Y → A × Y)
→ (∀ x → (f x .fst ≡ g (e .fst x) .fst) × (e .fst (f x .snd) ≡ g (e .fst x) .snd))
→ PathP (λ i → ua e i → A × ua e i) f g
example0' e f g = example0 .DUARel.uaᴰ f e g .fst
-- An example where a DUARel is parameterized over a pair of types
example1 : DUARel (autoUARel (Type × Type)) (λ (X , Z) → X → Z) ℓ-zero
example1 = autoDUARel _ _
example1' : {X Y : Type} (e : X ≃ Y) {Z W : Type} (h : Z ≃ W)
(f : X → Z) (g : Y → W)
→ (∀ x → h .fst (f x) ≡ g (e .fst x))
→ PathP (λ i → ua e i → ua h i) f g
example1' e h f g = example1 .DUARel.uaᴰ f (e , h) g .fst
-- An example where a DUARel is parameterized over a family of types
example2 : DUARel (autoUARel (A → Type)) (λ B → B a₀) ℓ-zero
example2 = autoDUARel _ _
example2' : {B C : A → Type} (e : (a : A) → B a ≃ C a)
(b : B a₀) (c : C a₀)
→ e a₀ .fst b ≡ c
→ PathP (λ i → ua (e a₀) i) b c
example2' e b c = example2 .DUARel.uaᴰ b e c .fst
|
The convex hull of a set $p$ is the set of all points that can be written as a convex combination of at most $d+1$ points of $p$, where $d$ is the affine dimension of $p$. |
SUBROUTINE decsf(x,y,zz,fi0,tet0,FI,TET,h)
! this program calculates fi and tet of a point in global coordinates from
! given x, y, z coordinates relatively the point fi0, tet0
REAL PI/3.1415926/, rz/6371./
PER=PI/180.
sqr=(rz-zz)*(rz-zz)-x*x-y*y
!write(*,*)' dz=',rz-sqrt(sqr)
      if(sqr.lt.0.) then
       write(*,*)' problem in DECSF:'
       write(*,*)' x=',x,' y=',y,' zz=',zz
       pause
       stop
      end if
z=sqrt(sqr)
!write(*,*)' z=',z,'sqr=',sqr
X1=x
Y1=-Y*SIN(tet0*PER)+Z*COS(tet0*PER)
Z1=Y*COS(tet0*PER)+Z*SIN(tet0*PER)
!write(*,*)' x1=',x1,' y1=',y1,' z1=',z1
if(x1.eq.0.) then
fi=fi0
else
      PA=0.
      IF(X1.GT.0..AND.Y1.GT.0.)PA=0.
IF(X1.LT.0.)PA=-PI
IF(X1.GT.0..AND.Y1.LT.0.)PA=2.*PI
ff=atan(y1/x1)/per
FI=fi0-(ff+PA/PER)+90.
end if
!write(*,*)' fi=',fi
if(fi.gt.360.)fi=fi-360.
if(abs(fi-fi0).gt.abs(fi-fi0-360)) fi=fi-360
if(abs(fi-fi0).gt.abs(fi-fi0+360)) fi=fi+360
r=sqrt(x1*x1+y1*y1+z1*z1)
TET=ASIN(Z1/r)/PER
h=rz-r
RETURN
END
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj28synthconj6 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (Succ (plus (mult lv0 lv1) (plus lv1 lv0))) (plus lv2 (Succ lv0))).
Admitted.
QuickChick conj28synthconj6.
|
#include <iostream>
#include <stdio.h>
#include <cstring>
#include <cmath>
#include <fstream>
#include <string>
#include <regex>
#include <iterator>
#include <python3.7m/Python.h>
#include <boost/python.hpp>
#include <boost/python/numpy.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
#include <boost/python/module.hpp>
#include <boost/python/def.hpp>
#include <boost/python/implicit.hpp>
#include <boost/python/extract.hpp>
#include <arpa/inet.h>
#include <netinet/in.h>
namespace p = boost::python;
namespace np = boost::python::numpy;
inline void destroyManagerCObject(PyObject *obj)
{
    double *b = reinterpret_cast<double *>(PyCapsule_GetPointer(obj, NULL));
    if (b)
    {
        // the buffers handed to the capsules are allocated with malloc,
        // so release them with free rather than delete[]
        free(b);
    }
}
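// AdvParser parses micromagnetic .mif/.omf/.ovf output files (text or
// IEEE binary 4/8 data sections) and exposes the vector field, plus
// OpenGL-ready geometry built from it, to Python as numpy ndarrays.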
struct AdvParser
{
bool checkBase = true;
bool checkNodes = true;
bool shapeCreated = true;
int xnodes, ynodes, znodes;
double xbase, ybase, zbase;
AdvParser()
{
xnodes = 0;
ynodes = 0;
znodes = 0;
xbase = 0.0;
ybase = 0.0;
zbase = 0.0;
};
void setXnodes(int val) { this->xnodes = val; }
void setYnodes(int val) { this->ynodes = val; }
void setZnodes(int val) { this->znodes = val; }
    void setXbase(double val) { this->xbase = val; }
    void setYbase(double val) { this->ybase = val; }
    void setZbase(double val) { this->zbase = val; }
int getXnodes() { return xnodes; }
int getYnodes() { return ynodes; }
int getZnodes() { return znodes; }
double getXbase() { return xbase; }
double getYbase() { return ybase; }
double getZbase() { return zbase; }
np::ndarray getOmfToList(std::string path)
{
// is a text file
std::ifstream miffile;
        miffile.open(path, std::ios::in);
int buffer_size = getMifHeader(miffile);
int num_vectors = xnodes * ynodes * znodes;
double *fut_ndarray = (double *)(malloc(sizeof(double) * num_vectors * 3));
int i = 0;
if (miffile.is_open())
{
std::string line;
while (std::getline(miffile, line))
{
std::istringstream iss(line);
if (line[0] == '#')
continue;
else
{
double x, y, z;
if (!(iss >> x >> y >> z))
break;
fut_ndarray[i + 0] = x;
fut_ndarray[i + 1] = y;
fut_ndarray[i + 2] = z;
i += 3;
}
}
}
miffile.close();
        boost::python::numpy::dtype dt1 = boost::python::numpy::dtype::get_builtin<double>();
        boost::python::tuple shape = boost::python::make_tuple(num_vectors, 3);
        boost::python::tuple stride = boost::python::make_tuple(3 * sizeof(double), sizeof(double));
        // hand ownership of the malloc'd buffer to a capsule so the ndarray
        // frees it on destruction (matches the other accessors below)
        boost::python::handle<> h(::PyCapsule_New((void *)fut_ndarray,
                                                  NULL,
                                                  (PyCapsule_Destructor)&destroyManagerCObject));
        boost::python::numpy::ndarray vectorData = boost::python::numpy::from_data(fut_ndarray,
                                                                                   dt1,
                                                                                   shape,
                                                                                   stride,
                                                                                   boost::python::object(h));
        // the last from_data argument is the owner object
return vectorData;
}
int getMifHeader(std::ifstream &miffile)
{
std::string line;
int buffer_size = 0;
std::string node_reg("# xnodes:");
std::string base_reg("# xbase:");
// we have to change strategy here
std::string data_text("# Begin: Data Text");
if (miffile.is_open())
{
while (std::getline(miffile, line))
{
if (line.at(0) == '#')
{
if (line == "# Begin: Data Binary 8")
{
buffer_size = 8;
break;
}
else if (line == "# Begin: Data Binary 4")
{
buffer_size = 4;
break;
}
else if (data_text.compare(line.substr(0, data_text.length())) == 0)
{
buffer_size = 1;
break;
}
if (checkNodes && line.substr(0, node_reg.length()) == node_reg)
{
if (node_reg.at(2) == 'x')
{
xnodes = std::stoi(line.substr(node_reg.length(), line.length()));
node_reg.at(2) = 'y';
}
else if (node_reg.at(2) == 'y')
{
ynodes = std::stoi(line.substr(node_reg.length(), line.length()));
node_reg.at(2) = 'z';
}
else
{
znodes = std::stoi(line.substr(node_reg.length(), line.length()));
checkNodes = false;
}
}
if (checkBase && line.substr(0, base_reg.length()) == base_reg)
{
if (base_reg.at(2) == 'x')
{
xbase = std::stod(line.substr(base_reg.length(), line.length()));
base_reg.at(2) = 'y';
}
else if (base_reg.at(2) == 'y')
{
ybase = std::stod(line.substr(base_reg.length(), line.length()));
base_reg.at(2) = 'z';
}
else
{
zbase = std::stod(line.substr(base_reg.length(), line.length()));
checkBase = false;
}
}
}
else
break;
}
if (buffer_size <= 0)
{
throw std::runtime_error("Invalid buffer size");
}
char IEEE_BUF[buffer_size + 1];
miffile.read(IEEE_BUF, buffer_size);
if (buffer_size == 4)
{
float IEEE_val;
float IEEE_VALIDATION = 1234567.0;
std::memcpy(&IEEE_val, IEEE_BUF, sizeof(float));
if (IEEE_val != IEEE_VALIDATION)
{
printf("%f\n", IEEE_val);
throw std::runtime_error("IEEE value not consistent");
}
}
else if (buffer_size == 8)
{
double IEEE_val;
double IEEE_VALIDATION = 123456789012345.0;
std::memcpy(&IEEE_val, IEEE_BUF, sizeof(double));
if (IEEE_val != IEEE_VALIDATION)
{
printf("%f\n", IEEE_val);
throw std::runtime_error("IEEE value not consistent");
}
}
else if (buffer_size == 1)
{
                // this is a text file; the data section is read line by line
                // by the calling functions, so there is no IEEE validation
                // value to check here
}
else
{
throw std::runtime_error("Unspecified or invalid buffer size for binary input data");
}
return buffer_size;
}
else
{
throw std::runtime_error("Invalid mif file");
}
return 0;
}
void matrix_vector_mul(double mat[3][3], double vec[3])
{
// only 3 x 1, inplace
double c[3] = {vec[0], vec[1], vec[2]};
vec[0] = mat[0][0] * c[0] + mat[0][1] * c[1] + mat[0][2] * c[2];
vec[1] = mat[1][0] * c[0] + mat[1][1] * c[1] + mat[1][2] * c[2];
vec[2] = mat[2][0] * c[0] + mat[2][1] * c[1] + mat[2][2] * c[2];
}
void matrix_vector_cpy(double mat[3][3], double c[3], double vec[3])
{
// only 3 x 1, not inplace
vec[0] = mat[0][0] * c[0] + mat[0][1] * c[1] + mat[0][2] * c[2];
vec[1] = mat[1][0] * c[0] + mat[1][1] * c[1] + mat[1][2] * c[2];
vec[2] = mat[2][0] * c[0] + mat[2][1] * c[1] + mat[2][2] * c[2];
}
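    // c = a x b (3D vector cross product), used to compute face normals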
void a_cross_b(double a[3], double b[3], double c[3])
{
c[0] = a[1] * b[2] - a[2] * b[1];
c[1] = a[2] * b[0] - a[0] * b[2];
c[2] = a[0] * b[1] - a[1] * b[0];
}
void generateVBO(double *vbo,
int *offset,
int *normal_offset,
double position[3],
double vector[3], // from omf
double color[3], // from omf
double t_rotation[3][3],
double height,
double radius,
int resolution)
{
double height_operator[3] = {0, 0, height * 1.5};
double mag = sqrt(pow(vector[0], 2) +
pow(vector[1], 2) +
pow(vector[2], 2));
double phi = acos(vector[2] / mag); // z rot
double theta = atan2(vector[1], vector[0]); // y rot
double ct = cos(theta);
double st = sin(theta);
double cp = cos(phi);
double sp = sin(phi);
double rotation_matrix[3][3] = {
{ct, -st * cp, st * sp},
{st, cp * ct, -sp * ct},
{0, sp, cp}};
double origin_base[3], cpy[3];
double cyllinder_co_rot[3] = {radius, radius, 0};
double cone_co_rot[3] = {2 * radius, 2 * radius, 0};
std::memcpy(origin_base, position, sizeof(double) * 3);
double n[3], u[3], v[3];
int prev_off = 36;
for (int i = 0; i < (resolution - 1); i++)
{
// colors first
std::memcpy(vbo + *offset, color, sizeof(double) * 3);
std::memcpy(vbo + *offset + 9, color, sizeof(double) * 3);
std::memcpy(vbo + *offset + 18, color, sizeof(double) * 3);
std::memcpy(vbo + *offset + 27, color, sizeof(double) * 3);
// bottom triangle - cyllinder
matrix_vector_cpy(rotation_matrix, cyllinder_co_rot, cpy);
vbo[*offset + 3] = origin_base[0] + cpy[0];
vbo[*offset + 4] = origin_base[1] + cpy[1];
vbo[*offset + 5] = origin_base[2] + cpy[2];
// bottom triangle - cone
cone_co_rot[2] += height;
matrix_vector_cpy(rotation_matrix, cone_co_rot, cpy);
vbo[*offset + 6] = origin_base[0] + cpy[0];
vbo[*offset + 7] = origin_base[1] + cpy[1];
vbo[*offset + 8] = origin_base[2] + cpy[2];
// top triangle - cyllinder
cyllinder_co_rot[2] += height;
matrix_vector_cpy(rotation_matrix, cyllinder_co_rot, cpy);
vbo[*offset + 21] = origin_base[0] + cpy[0];
vbo[*offset + 22] = origin_base[1] + cpy[1];
vbo[*offset + 23] = origin_base[2] + cpy[2];
// top triangle - cone
matrix_vector_mul(rotation_matrix, height_operator);
vbo[*offset + 24] = origin_base[0] + height_operator[0];
vbo[*offset + 25] = origin_base[1] + height_operator[1];
vbo[*offset + 26] = origin_base[2] + height_operator[2];
height_operator[0] = 0;
height_operator[1] = 0;
height_operator[2] = 1.5 * height;
cyllinder_co_rot[2] -= height;
cone_co_rot[2] -= height;
matrix_vector_mul(t_rotation, cyllinder_co_rot);
matrix_vector_mul(t_rotation, cone_co_rot);
if (i > 0)
{
// firstly compute the later indices for vertex
// because we will use them to compute the normals
u[0] = vbo[*offset + 3] - vbo[*offset - prev_off + 3];
u[1] = vbo[*offset + 4] - vbo[*offset - prev_off + 4];
u[2] = vbo[*offset + 5] - vbo[*offset - prev_off + 5];
v[0] = vbo[*offset - prev_off + 21] - vbo[*offset - prev_off + 3];
v[1] = vbo[*offset - prev_off + 22] - vbo[*offset - prev_off + 4];
v[2] = vbo[*offset - prev_off + 23] - vbo[*offset - prev_off + 5];
// cross product
a_cross_b(u, v, n);
vbo[*normal_offset + 0] = n[0];
vbo[*normal_offset + 1] = n[1];
vbo[*normal_offset + 2] = n[2];
vbo[*normal_offset + 18] = n[0];
vbo[*normal_offset + 19] = n[1];
vbo[*normal_offset + 20] = n[2];
// normals to the cone triangle
u[0] = vbo[*offset + 6] - vbo[*offset - prev_off + 6];
u[1] = vbo[*offset + 7] - vbo[*offset - prev_off + 7];
u[2] = vbo[*offset + 8] - vbo[*offset - prev_off + 8];
v[0] = vbo[*offset - prev_off + 24] - vbo[*offset - prev_off + 6];
v[1] = vbo[*offset - prev_off + 25] - vbo[*offset - prev_off + 7];
v[2] = vbo[*offset - prev_off + 26] - vbo[*offset - prev_off + 8];
// cross product
a_cross_b(u, v, n);
vbo[*normal_offset + 3] = n[0];
vbo[*normal_offset + 4] = n[1];
vbo[*normal_offset + 5] = n[2];
vbo[*normal_offset + 21] = n[0];
vbo[*normal_offset + 22] = n[1];
vbo[*normal_offset + 23] = n[2];
*normal_offset += 18;
*normal_offset += 18;
}
*offset += prev_off;
}
// colors first
std::memcpy(vbo + *offset, color, sizeof(double) * 3);
std::memcpy(vbo + *offset + 9, color, sizeof(double) * 3);
std::memcpy(vbo + *offset + 18, color, sizeof(double) * 3);
std::memcpy(vbo + *offset + 27, color, sizeof(double) * 3);
// reset rotational vectors to their defaults
cyllinder_co_rot[0] = radius;
cyllinder_co_rot[1] = radius;
cyllinder_co_rot[2] = 0;
matrix_vector_mul(rotation_matrix, cyllinder_co_rot);
vbo[*offset + 3] = origin_base[0] + cyllinder_co_rot[0];
vbo[*offset + 4] = origin_base[1] + cyllinder_co_rot[1];
vbo[*offset + 5] = origin_base[2] + cyllinder_co_rot[2];
cone_co_rot[0] = 2 * radius;
cone_co_rot[1] = 2 * radius;
cone_co_rot[2] = height;
matrix_vector_mul(rotation_matrix, cone_co_rot);
vbo[*offset + 6] = origin_base[0] + cone_co_rot[0];
vbo[*offset + 7] = origin_base[1] + cone_co_rot[1];
vbo[*offset + 8] = origin_base[2] + cone_co_rot[2];
cyllinder_co_rot[0] = radius;
cyllinder_co_rot[1] = radius;
cyllinder_co_rot[2] += height;
matrix_vector_mul(rotation_matrix, cyllinder_co_rot);
vbo[*offset + 21] = origin_base[0] + cyllinder_co_rot[0];
vbo[*offset + 22] = origin_base[1] + cyllinder_co_rot[1];
vbo[*offset + 23] = origin_base[2] + cyllinder_co_rot[2];
height_operator[0] = 0;
height_operator[1] = 0;
height_operator[2] = 1.5 * height;
matrix_vector_mul(rotation_matrix, height_operator);
vbo[*offset + 24] = origin_base[0] + height_operator[0];
vbo[*offset + 25] = origin_base[1] + height_operator[1];
vbo[*offset + 26] = origin_base[2] + height_operator[2];
// normals to the cyllinder triangle
u[0] = vbo[*offset + 3] - vbo[*offset - prev_off + 3];
u[1] = vbo[*offset + 4] - vbo[*offset - prev_off + 4];
u[2] = vbo[*offset + 5] - vbo[*offset - prev_off + 5];
v[0] = vbo[*offset - prev_off + 21] - vbo[*offset - prev_off + 3];
v[1] = vbo[*offset - prev_off + 22] - vbo[*offset - prev_off + 4];
v[2] = vbo[*offset - prev_off + 23] - vbo[*offset - prev_off + 5];
// cross product
a_cross_b(u, v, n);
vbo[*normal_offset + 0] = n[0];
vbo[*normal_offset + 1] = n[1];
vbo[*normal_offset + 2] = n[2];
vbo[*normal_offset + 18] = n[0];
vbo[*normal_offset + 19] = n[1];
vbo[*normal_offset + 20] = n[2];
// normals to the cone triangle
u[0] = vbo[*offset + 6] - vbo[*offset - prev_off + 6];
u[1] = vbo[*offset + 7] - vbo[*offset - prev_off + 7];
u[2] = vbo[*offset + 8] - vbo[*offset - prev_off + 8];
v[0] = vbo[*offset - prev_off + 24] - vbo[*offset - prev_off + 6];
v[1] = vbo[*offset - prev_off + 25] - vbo[*offset - prev_off + 7];
v[2] = vbo[*offset - prev_off + 26] - vbo[*offset - prev_off + 8];
// cross product
a_cross_b(u, v, n);
vbo[*normal_offset + 3] = n[0];
vbo[*normal_offset + 4] = n[1];
vbo[*normal_offset + 5] = n[2];
vbo[*normal_offset + 21] = n[0];
vbo[*normal_offset + 22] = n[1];
vbo[*normal_offset + 23] = n[2];
*normal_offset += 18;
*normal_offset += 18;
// LAST NORMALS MUST CATCH UP and are the same as previous
vbo[*normal_offset + 0] = n[0];
vbo[*normal_offset + 1] = n[1];
vbo[*normal_offset + 2] = n[2];
vbo[*normal_offset + 3] = n[0];
vbo[*normal_offset + 4] = n[1];
vbo[*normal_offset + 5] = n[2];
*normal_offset += 18;
vbo[*normal_offset + 0] = n[0];
vbo[*normal_offset + 1] = n[1];
vbo[*normal_offset + 2] = n[2];
vbo[*normal_offset + 3] = n[0];
vbo[*normal_offset + 4] = n[1];
vbo[*normal_offset + 5] = n[2];
*normal_offset += 18;
*offset += prev_off;
}
void generateCubes2(double *sh, double position[3], double dimensions[3], int current_pos)
{
double arr[144] = {
//TOP FACE
position[0] + dimensions[0], position[1], position[2] + dimensions[2],
0, 0, 0,
position[0], position[1], position[2] + dimensions[2],
0, 0, 0,
position[0], position[1] + dimensions[1], position[2] + dimensions[2],
0, 0, 0,
position[0] + dimensions[0], position[1] + dimensions[1], position[2] + dimensions[2],
0, 0, 0,
//BOTTOM FACE
position[0] + dimensions[0], position[1], position[2],
0, 0, 0,
position[0], position[1], position[2],
0, 0, 0,
position[0], position[1] + dimensions[1], position[2],
0, 0, 0,
position[0] + dimensions[0], position[1] + dimensions[1], position[2],
0, 0, 0,
//FRONT FACE
position[0] + dimensions[0], position[1] + dimensions[1], position[2] + dimensions[2],
0, 0, 0,
position[0], position[1] + dimensions[1], position[2] + dimensions[2],
0, 0, 0,
position[0], position[1] + dimensions[1], position[2],
0, 0, 0,
position[0] + dimensions[0], position[1] + dimensions[1], position[2],
0, 0, 0,
//BACK FACE
position[0] + dimensions[0], position[1], position[2] + dimensions[2],
0, 0, 0,
position[0], position[1], position[2] + dimensions[2],
0, 0, 0,
position[0], position[1], position[2],
0, 0, 0,
position[0] + dimensions[0], position[1], position[2],
0, 0, 0,
//RIGHT FACE
position[0] + dimensions[0], position[1], position[2] + dimensions[2],
0, 0, 0,
position[0] + dimensions[0], position[1] + dimensions[1], position[2] + dimensions[2],
0, 0, 0,
position[0] + dimensions[0], position[1] + dimensions[1], position[2],
0, 0, 0,
position[0] + dimensions[0], position[1], position[2],
0, 0, 0,
//LEFT FACE
position[0], position[1] + dimensions[1], position[2] + dimensions[2],
0, 0, 0,
position[0], position[1], position[2] + dimensions[2],
0, 0, 0,
position[0], position[1], position[2],
0, 0, 0,
position[0], position[1] + dimensions[1], position[2],
0, 0, 0};
// normals here
double ux, uy, uz;
double vx, vy, vz;
int offset = 0;
for (int i = 0; i < 6; i++)
{
ux = arr[offset + 0] - arr[offset + 6]; // first vertex
uy = arr[offset + 1] - arr[offset + 7];
uz = arr[offset + 2] - arr[offset + 8];
vx = arr[offset + 0] - arr[offset + 12]; // second vertex
vy = arr[offset + 1] - arr[offset + 13];
vz = arr[offset + 2] - arr[offset + 14];
// cross product
for (int j = 0; j < 4; j++)
{
arr[offset + 3 + j * 6 + 0] = uy * vz - vy * uz;
                arr[offset + 3 + j * 6 + 1] = uz * vx - ux * vz;
arr[offset + 3 + j * 6 + 2] = ux * vy - uy * vx;
}
offset += 24;
}
std::memcpy(sh + current_pos, arr, sizeof(double) * 144);
}
np::ndarray generateIndices(int N, int index_required)
{
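        // For each of N primitives with index_required vertices, emit
        // (index_required - 2) triangles over consecutive vertex triples
        // (i, i+1, i+2), strip-style.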
int start_index = 0;
int *indices = (int *)malloc(sizeof(int) * (3 * (index_required - 2) * N));
for (int n = 0; n < N; n++)
{
start_index = n * index_required + 3;
for (int i = 0; i < (index_required - 2); i++)
{
indices[n * (index_required - 2) * 3 + i * 3 + 0] = start_index + i - 3;
indices[n * (index_required - 2) * 3 + i * 3 + 1] = start_index + i - 2;
indices[n * (index_required - 2) * 3 + i * 3 + 2] = start_index + i - 1;
}
}
np::dtype dt = np::dtype::get_builtin<int>();
p::tuple shape = p::make_tuple(3 * (index_required - 2) * N);
p::tuple stride = p::make_tuple(sizeof(int));
p::handle<> h(::PyCapsule_New((void *)indices,
NULL,
(PyCapsule_Destructor)&destroyManagerCObject));
return np::from_data(indices,
dt,
shape,
stride,
p::object(h));
}
np::ndarray getCubeOutline(int xn, int yn, int zn,
double xb, double yb, double zb,
int sampling,
int start_layer, int stop_layer)
{
/*
Cube MIF OUTLINE
2 VBOs because the geometry is constant
1) Geometry + Normal VBO
2) Color VBO
*/
int per_vertex = 144;
// per_vertex = 72;
double *sh = (double *)(malloc(sizeof(double) * xn * yn * zn * per_vertex));
double dimensions[3] = {xb * sampling, yb * sampling, zb};
int current_pos = 0;
for (int z = start_layer; z < stop_layer; z += 1)
{
for (int y = 0; y < ynodes; y += sampling)
{
for (int x = 0; x < xnodes; x += sampling)
{
double position[3] = {xb * (x % xn) - xn * xb / 2,
yb * (y % yn) - yn * yb / 2,
zb * (z % zn) - zn * zb / 2};
generateCubes2(sh, position, dimensions, current_pos);
current_pos += per_vertex;
}
}
}
np::dtype dt = np::dtype::get_builtin<double>();
p::tuple shape = p::make_tuple(xn * yn * zn * per_vertex);
p::tuple stride = p::make_tuple(sizeof(double));
p::handle<> h(::PyCapsule_New((void *)sh,
NULL,
(PyCapsule_Destructor)&destroyManagerCObject));
return np::from_data(sh,
dt,
shape,
stride,
p::object(h));
}
void getHeader(std::string filepath)
{
std::ifstream miffile;
        miffile.open(filepath, std::ios::in | std::ios_base::binary);
getMifHeader(miffile);
miffile.close();
}
np::ndarray getShapeAsNdarray(double xb, double yb, double zb)
{
double *sh = (double *)(malloc(sizeof(double) * xnodes * ynodes * znodes * 3));
int current_pos = 0;
for (int z = 0; z < znodes; z++)
{
for (int y = 0; y < ynodes; y++)
{
for (int x = 0; x < xnodes; x++)
{
sh[current_pos + 0] = xb * (x % xnodes) - xnodes * xb / 2;
sh[current_pos + 1] = yb * (y % ynodes) - ynodes * yb / 2;
sh[current_pos + 2] = zb * (z % znodes) - znodes * zb / 2;
current_pos += 3;
}
}
}
np::dtype dt = np::dtype::get_builtin<double>();
p::tuple shape = p::make_tuple(current_pos);
p::tuple stride = p::make_tuple(sizeof(double));
p::handle<> h(::PyCapsule_New((void *)sh,
NULL,
(PyCapsule_Destructor)&destroyManagerCObject));
return np::from_data(sh,
dt,
shape,
stride,
p::object(h));
}
np::ndarray getMifAsNdarrayWithColor(std::string path,
p::list color_vector_l,
p::list positive_color_l,
p::list negative_color_l,
int sampling,
int start_layer,
int stop_layer,
int binary)
{
        double color_vector[3] = {
            p::extract<double>(color_vector_l[0]),
            p::extract<double>(color_vector_l[1]),
            p::extract<double>(color_vector_l[2])};
        double positive_color[3] = {
            p::extract<double>(positive_color_l[0]),
            p::extract<double>(positive_color_l[1]),
            p::extract<double>(positive_color_l[2])};
        double negative_color[3] = {
            p::extract<double>(negative_color_l[0]),
            p::extract<double>(negative_color_l[1]),
            p::extract<double>(negative_color_l[2])};
std::ifstream miffile;
std::ios::openmode openmod;
if (binary)
{
            openmod = std::ios::in | std::ios_base::binary;
}
else
{
            openmod = std::ios::in;
}
miffile.open(path, openmod);
int buffer_size = getMifHeader(miffile);
if (buffer_size == 0)
{
miffile.close();
throw std::runtime_error("Invalid mif/ovf file");
}
int lines = znodes * xnodes * ynodes;
double *vals;
vals = (double *)malloc(sizeof(double) * lines * 3);
if (buffer_size == 4)
{
char buffer[buffer_size * lines * 3];
miffile.read(buffer, buffer_size * lines * 3);
float fvals[lines * 3];
std::memcpy(fvals, buffer, lines * 3 * sizeof(float));
for (int i = 0; i < lines * 3; i++)
{
vals[i] = fvals[i];
}
}
else if (buffer_size == 8)
{
char buffer[buffer_size * lines * 3];
miffile.read(buffer, buffer_size * lines * 3);
            // copy out of the stack buffer; aliasing it with a cast would
            // leave vals dangling once this block ends
            std::memcpy(vals, buffer, lines * 3 * sizeof(double));
}
else if (buffer_size == 1)
{
int i = 0;
if (miffile.is_open())
{
std::string line;
while (std::getline(miffile, line))
{
std::istringstream iss(line);
if (line[0] == '#')
continue;
else
{
double x, y, z;
if (!(iss >> x >> y >> z))
break;
vals[i + 0] = x;
vals[i + 1] = y;
vals[i + 2] = z;
i += 3;
}
}
}
}
miffile.close();
        int inflate = 24; // 24 vertices in a cube
double *fut_ndarray = (double *)(malloc(sizeof(double) * znodes * xnodes * ynodes * 3 * inflate));
double mag, dot;
double *array_to_cpy = (double *)(malloc(sizeof(double) * 3));
int offset = 0;
int index = 0;
for (int z = start_layer; z < stop_layer; z += 1)
{
for (int y = 0; y < ynodes; y += sampling)
{
for (int x = 0; x < xnodes; x += sampling)
{
index = 3 * (x + xnodes * y + xnodes * ynodes * z);
mag = sqrt(pow(vals[index + 0], 2) +
pow(vals[index + 1], 2) +
pow(vals[index + 2], 2));
if (mag != 0.0)
{
dot = (vals[index + 0] / mag) * color_vector[0] +
(vals[index + 1] / mag) * color_vector[1] +
(vals[index + 2] / mag) * color_vector[2];
if (dot > 0)
{
array_to_cpy[0] = positive_color[0] * dot + (1.0 - dot);
array_to_cpy[1] = positive_color[1] * dot + (1.0 - dot);
array_to_cpy[2] = positive_color[2] * dot + (1.0 - dot);
}
else
{
dot *= -1;
array_to_cpy[0] = negative_color[0] * dot + (1.0 - dot);
array_to_cpy[1] = negative_color[1] * dot + (1.0 - dot);
array_to_cpy[2] = negative_color[2] * dot + (1.0 - dot);
}
}
else
{
array_to_cpy[0] = 0.0;
array_to_cpy[1] = 0.0;
array_to_cpy[2] = 0.0;
}
for (int inf = 0; inf < inflate; inf++)
{
std::memcpy(fut_ndarray + offset, array_to_cpy, sizeof(double) * 3);
offset += 3;
}
}
}
}
np::dtype dt = np::dtype::get_builtin<double>();
p::tuple shape = p::make_tuple(offset);
p::tuple stride = p::make_tuple(sizeof(double));
p::handle<> h(::PyCapsule_New((void *)fut_ndarray,
NULL,
(PyCapsule_Destructor)&destroyManagerCObject));
return np::from_data(fut_ndarray,
dt,
shape,
stride,
p::object(h));
}
np::ndarray getMifVBO(std::string path,
int resolution,
p::list color_vector_l,
p::list positive_color_l,
p::list negative_color_l,
int sampling,
double height,
double radius,
int start_layer,
int stop_layer,
int xscaler,
int yscaler,
int zscaler,
int binary)
{
if (start_layer < 0 || start_layer > stop_layer)
{
throw std::invalid_argument("Start layer cannot be smaller than stop layer");
}
if (stop_layer > znodes)
{
throw std::invalid_argument("Stop layer too large");
}
        double color_vector[3] = {
            p::extract<double>(color_vector_l[0]),
            p::extract<double>(color_vector_l[1]),
            p::extract<double>(color_vector_l[2])};
        double positive_color[3] = {
            p::extract<double>(positive_color_l[0]),
            p::extract<double>(positive_color_l[1]),
            p::extract<double>(positive_color_l[2])};
        double negative_color[3] = {
            p::extract<double>(negative_color_l[0]),
            p::extract<double>(negative_color_l[1]),
            p::extract<double>(negative_color_l[2])};
std::ifstream miffile;
std::ios::openmode openmod;
if (binary)
{
            openmod = std::ios::in | std::ios_base::binary;
}
else
{
            openmod = std::ios::in;
}
miffile.open(path, openmod);
int buffer_size = getMifHeader(miffile);
if (buffer_size == 0)
{
miffile.close();
throw std::runtime_error("Invalid mif/ovf file");
}
int lines = znodes * xnodes * ynodes;
double *vals;
vals = (double *)malloc(sizeof(double) * lines * 3);
if (buffer_size == 4)
{
char buffer[buffer_size * lines * 3];
miffile.read(buffer, buffer_size * lines * 3);
float fvals[lines * 3];
std::memcpy(fvals, buffer, lines * 3 * sizeof(float));
for (int i = 0; i < lines * 3; i++)
{
vals[i] = fvals[i];
}
}
else if (buffer_size == 8)
{
char buffer[buffer_size * lines * 3];
miffile.read(buffer, buffer_size * lines * 3);
            // copy out of the stack buffer instead of aliasing it with a
            // cast; the cast left vals dangling once this block ends
            std::memcpy(vals, buffer, lines * 3 * sizeof(double));
}
else if (buffer_size == 1)
{
int i = 0;
if (miffile.is_open())
{
std::string line;
while (std::getline(miffile, line))
{
std::istringstream iss(line);
if (line[0] == '#')
continue;
else
{
double x, y, z;
if (!(iss >> x >> y >> z))
break;
vals[i + 0] = x;
vals[i + 1] = y;
vals[i + 2] = z;
i += 3;
}
}
}
}
miffile.close();
int size = xnodes * ynodes * znodes * resolution * 10 * 3;
double *fut_ndarray = (double *)(malloc(sizeof(double) * size));
// double fut_ndarray[size];
if (fut_ndarray == NULL)
{
throw std::runtime_error("Failed to allocate memory for a large array");
}
double pos[3], vec[3], col[3];
double mag, dot;
int offset = 0;
int index = 0;
int normal_offset = 12;
double theta = 2 * M_PI / resolution;
double c = cos(theta);
double s = sin(theta);
double t_rotation[3][3] = {
{c, -s, 0},
{s, c, 0},
{0, 0, 1}};
double xb = xscaler * 1e9 * xbase / sampling;
double yb = yscaler * 1e9 * ybase / sampling;
double zb = zscaler * 1e9 * zbase / sampling;
double xoffset = xnodes * xb / 2;
double yoffset = ynodes * yb / 2;
double zoffset = znodes * zb / 2;
for (int z = start_layer; z < stop_layer; z += 1)
{
for (int y = 0; y < ynodes; y += sampling)
{
for (int x = 0; x < xnodes; x += sampling)
{
index = 3 * (x + xnodes * y + xnodes * ynodes * z);
mag = sqrt(pow(vals[index + 0], 2) +
pow(vals[index + 1], 2) +
pow(vals[index + 2], 2));
if (mag == 0.0)
continue;
pos[0] = xb * (x % xnodes) - xoffset;
pos[1] = yb * (y % ynodes) - yoffset;
pos[2] = zb * (z % znodes) - zoffset;
vec[0] = vals[index + 0] / mag;
vec[1] = vals[index + 1] / mag;
vec[2] = vals[index + 2] / mag;
dot = vec[0] * color_vector[0] +
vec[1] * color_vector[1] +
vec[2] * color_vector[2];
if (dot > 0)
{
col[0] = positive_color[0] * dot + (1.0 - dot);
col[1] = positive_color[1] * dot + (1.0 - dot);
col[2] = positive_color[2] * dot + (1.0 - dot);
}
else
{
dot *= -1;
col[0] = negative_color[0] * dot + (1.0 - dot);
col[1] = negative_color[1] * dot + (1.0 - dot);
col[2] = negative_color[2] * dot + (1.0 - dot);
}
generateVBO(
fut_ndarray,
&offset,
&normal_offset,
pos,
vec,
col,
t_rotation,
height,
radius,
resolution);
}
}
}
np::dtype dt = np::dtype::get_builtin<double>();
p::tuple shape = p::make_tuple(offset);
p::tuple stride = p::make_tuple(sizeof(double));
p::handle<> h(::PyCapsule_New((void *)fut_ndarray,
NULL,
(PyCapsule_Destructor)&destroyManagerCObject));
return np::from_data(fut_ndarray,
dt,
shape,
stride,
p::object(h));
}
np::ndarray getMifAsNdarray(std::string path)
{
std::ifstream miffile;
        miffile.open(path, std::ios::in | std::ios_base::binary);
int buffer_size = getMifHeader(miffile);
if (buffer_size == 0)
{
miffile.close();
throw std::runtime_error("Invalid mif file");
}
int lines = znodes * xnodes * ynodes;
char buffer[buffer_size * lines * 3];
miffile.read(buffer, buffer_size * lines * 3);
double *vals = (double *)buffer;
double *fut_ndarray = (double *)(malloc(sizeof(double) * lines * 3));
double mag;
for (int i = 0; i < lines * 3; i += 3)
{
mag = sqrt(pow(vals[i + 0], 2) + pow(vals[i + 1], 2) + pow(vals[i + 2], 2));
if (mag == 0.0)
mag = 1.0;
fut_ndarray[i + 0] = vals[i + 0] / mag;
fut_ndarray[i + 1] = vals[i + 1] / mag;
fut_ndarray[i + 2] = vals[i + 2] / mag;
}
miffile.close();
// use explicit namespace here to make sure it does not mix the functions
np::dtype dt1 = np::dtype::get_builtin<double>();
p::tuple shape = p::make_tuple(lines, 3);
p::tuple stride = p::make_tuple(3 * sizeof(double), sizeof(double));
p::handle<> h(::PyCapsule_New((void *)fut_ndarray,
NULL,
(PyCapsule_Destructor)&destroyManagerCObject));
np::ndarray vectorData = np::from_data(fut_ndarray,
dt1,
shape,
stride,
p::object(h));
// last entry is object owner
return vectorData;
}
};
BOOST_PYTHON_MODULE(AdvParser)
{
// avoids the SIGSEV on dtype in numpy initialization
boost::python::numpy::initialize();
using namespace boost::python;
class_<AdvParser>("AdvParser")
.def(init<>())
.def(init<AdvParser>())
.def("getMifAsNdarrayWithColor", &AdvParser::getMifAsNdarrayWithColor)
.def("getMifVBO", &AdvParser::getMifVBO)
.def("getCubeOutline", &AdvParser::getCubeOutline)
.def("getOmfToList", &AdvParser::getOmfToList)
.def("getMifAsNdarray", &AdvParser::getMifAsNdarray)
.def("getHeader", &AdvParser::getHeader)
.def("getShapeAsNdarray", &AdvParser::getShapeAsNdarray)
.def("generateIndices", &AdvParser::generateIndices)
.add_property("xnodes", &AdvParser::getXnodes, &AdvParser::setXnodes)
.add_property("ynodes", &AdvParser::getYnodes, &AdvParser::setYnodes)
.add_property("znodes", &AdvParser::getZnodes, &AdvParser::setZnodes)
.add_property("xbase", &AdvParser::getXbase, &AdvParser::setXbase)
.add_property("ybase", &AdvParser::getYbase, &AdvParser::setYbase)
.add_property("zbase", &AdvParser::getZbase, &AdvParser::setZbase);
}
|
#ifndef TAG_TYPES_4NNM8B5T
#define TAG_TYPES_4NNM8B5T
// Copyright 2010 Dean Michael Berris.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/mpl/vector.hpp>
#include <boost/network/protocol/http/tags.hpp>
namespace http = boost::network::http;
typedef boost::mpl::vector<http::tags::http_default_8bit_tcp_resolve,
http::tags::http_default_8bit_udp_resolve,
http::tags::http_keepalive_8bit_tcp_resolve,
http::tags::http_keepalive_8bit_udp_resolve,
http::tags::http_async_8bit_udp_resolve,
http::tags::http_async_8bit_tcp_resolve> tag_types;
#endif /* TAG_TYPES_4NNM8B5T */
|
/-
Copyright (c) 2022 Antoine Labelle. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Antoine Labelle
! This file was ported from Lean 3 source module category_theory.monoidal.subcategory
! leanprover-community/mathlib commit 70fd9563a21e7b963887c9360bd29b2393e6225a
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.CategoryTheory.Monoidal.Braided
import Mathbin.CategoryTheory.Monoidal.Linear
import Mathbin.CategoryTheory.Preadditive.AdditiveFunctor
import Mathbin.CategoryTheory.Linear.LinearFunctor
import Mathbin.CategoryTheory.Closed.Monoidal
/-!
# Full monoidal subcategories
Given a monoidal category `C` and a monoidal predicate on `C`, that is, a function `P : C → Prop`
closed under `𝟙_` and `⊗`, we can put a monoidal structure on `{X : C // P X}` (the category
structure is defined in `category_theory.full_subcategory`).
When `C` is also braided/symmetric, the full monoidal subcategory also inherits the
braided/symmetric structure.
## TODO
* Add monoidal/braided versions of `category_theory.full_subcategory.lift`
-/
universe u v
namespace CategoryTheory
namespace MonoidalCategory
open Iso
variable {C : Type u} [Category.{v} C] [MonoidalCategory C] (P : C → Prop)
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
/-- A property `C → Prop` is a monoidal predicate if it is closed under `𝟙_` and `⊗`.
-/
class MonoidalPredicate : Prop where
prop_id' : P (𝟙_ C) := by obviously
prop_tensor' : ∀ {X Y}, P X → P Y → P (X ⊗ Y) := by obviously
#align category_theory.monoidal_category.monoidal_predicate CategoryTheory.MonoidalCategory.MonoidalPredicate
restate_axiom monoidal_predicate.prop_id'
restate_axiom monoidal_predicate.prop_tensor'
open MonoidalPredicate
variable [MonoidalPredicate P]
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
/--
When `P` is a monoidal predicate, the full subcategory for `P` inherits the monoidal structure of
`C`.
-/
instance fullMonoidalSubcategory : MonoidalCategory (FullSubcategory P)
where
tensorObj X Y := ⟨X.1 ⊗ Y.1, prop_tensor X.2 Y.2⟩
tensorHom X₁ Y₁ X₂ Y₂ f g := by
change X₁.1 ⊗ X₂.1 ⟶ Y₁.1 ⊗ Y₂.1
change X₁.1 ⟶ Y₁.1 at f
change X₂.1 ⟶ Y₂.1 at g
exact f ⊗ g
tensorUnit := ⟨𝟙_ C, prop_id⟩
associator X Y Z :=
⟨(α_ X.1 Y.1 Z.1).Hom, (α_ X.1 Y.1 Z.1).inv, hom_inv_id (α_ X.1 Y.1 Z.1),
inv_hom_id (α_ X.1 Y.1 Z.1)⟩
leftUnitor X := ⟨(λ_ X.1).Hom, (λ_ X.1).inv, hom_inv_id (λ_ X.1), inv_hom_id (λ_ X.1)⟩
rightUnitor X := ⟨(ρ_ X.1).Hom, (ρ_ X.1).inv, hom_inv_id (ρ_ X.1), inv_hom_id (ρ_ X.1)⟩
tensor_id' X Y := tensor_id X.1 Y.1
tensor_comp' X₁ Y₁ Z₁ X₂ Y₂ Z₂ f₁ f₂ g₁ g₂ := tensor_comp f₁ f₂ g₁ g₂
associator_naturality' X₁ X₂ X₃ Y₁ Y₂ Y₃ f₁ f₂ f₃ := associator_naturality f₁ f₂ f₃
leftUnitor_naturality' X Y f := leftUnitor_naturality f
rightUnitor_naturality' X Y f := rightUnitor_naturality f
pentagon' W X Y Z := pentagon W.1 X.1 Y.1 Z.1
triangle' X Y := triangle X.1 Y.1
#align category_theory.monoidal_category.full_monoidal_subcategory CategoryTheory.MonoidalCategory.fullMonoidalSubcategory
/-- The forgetful monoidal functor from a full monoidal subcategory into the original category
("forgetting" the condition).
-/
@[simps]
def fullMonoidalSubcategoryInclusion : MonoidalFunctor (FullSubcategory P) C
where
toFunctor := fullSubcategoryInclusion P
ε := 𝟙 _
μ X Y := 𝟙 _
#align category_theory.monoidal_category.full_monoidal_subcategory_inclusion CategoryTheory.MonoidalCategory.fullMonoidalSubcategoryInclusion
instance fullMonoidalSubcategory.full : Full (fullMonoidalSubcategoryInclusion P).toFunctor :=
FullSubcategory.full P
#align category_theory.monoidal_category.full_monoidal_subcategory.full CategoryTheory.MonoidalCategory.fullMonoidalSubcategory.full
instance fullMonoidalSubcategory.faithful :
Faithful (fullMonoidalSubcategoryInclusion P).toFunctor :=
FullSubcategory.faithful P
#align category_theory.monoidal_category.full_monoidal_subcategory.faithful CategoryTheory.MonoidalCategory.fullMonoidalSubcategory.faithful
section
variable [Preadditive C]
instance fullMonoidalSubcategoryInclusion_additive :
(fullMonoidalSubcategoryInclusion P).toFunctor.Additive :=
Functor.fullSubcategoryInclusion_additive _
#align category_theory.monoidal_category.full_monoidal_subcategory_inclusion_additive CategoryTheory.MonoidalCategory.fullMonoidalSubcategoryInclusion_additive
instance [MonoidalPreadditive C] : MonoidalPreadditive (FullSubcategory P) :=
monoidalPreadditive_of_faithful (fullMonoidalSubcategoryInclusion P)
variable (R : Type _) [Ring R] [Linear R C]
instance fullMonoidalSubcategoryInclusion_linear :
(fullMonoidalSubcategoryInclusion P).toFunctor.Linear R :=
Functor.fullSubcategoryInclusionLinear R _
#align category_theory.monoidal_category.full_monoidal_subcategory_inclusion_linear CategoryTheory.MonoidalCategory.fullMonoidalSubcategoryInclusion_linear
instance [MonoidalPreadditive C] [MonoidalLinear R C] : MonoidalLinear R (FullSubcategory P) :=
monoidalLinearOfFaithful R (fullMonoidalSubcategoryInclusion P)
end
variable {P} {P' : C → Prop} [MonoidalPredicate P']
/-- An implication of predicates `P → P'` induces a monoidal functor between full monoidal
subcategories. -/
@[simps]
def fullMonoidalSubcategory.map (h : ∀ ⦃X⦄, P X → P' X) :
MonoidalFunctor (FullSubcategory P) (FullSubcategory P')
where
toFunctor := FullSubcategory.map h
ε := 𝟙 _
μ X Y := 𝟙 _
#align category_theory.monoidal_category.full_monoidal_subcategory.map CategoryTheory.MonoidalCategory.fullMonoidalSubcategory.map
instance fullMonoidalSubcategory.mapFull (h : ∀ ⦃X⦄, P X → P' X) :
Full (fullMonoidalSubcategory.map h).toFunctor where preimage X Y f := f
#align category_theory.monoidal_category.full_monoidal_subcategory.map_full CategoryTheory.MonoidalCategory.fullMonoidalSubcategory.mapFull
instance fullMonoidalSubcategory.map_faithful (h : ∀ ⦃X⦄, P X → P' X) :
Faithful (fullMonoidalSubcategory.map h).toFunctor where
#align category_theory.monoidal_category.full_monoidal_subcategory.map_faithful CategoryTheory.MonoidalCategory.fullMonoidalSubcategory.map_faithful
section Braided
variable (P) [BraidedCategory C]
/-- The braided structure on a full subcategory inherited from the braided structure on `C`.
-/
instance fullBraidedSubcategory : BraidedCategory (FullSubcategory P) :=
braidedCategoryOfFaithful (fullMonoidalSubcategoryInclusion P)
(fun X Y =>
⟨(β_ X.1 Y.1).Hom, (β_ X.1 Y.1).inv, (β_ X.1 Y.1).hom_inv_id, (β_ X.1 Y.1).inv_hom_id⟩)
fun X Y => by tidy
#align category_theory.monoidal_category.full_braided_subcategory CategoryTheory.MonoidalCategory.fullBraidedSubcategory
/-- The forgetful braided functor from a full braided subcategory into the original category
("forgetting" the condition).
-/
@[simps]
def fullBraidedSubcategoryInclusion : BraidedFunctor (FullSubcategory P) C
where
toMonoidalFunctor := fullMonoidalSubcategoryInclusion P
braided' X Y := by
rw [is_iso.eq_inv_comp]
tidy
#align category_theory.monoidal_category.full_braided_subcategory_inclusion CategoryTheory.MonoidalCategory.fullBraidedSubcategoryInclusion
instance fullBraidedSubcategory.full : Full (fullBraidedSubcategoryInclusion P).toFunctor :=
fullMonoidalSubcategory.full P
#align category_theory.monoidal_category.full_braided_subcategory.full CategoryTheory.MonoidalCategory.fullBraidedSubcategory.full
instance fullBraidedSubcategory.faithful : Faithful (fullBraidedSubcategoryInclusion P).toFunctor :=
fullMonoidalSubcategory.faithful P
#align category_theory.monoidal_category.full_braided_subcategory.faithful CategoryTheory.MonoidalCategory.fullBraidedSubcategory.faithful
variable {P}
/-- An implication of predicates `P → P'` induces a braided functor between full braided
subcategories. -/
@[simps]
def fullBraidedSubcategory.map (h : ∀ ⦃X⦄, P X → P' X) :
BraidedFunctor (FullSubcategory P) (FullSubcategory P')
where
toMonoidalFunctor := fullMonoidalSubcategory.map h
braided' X Y := by
rw [is_iso.eq_inv_comp]
tidy
#align category_theory.monoidal_category.full_braided_subcategory.map CategoryTheory.MonoidalCategory.fullBraidedSubcategory.map
instance fullBraidedSubcategory.mapFull (h : ∀ ⦃X⦄, P X → P' X) :
Full (fullBraidedSubcategory.map h).toFunctor :=
fullMonoidalSubcategory.mapFull h
#align category_theory.monoidal_category.full_braided_subcategory.map_full CategoryTheory.MonoidalCategory.fullBraidedSubcategory.mapFull
instance fullBraidedSubcategory.map_faithful (h : ∀ ⦃X⦄, P X → P' X) :
Faithful (fullBraidedSubcategory.map h).toFunctor :=
fullMonoidalSubcategory.map_faithful h
#align category_theory.monoidal_category.full_braided_subcategory.map_faithful CategoryTheory.MonoidalCategory.fullBraidedSubcategory.map_faithful
end Braided
section Symmetric
variable (P) [SymmetricCategory C]
instance fullSymmetricSubcategory : SymmetricCategory (FullSubcategory P) :=
symmetricCategoryOfFaithful (fullBraidedSubcategoryInclusion P)
#align category_theory.monoidal_category.full_symmetric_subcategory CategoryTheory.MonoidalCategory.fullSymmetricSubcategory
end Symmetric
section Closed
variable (P) [MonoidalClosed C]
/-- A property `C → Prop` is a closed predicate if it is closed under taking internal homs.
-/
class ClosedPredicate : Prop where
prop_ihom' : ∀ {X Y}, P X → P Y → P ((ihom X).obj Y) := by obviously
#align category_theory.monoidal_category.closed_predicate CategoryTheory.MonoidalCategory.ClosedPredicate
restate_axiom closed_predicate.prop_ihom'
open ClosedPredicate
variable [ClosedPredicate P]
instance fullMonoidalClosedSubcategory : MonoidalClosed (FullSubcategory P)
where closed' X :=
{
isAdj :=
{ right :=
FullSubcategory.lift P (fullSubcategoryInclusion P ⋙ ihom X.1) fun Y =>
prop_ihom X.2 Y.2
adj :=
Adjunction.mkOfUnitCounit
{ Unit :=
{ app := fun Y => (ihom.coev X.1).app Y.1
naturality' := fun Y Z f => ihom.coev_naturality X.1 f }
counit :=
{ app := fun Y => (ihom.ev X.1).app Y.1
naturality' := fun Y Z f => ihom.ev_naturality X.1 f }
left_triangle := by
ext Y
simp
exact ihom.ev_coev X.1 Y.1
right_triangle := by
ext Y
simp
exact ihom.coev_ev X.1 Y.1 } } }
#align category_theory.monoidal_category.full_monoidal_closed_subcategory CategoryTheory.MonoidalCategory.fullMonoidalClosedSubcategory
@[simp]
theorem fullMonoidalClosedSubcategory_ihom_obj (X Y : FullSubcategory P) :
((ihom X).obj Y).obj = (ihom X.obj).obj Y.obj :=
rfl
#align category_theory.monoidal_category.full_monoidal_closed_subcategory_ihom_obj CategoryTheory.MonoidalCategory.fullMonoidalClosedSubcategory_ihom_obj
@[simp]
theorem fullMonoidalClosedSubcategory_ihom_map (X : FullSubcategory P) {Y Z : FullSubcategory P}
(f : Y ⟶ Z) : (ihom X).map f = (ihom X.obj).map f :=
rfl
#align category_theory.monoidal_category.full_monoidal_closed_subcategory_ihom_map CategoryTheory.MonoidalCategory.fullMonoidalClosedSubcategory_ihom_map
end Closed
end MonoidalCategory
end CategoryTheory
|
# Covid-19: From model prediction to model predictive control
## A demo of the deterministic modeling framework
*Original code by Ryan S. McGee. Modified by T.W. Alleman in consultation with the BIOMATH research unit headed by prof. Ingmar Nopens.*
Copyright (c) 2020 by T.W. Alleman, BIOMATH, Ghent University. All Rights Reserved.
Our code implements a SEIRS infectious disease dynamics model with extensions to model the effect of quarantining detected cases. Notably, this package includes stochastic implementations of these models on dynamic networks. We modified the original implementation by Ryan McGee at its source to account for additional Covid-19 disease characteristics. The code was then integrated with our previous work and allows us to quickly perform Monte Carlo simulations, calibration of model parameters and the calculation of *optimal* government policies using a model predictive controller. A white paper and source code of our previous work can be found on the Biomath website.
https://biomath.ugent.be/covid-19-outbreak-modelling-and-control
```python
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import Image
from ipywidgets import interact,fixed,FloatSlider,IntSlider,ToggleButtons
import pandas as pd
import datetime
import scipy
import coronaHelper2 as cH
from scipy.integrate import odeint
import matplotlib.dates as mdates
import matplotlib
import scipy.stats as st
import networkx
import models
```
## Introduction
### Model dynamics
In this work, we extended the SEIR model to model the disease spread with a higher resolution. To this end, the *infected pool* is split into four types of infectiousness: 1) supermild (SM): people who show little to no symptoms at all, 2) mild (M): people with noticeable, mild symptoms, 3) heavy (H): people hospitalised but not in need of intensive care and 4) critical (C): people hospitalised and in need of intensive care. It takes several days before a heavy or critical infection becomes so severe that these patients need to be hospitalised. For this reason, the pool of heavily and critically infected patients is split in two: not yet hospitalised (H, C) and hospitalised (HH, CH). The *removed pool* from the classical SEIR model is explicitly split into an immune (R) and dead (F) pool. People from the susceptible, exposed, supermild infected, mild infected and immune pools can be quarantined after having tested positive for covid-19. In the flowchart below, quarantine is denoted by the *Q*-suffix. Note that for individuals in the susceptible and immune pools, this corresponds to receiving a false positive test.
### Deterministic vs. Stochastic framework
The extended SEIR model is implemented using two frameworks: a deterministic and a stochastic (network based) framework. A deterministic implementation of the extended SEIRS model captures important features of infectious disease dynamics, but it assumes uniform mixing of the population (i.e. every individual in the population is equally likely to interact with every other individual). The deterministic approach results in a set of N ordinary differential equations, one for each of the N 'population pools' considered. The main advantage of the deterministic model is that it requires few computational resources while still maintaining acceptable accuracy. The deterministic framework allows one to rapidly explore scenarios and perform optimisations that require thousands of function evaluations.
However, it is often important to consider the structure of contact networks when studying disease transmission and the effect of interventions such as social distancing and contact tracing. The main drawback of the deterministic approach is the inability to simulate contact tracing, which is one of the most promising measures against the spread of sars-cov-2. For this reason, the SEIRS dynamics depicted in the above flowchart are also simulated on a stochastic dynamical network. Its advantages include a more detailed analysis of the relationship between social network structure and effective transmission rates, including the effect of network-based interventions such as social distancing, quarantining, and contact tracing. The first drawback is the increased amount of computational resources required, leading to the inability to perform optimisations on a personal computer. Instead, high performance computing infrastructure is needed. The second drawback is the need for more data and/or assumptions on social interactions and how government measures affect these social interactions.
#### Deterministic equations
The dynamics of the deterministic system are mathematically formulated as the rate of change of each population pool shown in the above flowchart. This results in the following system of ordinary differential equations (subject to change),
\begin{eqnarray}
\dot{S} &=& - \beta \cdot N_c \Big( \frac{E+SM}{N} \Big) \cdot S - \theta_{\text{S}} \psi_{\text{FP}} \cdot S + SQ/d_{\text{q,FP}} + \zeta \cdot R,\\
\dot{E} &=& \beta \cdot N_c \Big( \frac{E+SM}{N} \Big) \cdot S - E / \sigma - \theta_{\text{E}} \psi_{\text{PP}} \cdot E,\\
\dot{SM} &=& \text{sm}/\sigma \cdot E - SM/d_{\text{sm}} - \theta_{\text{SM}} \psi_{\text{PP}} \cdot SM,\\
\dot{M} &=& \text{m} / \sigma \cdot E - M/d_{\text{m}} - \theta_{\text{M}} \psi_{\text{PP}} \cdot M,\\
\dot{H} &=& \text{h} / \sigma \cdot E + h / \sigma \cdot EQ - H/d_{\text{hospital}},\\
\dot{C} &=& \text{c} / \sigma \cdot E + c / \sigma \cdot EQ - C/d_{\text{hospital}},\\
\dot{HH} &=& H/d_{\text{hospital}} - HH/d_{h}\\
\dot{CH} &=& C/d_{\text{hospital}} - m_c \cdot CH/d_{\text{cf}} - (1-m_c) \cdot CH/d_{\text{cr}}\\
\dot{F} &=& m_c \cdot CH/d_{\text{cf}}\\
\dot{R} &=& SM/d_{\text{sm}} + M/d_{\text{m}} + HH/d_{\text{h}} + (1-m_c) \cdot CH/d_{\text{cr}} + SMQ/d_{\text{sm}} \\ && + MQ/d_{\text{m}} + RQ/d_{\text{q,FP}} - \zeta \cdot R \\
\dot{SQ} &=& \theta_{\text{S}} \psi_{\text{FP}} \cdot S - SQ/d_{\text{q,FP}} \\
\dot{EQ} &=& \theta_{\text{E}} \psi_{\text{PP}} \cdot E - EQ/\sigma\\
\dot{SMQ} &=& \theta_{\text{SM}} \psi_{\text{PP}} \cdot SM + sm/\sigma \cdot EQ - SMQ/d_{\text{sm}} \\
\dot{MQ} &=& \theta_{\text{M}} \psi_{\text{PP}} \cdot M + m/\sigma \cdot EQ - MQ/d_{\text{m}}\\
\dot{RQ} &=& \theta_{\text{R}} \psi_{\text{FP}} \cdot R - RQ/d_{\text{q,FP}}
\end{eqnarray}
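To make the structure of this system concrete, below is a minimal sketch of the right-hand side of a simplified subset of these equations (testing and quarantine switched off, i.e. all $\theta$'s zero) in a form suitable for `scipy.integrate.odeint`. The function and the initial condition are illustrative assumptions, not part of the package itself.
```python
import numpy as np
from scipy.integrate import odeint

def seirs_rhs(y, t, beta, Nc, sigma, sm, m, h, c,
              dsm, dm, dhospital, dh, dcf, dcr, mc, zeta):
    # simplified subset of the deterministic model: no testing/quarantine terms
    S, E, SM, M, H, C, HH, CH, F, R = y
    N = S + E + SM + M + H + C + HH + CH + R
    infection = beta * Nc * (E + SM) / N * S
    dS  = -infection + zeta * R
    dE  = infection - E / sigma
    dSM = sm / sigma * E - SM / dsm
    dM  = m / sigma * E - M / dm
    dH  = h / sigma * E - H / dhospital
    dC  = c / sigma * E - C / dhospital
    dHH = H / dhospital - HH / dh
    dCH = C / dhospital - mc * CH / dcf - (1 - mc) * CH / dcr
    dF  = mc * CH / dcf
    dR  = SM / dsm + M / dm + HH / dh + (1 - mc) * CH / dcr - zeta * R
    return [dS, dE, dSM, dM, dH, dC, dHH, dCH, dF, dR]

# one exposed individual in a population of 11.4 million (illustrative)
y0 = [11.4e6 - 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
t = np.linspace(0, 100, 101)
out = odeint(seirs_rhs, y0, t,
             args=(0.032, 11.2, 5.2, 0.86, 0.1134, 0.0196, 0.0070,
                   14, 14, 9.1, 21, 18.5, 22.0, 0.49, 0))
```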
#### Stochastic equations
Consider a network graph G like the one depicted below, representing individuals (nodes) and their interactions (edges). Each individual (node) has a state (S, E, SM, M, H, C, HH, CH, R, F, SQ, EQ, SMQ, MQ or RQ). The set of nodes adjacent (connected by an edge) to an individual defines their set of "close contacts" (highlighted in black). At a given time, each individual makes contact with a random individual from their set of close contacts with probability (1-p) or with a random individual from anywhere in the network (highlighted in blue) with probability p. The latter global contacts represent individuals interacting with the population at large: individuals outside of one's inner social circle, such as on public transit, at an event, or an old acquaintance one happens to run into. When a susceptible individual interacts with an exposed or infectious individual, they become exposed themselves. The parameter p defines the locality of the network: for p=0 an individual only interacts with their close contacts, while p=1 represents a uniformly mixed population. **For a sufficiently large network G, and $p = 1$, the stochastic and deterministic framework will yield the same result**. Social distancing interventions influence both the locality and connectivity of the network. It is expected that social distancing lowers the average number of interactions an individual has each day (lower connectivity of the network). But as a result, the fraction of interactions in the inner circle increases, lowering p.
Each node $i$ has a state $X_i$ that updates according to a probability transition rate. The dynamics of the stochastic system are mathematically formulated as probabilities of transitioning between the pools shown in the model flowchart. In addition, backtracking of confirmed cases can be included in the model,
\begin{eqnarray}
\text{Pr}(X_i = S \rightarrow E) &=& \Big[ \underbrace{p \cdot \frac{\beta (E + SM)}{N}}_{\text{infection through random encounter}} \\
&& + \underbrace{(1-p) \cdot \frac{\beta \sum_{j \in C_G(i)} (\delta_{X_j = E}+\delta_{X_j = SM})}{C_G (i)}}_{\text{infection through inner circle}} \Big] \delta_{X_i=S},\\
\text{Pr}(X_i = E \rightarrow SM) &=& (sm/\sigma) \cdot \delta_{X_i = E},\\
\text{Pr}(X_i = E \rightarrow M) &=& (m/\sigma) \cdot \delta_{X_i = E},\\
\text{Pr}(X_i = E \rightarrow H) &=& (h/\sigma) \cdot \delta_{X_i = E},\\
\text{Pr}(X_i = E \rightarrow C) &=& (c/\sigma) \cdot \delta_{X_i = E},\\
\text{Pr}(X_i = H \rightarrow HH) &=& (1/d_{\text{hospital}}) \delta_{X_i = H},\\
\text{Pr}(X_i = C \rightarrow CH) &=& (1/d_{\text{hospital}}) \delta_{X_i = C},\\
\text{Pr}(X_i = SM \rightarrow R) &=& (1/d_{\text{sm}}) \cdot \delta_{X_i = SM},\\
\text{Pr}(X_i = M \rightarrow R) &=& (1/d_{\text{m}}) \cdot \delta_{X_i = M},\\
\text{Pr}(X_i = HH \rightarrow R) &=& (1/d_{\text{h}}) \cdot \delta_{X_i = HH},\\
\text{Pr}(X_i = CH \rightarrow R) &=& (1-m_c)/d_{\text{cr}} \cdot \delta_{X_i = CH},\\
\text{Pr}(X_i = CH \rightarrow F) &=& (m_c/d_{\text{cf}}) \cdot \delta_{X_i = CH},\\
\text{Pr}(X_i = S \rightarrow SQ) &=& \Big[\theta_S + \phi_S \big[ \sum_{j \in C_G(i)} (\delta_{X_j = SQ}+\delta_{X_j = EQ}+\delta_{X_j = SMQ} \\
&&+\delta_{X_j = MQ}+\delta_{X_j = RQ}+\delta_{X_j = HH}+\delta_{X_j = CH}) \big] \Big] \cdot \psi_{\text{FP}} \cdot \delta_{X_i = S}, \\
\text{Pr}(X_i = E \rightarrow EQ) &=& \Big[\theta_E + \phi_E \big[ \sum_{j \in C_G(i)} (\delta_{X_j = SQ}+\delta_{X_j = EQ}+\delta_{X_j = SMQ} \\
&&+\delta_{X_j = MQ}+\delta_{X_j = RQ}+\delta_{X_j = HH}+\delta_{X_j = CH}) \big] \Big] \cdot \psi_{\text{PP}} \cdot \delta_{X_i = E}, \\
\text{Pr}(X_i = SM \rightarrow SMQ) &=& \Big[\theta_{SM} + \phi_{SM} \big[ \sum_{j \in C_G(i)} (\delta_{X_j = SQ}+\delta_{X_j = EQ}+\delta_{X_j = SMQ} \\
&&+\delta_{X_j = MQ}+\delta_{X_j = RQ}+\delta_{X_j = HH}+\delta_{X_j = CH}) \big] \Big] \cdot \psi_{\text{PP}} \cdot \delta_{X_i = SM}, \\
\text{Pr}(X_i = M \rightarrow MQ) &=& \Big[\theta_{M} + \phi_{M} \big[ \sum_{j \in C_G(i)} (\delta_{X_j = SQ}+\delta_{X_j = EQ}+\delta_{X_j = SMQ} \\
&&+\delta_{X_j = MQ}+\delta_{X_j = RQ}+\delta_{X_j = HH}+\delta_{X_j = CH}) \big] \Big] \cdot \psi_{\text{PP}} \cdot \delta_{X_i = M} \\
\text{Pr}(X_i = R \rightarrow RQ) &=& \Big[\theta_{R} + \phi_{R} \big[ \sum_{j \in C_G(i)} (\delta_{X_j = SQ}+\delta_{X_j = EQ}+\delta_{X_j = SMQ} \\
&&+\delta_{X_j = MQ}+\delta_{X_j = RQ}+\delta_{X_j = HH}+\delta_{X_j = CH}) \big] \Big] \cdot \psi_{\text{FP}} \cdot \delta_{X_i = R}\\
\text{Pr}(X_i = SQ \rightarrow S) &=& (1/d_{q,FP}) \cdot \delta_{X_i = SQ},\\
\text{Pr}(X_i = EQ \rightarrow SMQ) &=& (sm/\sigma) \cdot \delta_{X_i = EQ},\\
\text{Pr}(X_i = EQ \rightarrow MQ) &=& (m/\sigma) \cdot \delta_{X_i = EQ},\\
\text{Pr}(X_i = EQ \rightarrow H) &=& (h/\sigma) \cdot \delta_{X_i = EQ},\\
\text{Pr}(X_i = EQ \rightarrow C) &=& (c/\sigma) \cdot \delta_{X_i = EQ},\\
\text{Pr}(X_i = SMQ \rightarrow R) &=& (1/d_{\text{sm}}) \cdot \delta_{X_i = SMQ},\\
\text{Pr}(X_i = MQ \rightarrow R) &=& (1/d_{\text{m}}) \cdot \delta_{X_i = MQ},\\
\text{Pr}(X_i = RQ \rightarrow R) &=& (1/d_{\text{q,FP}}) \cdot \delta_{X_i = RQ},\\
\text{Pr}(X_i = R \rightarrow S) &=& \zeta \cdot \delta_{X_i = R},\\
\end{eqnarray}
where $\delta_{X_i = A} = 1$ if the state of $X_i$ is A, or 0 if not, and where $C_G(i)$ denotes the set of close contacts of node i (adjacent nodes). For large populations and $p = 1$, this stochastic model approaches the same dynamics as the deterministic SEIRS model.
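To illustrate how these transition probabilities act on a graph, here is a toy, single-step sketch (an illustrative assumption, not the package's simulation loop) of the `S → E` rule: each susceptible node mixes a global encounter with probability $p$ and an inner-circle encounter with probability $1-p$, mirroring the first equation above.
```python
import numpy as np
import networkx

def infection_step(G, state, beta, p, rng):
    # state: dict node -> compartment label; one discrete-time update of S -> E
    nodes = list(G.nodes)
    contagious_frac = sum(state[j] in ('E', 'SM') for j in nodes) / len(nodes)
    new_state = dict(state)
    for i in nodes:
        if state[i] != 'S':
            continue
        neigh = list(G.neighbors(i))
        local_frac = (sum(state[j] in ('E', 'SM') for j in neigh) / len(neigh)) if neigh else 0.0
        # global encounter with probability p, inner-circle encounter otherwise
        pr_inf = beta * (p * contagious_frac + (1 - p) * local_frac)
        if rng.random() < pr_inf:
            new_state[i] = 'E'
    return new_state

G = networkx.barabasi_albert_graph(n=200, m=4)
state = {i: 'S' for i in G.nodes}
state[0] = 'E'
state = infection_step(G, state, beta=0.29, p=0.1, rng=np.random.default_rng(0))
```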
### Model parameters
In the above equations, S stands for susceptible, E for exposed, SM for supermild, M for mild, H for heavy, C for critical, HH for heavy and hospitalised, CH for critical and hospitalised, F for dead, R for immune. The quarantined states are SQ, which stands for susceptible and quarantined, EQ for exposed and quarantined, SMQ for supermild infected and quarantined, MQ for mildly infected and quarantined and RQ for recovered and quarantined. N stands for the total population. The clinical parameters are sm, m, h and c: the chance of having a supermild, mild, heavy or critical infection respectively. Based on reported cases in China and travel data, Li et al. (2020b) estimated that 86 % of coronavirus infections in the country were "undocumented" in the weeks before officials instituted stringent quarantines. In this work it is assumed that 86 % of all infected cases are supermild (asymptomatic) and hence,
$$sm = 0.86$$
Based on previously reported estimates of the distribution between mild, severe and critical cases (Wu and McGoogan, 2020), it is then calculated that the chance of contracting a mild, severe or critical infection is,
$$m = (1 − 0.86) · 0.81 = 0.1134,$$
$$h = (1 − 0.86) · 0.14 = 0.0196,$$
$$c = (1 − 0.86) · 0.05 = 0.0070.$$
$d_{sm}$, $d_m$, $d_h$: the number of symptomatic days in case of a supermild, mild or heavy infection. $d_{\text{cf}}$ is the time from hospitalisation until death in case of a critical infection, while $d_{\text{cr}}$ is the recovery time for critical cases. Zhou et al. (2020) performed a retrospective study on 191 Chinese hospital patients and determined that the time from illness onset to discharge or death was 22.0 days (18.0-25.0, IQR) and 18.5 days (15.0-22.0, IQR) for survivors and victims respectively. Using available preliminary data, the World Health Organisation estimated the median time from onset to clinical recovery for mild cases to be approximately 2 weeks and to be 3-6 weeks for patients with severe or critical disease (WHO, 2020). Based on this report, we assume a recovery time of three weeks for heavy infections. $d_{\text{hospital}}$: the time before heavily or critically infected patients reach the hospital, on average 9.1 days (Li et al., 2020a). $m_c$: the mortality in case of a critical infection, which is roughly 50% (Wu and McGoogan, 2020). It is assumed that if the number of critical cases surpasses the total number of ICU beds, the mortality is changed to the average between 0.49 and 1, with the fraction of patients in critical condition receiving care as a weight. This is formulated mathematically in the following way,
\begin{equation}
m_c = \underbrace{\Big( \frac{ICU_{\text{max}}}{CH} \Big) \cdot 0.49}_{\text{critical patient receives care}} + \underbrace{\Big( \frac{CH - ICU_{\text{max}}}{CH} \Big) \cdot 1.00}_{\text{critical patient receives no care}}\ .
\end{equation}
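A minimal sketch of this overflow rule (assuming the weight is computed from the hospitalised critical pool $CH$):
```python
def critical_mortality(CH, ICU_max=2600, mc0=0.49):
    # below capacity, every critical patient receives care
    if CH <= ICU_max:
        return mc0
    # above capacity: care-weighted average of 0.49 (care) and 1.0 (no care)
    return (ICU_max / CH) * mc0 + ((CH - ICU_max) / CH) * 1.0
```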
$\sigma$: the incubation period in days. The incubation period is assumed to be Erlang distributed as reported by Li et al. (2020a). The average incubation time is 5.2 days. $\zeta$: models the effect of re-susceptibility and seasonality of a disease. Throughout this demo, we assume $\zeta = 0$ because no data on re-susceptibility is available at the moment. We thus assume permanent immunity after recovering from the infection.
The transmission rate of the disease in the deterministic model depends on the product of three contributions. The first contribution, $(E+SM)/N$, is the probability of encountering a contagious individual. The second contribution, $N_c$, is the average number of human-to-human interactions per day. As previously explained, **in the deterministic framework, this means all $N_c$ contacts within the population are random**. In this work, we explicitly split $\beta$ and $N_c$ because this has the following advantages: 1) $\beta$ is now a disease characteristic, independent of social interactions. 2) The goal of this work is to demonstrate the concept of social control to contain the outbreak using model predictive control. By splitting $\beta$ and $N_c$, the controlled variable is the number of random contacts $N_c$, which is more comprehensible to the reader of this text. The number of human-human interactions per day is estimated using the Social Contact Rates (SOCRATES) Data Tool (Willem et al., 2020). The dataset on human-human interactions for Belgium is based on a 2008 study by Mossong, which kept track of 750 participants and their interactions (8878 interactions) in Belgium. The dataset includes both physical and non-physical interactions of any duration. The third contribution, $\beta$, is the probability of contracting sars-cov-2 when encountering a contagious individual. The testing and quarantine parameters are: $\theta_{S}$, $\theta_{E}$, $\theta_{SM}$, $\theta_{M}$, expressed as the number of susceptibles, exposed, supermild and mild individuals tested each day. $\psi_{PP}$: probability of correctly identifying and quarantining an exposed or infected person. $\psi_{FP}$: probability of falsely identifying and quarantining a susceptible or recovered individual. Both parameters can be calculated using the reliability of the test and Bayes theorem. The latest reported reliability was only 71 \% (need ref). $d_{\text{q,FP}}$: the duration of the quarantine in the case of a false positive, assumed to be 14 days.
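As a sketch of how the test parameters could be filled in: in the rate terms $\theta \psi \cdot X$ above, $\psi_{\text{PP}}$ plays the role of $P(\text{positive} \mid \text{infected})$ (the test sensitivity) and $\psi_{\text{FP}}$ of $P(\text{positive} \mid \text{not infected})$ (one minus the specificity); a Bayesian posterior such as the positive predictive value would additionally require a prevalence estimate. The numbers below are illustrative assumptions.
```python
def quarantine_probabilities(sensitivity, specificity):
    # assumed reading of the model's test parameters:
    # psi_PP: chance a tested infected individual is (correctly) quarantined
    # psi_FP: chance a tested healthy individual is (falsely) quarantined
    psi_PP = sensitivity
    psi_FP = 1.0 - specificity
    return psi_PP, psi_FP

psi_PP, psi_FP = quarantine_probabilities(0.71, 0.98)  # illustrative values
```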
### Gathering social interaction data:
#### Social Contact Rates (SOCRATES) Data Tool
https://lwillem.shinyapps.io/socrates_rshiny/
1. What is the average number of daily human-to-human contacts of the Belgian population? Include all ages, all genders and both physical and non-physical interactions of any duration. To include all ages, type: *0,60+* in the *Age Breaks* dialog box.
2. What is the average number of physical human-to-human contacts of the Belgian population? Include all ages, all genders and all durations of physical contact.
3. What is the average number of physical human-to-human contacts of at least 1 hour of the Belgian population?
4. Based on the above results, how would you estimate $N_c$ in the deterministic model?
5. Based on the above results, how would you estimate $p$ in the stochastic model? Recall that $p$ is the fraction of *random contacts* a person has on a daily basis, while $(1-p)$ is the fraction of *inner circle contacts* a person has on a daily basis.
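As a sketch of question 5: if, say, 4.1 of the 11.2 average daily contacts are counted as inner-circle contacts (these are the figures used in the *checkpoints* dictionary further below; the split itself is an assumption for illustration), then:

```python
total_contacts = 11.2                   # average daily contacts (SOCRATES)
inner_circle = 4.1                      # assumed inner-circle contacts
p = 1 - inner_circle / total_contacts   # fraction of random contacts, ~0.63
```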
#### Google COVID-19 Community Mobility Reports
https://www.google.com/covid19/mobility/
#### London School of Hygiene
https://www.thelancet.com/journals/lanpub/article/PIIS2468-2667(20)30073-6/fulltext
```python
# -----------------------
# Define model parameters
# -----------------------
# Clinical parameters
beta = 0.032 # already calibrated for the deterministic model, change to 0.29 for stochastic
Nc = np.array([[11.2]]) # average number of human-to-human contacts (USE SOCRATES TO DETERMINE VALUE)
zeta = 0 # re-susceptibility parameter (0 = permanent immunity)
dsm = 14 # length of disease for asymptomatic (SM) infections
dm = 14 # length of disease for mildly symptomatic infections
dhospital = 9.1 # average time from symptom onset to hospitalisation for H and C infections
dh = 21 # average recovery time for heavy infection
mc0 = 0.49 # mortality for critical cases when they receive care
ICU = 2600 # number of available ICU beds in Belgium
# Testing parameters
totalTests = 0
theta_S = 0 # no. of daily tests of susceptibles
theta_E = 0 # no. of daily tests of exposed
theta_SM = 0 # no. of daily tests of SM infected
theta_M = 0 # no. of daily tests of M infected
theta_R = 0 # no. of daily tests of recovered patients
phi_S = 0 # backtracking of susceptibles
phi_E = 0 # backtracking of exposed
phi_SM = 0 # backtracking of supermild cases
phi_R = 0 # backtracking of recovered cases
psi_FP = 0 # probability of a false positive test
psi_PP = 1 # probability of a correct test
dq = 14 # length of quarantine for false positives
# Interesting article about serological tests:
#https://www.tijd.be/dossiers/coronavirus/kunnen-bloedtests-onze-maatschappij-snel-weer-in-gang-trappen/10218259.html
```
## Performing simulations
#### Additional arguments to run a stochastic simulation
Before a stochastic simulation can be performed, two more parameters must be defined. The first is the interaction network G, which determines the connectivity of the network; the second is the parameter p, which determines the locality of the network. After defining both parameters, all stochastic-specific parameters are grouped in a dictionary *stoArgs*, which must be passed as an optional keyword argument to the *simModel* function (see the sketch after the following code block).
```python
# Construct the network G
numNodes = 8000
baseGraph = networkx.barabasi_albert_graph(n=numNodes, m=7)
# Baseline normal interactions:
G_norm = models.custom_exponential_graph(baseGraph, scale=200)
models.plot_degree_distn(G_norm, max_degree=40)
# Construct the network G under social distancing
numNodes = 8000
baseGraph = networkx.barabasi_albert_graph(n=numNodes, m=2)
# Baseline normal interactions:
G_dist = models.custom_exponential_graph(baseGraph, scale=20000)
models.plot_degree_distn(G_dist, max_degree=40)
```
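Following the paragraph above, the stochastic-specific parameters can then be grouped as follows. This is a sketch: the keys 'G' and 'p' are assumed to match the names used in the *checkpoints* dictionary below, and the value of p is illustrative.

```python
stoArgs = {'G': G_norm,  # interaction network (connectivity)
           'p': 0.6}     # fraction of random contacts (locality); illustrative value
```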
#### The use of checkpoints to change parameters on the fly
A cool feature of the original SEIRSplus package by Ryan McGee was the use of a so-called *checkpoints* dictionary to change simulation parameters on the fly. In our modification, this feature is preserved. Below you can find an example of a *checkpoints* dictionary: the simulation is started with the provided parameters, and at each time listed under *t* the corresponding entries of the other keys take effect. In the example below, after 25 days the fraction of random contacts *p* is lowered to 0.1 and the sparser social-distancing network *G_dist* is used; after 40 days the normal network *G_norm* is restored, *p* is raised to 1-4.1/11.2 (roughly 0.63) and 1,000,000 daily tests become available; the entries for day 140 keep these settings in place. When simulating a dynamic network, the connectivity of the network can thus be changed on the fly via the *G* key. If no checkpoints are needed, *checkpoints* should be set to *None*.
```python
checkpoints = {'t': [25,40,140],
'p': [0.1,1-4.1/11.2,1-4.1/11.2],
'G': [G_dist,G_norm,G_norm],
'totalTests': [0,1000000,1000000]
}
```
#### Monte-Carlo sampling
One of the layers added to the existing SEIRSplus package is the ability to perform Monte-Carlo sampling of selected parameters. In our simulation, we assume four parameters are distributed: $s_m$, $d_{hf}$, $d_{hr}$ and $\sigma$.
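As an illustration of what such sampling could look like for one parameter: the incubation period $\sigma$ is Erlang distributed with a mean of 5.2 days (see above), and an Erlang distribution is a gamma distribution with integer shape. A sketch, where the shape parameter is an assumption since only the mean is given in the text:

```python
import numpy as np

k = 3                                                             # assumed Erlang shape parameter
sigma_samples = np.random.gamma(shape=k, scale=5.2 / k, size=400) # mean = 5.2 days
```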
```python
monteCarlo = False # perform Monte-Carlo sampling (recommended to disable for stochastic simulations)
n_samples = 400 # from 200 samples upwards, no observable change
```
```python
# ------------------------
# Define initial condition
# ------------------------
initN = np.array([6e6])
initE= np.array([1])
initSM=np.zeros(1)
initM=np.zeros(1)
initH=np.zeros(1)
initC=np.zeros(1)
initHH=np.zeros(1)
initCH=np.zeros(1)
initR=np.zeros(1)
initF=np.zeros(1)
initSQ=np.zeros(1)
initEQ=np.zeros(1)
initSMQ=np.zeros(1)
initMQ=np.zeros(1)
initRQ=np.zeros(1)
# ----------------------------
# Define simulation parameters
# ----------------------------
simtime = 300 # length of simulation (days)
modelType = 'deterministic' # alternative: stochastic
method = 'none' # default
```
```python
# --------------
# Run simulation
# --------------
simout=cH.simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,
psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,
initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,
modelType,checkpoints,**stoArgs)
```
```python
# -----------
# Plot result
# -----------
t = simout['t']
I = simout['SM']+ simout['M'] + simout['H'] + simout['C'] + simout['HH'] + simout['CH']
plt.figure(1)
plt.plot(t,np.mean(simout['S'],axis=1),color="black")
plt.fill_between(t, np.percentile(simout['S'],90,axis=1), np.percentile(simout['S'],10,axis=1),color="black",alpha=0.2)
plt.plot(t,np.mean(simout['E'],axis=1),color="blue")
plt.fill_between(t, np.percentile(simout['E'],90,axis=1), np.percentile(simout['E'],10,axis=1),color="blue",alpha=0.2)
plt.plot(t,np.mean(I,axis=1),color="red")
plt.fill_between(t, np.percentile(I,90,axis=1), np.percentile(I,10,axis=1),color="red",alpha=0.2)
plt.plot(t,np.mean(simout['R'],axis=1),color="green")
plt.fill_between(t, np.percentile(simout['R'],90,axis=1), np.percentile(simout['R'],10,axis=1),color="green",alpha=0.2)
plt.legend(('susceptible','exposed','total infected','immune'))
plt.xlabel('days')
plt.ylabel('number of patients')
plt.figure(2)
plt.plot(t,np.mean(simout['HH'],axis=1),color="orange")
plt.fill_between(t, np.percentile(simout['HH'],90,axis=1), np.percentile(simout['HH'],10,axis=1),color="orange",alpha=0.2)
plt.plot(t,np.mean(simout['CH'],axis=1),color="red")
plt.fill_between(t, np.percentile(simout['CH'],90,axis=1), np.percentile(simout['CH'],10,axis=1),color="red",alpha=0.2)
plt.plot(t,np.mean(simout['F'],axis=1),color="black")
plt.fill_between(t, np.percentile(simout['F'],90,axis=1), np.percentile(simout['F'],10,axis=1),color="black",alpha=0.2)
plt.xlabel('days')
plt.ylabel('number of patients')
# vlines=checkpoints['t']
# vline_colors=['red','green','black','black']
# vline_labels=[]
# vline_styles=[]
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# # Draw the vertical line annotations:
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# if(len(vlines)>0 and len(vline_colors)==0):
# vline_colors = ['black']*len(vlines)
# if(len(vlines)>0 and len(vline_labels)==0):
# vline_labels = [None]*len(vlines)
# if(len(vlines)>0 and len(vline_styles)==0):
# vline_styles = [':']*len(vlines)
# for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
# if(vline_x is not None):
# plt.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
plt.legend(('heavy','critical','dead','start lockdown','end lockdown'))
plt.figure(3)
plt.plot(t,np.mean(simout['SQ'],axis=1),color="green")
plt.fill_between(t, np.percentile(simout['SQ'],90,axis=1), np.percentile(simout['SQ'],10,axis=1),color="green",alpha=0.2)
plt.plot(t,np.mean(simout['EQ'],axis=1),color="orange")
plt.fill_between(t, np.percentile(simout['EQ'],90,axis=1), np.percentile(simout['EQ'],10,axis=1),color="orange",alpha=0.2)
plt.plot(t,np.mean(simout['SMQ'],axis=1),color="red")
plt.fill_between(t, np.percentile(simout['SMQ'],90,axis=1), np.percentile(simout['SMQ'],10,axis=1),color="red",alpha=0.2)
plt.legend(('SQ','EQ','SMQ'))
plt.xlabel('days')
plt.ylabel('number of patients')
plt.figure(4)
plt.plot(t,np.mean(simout['M'],axis=1),color="red")
plt.fill_between(t, np.percentile(simout['M'],90,axis=1), np.percentile(simout['M'],10,axis=1),color="red",alpha=0.2)
plt.plot(t,np.mean(simout['MQ'],axis=1),color="green")
plt.fill_between(t, np.percentile(simout['MQ'],90,axis=1), np.percentile(simout['MQ'],10,axis=1),color="green",alpha=0.2)
plt.legend(('M','MQ','number of tests on S'))
plt.xlabel('days')
plt.ylabel('number of patients')
```
## Calibrating $\beta$ in a *business-as-usual* scenario ($N_c = 11.2$)
```python
modelType = 'deterministic'
```
```python
columns = ['hospital','ICU','dead']
hospital = np.array([[58,97,163,264,368,496,648,841,1096,1380,1643,1881,2137,2715,3068,3640,4077,4468,4884,4975,5206,5358,5492,5509,5600,5738,5692,5590,5610,5635,5409,5393,5536]])
ICUvect= np.array([[5,24,33,53,79,100,130,164,238,290,322,381,474,605,690,789,867,927,1021,1088,1144,1205,1245,1261,1257,1260,1276,1285,1278,1262,1232,1234,1223]])
dead = np.array([[3,4,4,10,10,14,21,37,67,75,88,122,178,220,289,353,431,513,705,828,1011,1143,1283,1447,1632,2035,2240,2523,3019,3346,3600,3903,4157]])
index=pd.date_range('2020-03-13', freq='D', periods=ICUvect.size)
data = np.concatenate((hospital,ICUvect,dead),axis=0)
data = np.transpose(data)
data_belgie=pd.DataFrame(data,index=index, columns=columns)
```
```python
betaZonderIngrijpen=[]
```
```python
# -------------------------------
# Parameters of fitting algorithm
# -------------------------------
monteCarlo = False
n_samples = 100
maxiter=30
popsize=5
polish=True
disp = True
bounds=[(0.01,0.05),(1,60)]
idx=-22
print(index[idx])
idx = idx+1
data=np.transpose(ICUvect[:,0:idx])
method = 'findTime'
modelType = 'deterministic'
checkpoints=None
fitTo = np.array([8]) #positions in output of runSimulation that must be added together, here: CH
# -----------
# Perform fit
# -----------
estimate = cH.modelFit(bounds,data,fitTo,initN,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,monteCarlo,n_samples,method,modelType,checkpoints,disp,polish,maxiter,popsize)
betaZonderIngrijpen.append(estimate[0])
print(estimate)
```
2020-03-24 00:00:00
/home/twallema/anaconda3/lib/python3.7/site-packages/scipy/optimize/_differentialevolution.py:494: UserWarning: differential_evolution: the 'workers' keyword has overridden updating='immediate' to updating='deferred'
" updating='deferred'", UserWarning)
differential_evolution step 1: f(x)= 115802
differential_evolution step 2: f(x)= 75693.2
differential_evolution step 3: f(x)= 75693.2
differential_evolution step 4: f(x)= 70107.6
differential_evolution step 5: f(x)= 44227
differential_evolution step 6: f(x)= 23912.3
differential_evolution step 7: f(x)= 12047.1
differential_evolution step 8: f(x)= 8827.33
differential_evolution step 9: f(x)= 5751.76
differential_evolution step 10: f(x)= 5751.76
differential_evolution step 11: f(x)= 4390.76
differential_evolution step 12: f(x)= 4390.76
differential_evolution step 13: f(x)= 4390.76
differential_evolution step 14: f(x)= 4390.76
differential_evolution step 15: f(x)= 4390.76
differential_evolution step 16: f(x)= 4390.76
differential_evolution step 17: f(x)= 4390.76
differential_evolution step 18: f(x)= 4390.76
differential_evolution step 19: f(x)= 4390.76
differential_evolution step 20: f(x)= 4390.76
differential_evolution step 21: f(x)= 4389.06
differential_evolution step 22: f(x)= 4388.69
differential_evolution step 23: f(x)= 4388.69
differential_evolution step 24: f(x)= 4388.69
differential_evolution step 25: f(x)= 4388.25
differential_evolution step 26: f(x)= 4388
differential_evolution step 27: f(x)= 4388
differential_evolution step 28: f(x)= 4388
differential_evolution step 29: f(x)= 4388
differential_evolution step 30: f(x)= 4387.93
[2.39776575e-02 4.77193463e+01]
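For reference, the fit above boils down to minimising a sum-of-squared-errors objective with SciPy's differential evolution. A hedged sketch of that idea follows; the *simulate_CH* helper is hypothetical (cH.modelFit wraps the equivalent logic internally), so the optimiser call is left commented out:

```python
from scipy.optimize import differential_evolution
import numpy as np

def sse(theta, data):
    beta, extraTime = theta
    pred = simulate_CH(beta, int(extraTime))[:data.size]  # hypothetical model wrapper
    return np.sum((pred - data.flatten()) ** 2)

# result = differential_evolution(sse, bounds=[(0.01, 0.05), (1, 60)], args=(data,),
#                                 maxiter=30, popsize=5, polish=True, disp=True)
```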
```python
estimate=[0.0311, 35.99]
beta = estimate[0]
extraTime = estimate[1]
```
```python
obj = index[0]
timestampStr = obj.strftime("%Y-%m-%d")
print(timestampStr)
```
2020-03-13
```python
# -----------------------
# Fitted model parameters
# -----------------------
beta = estimate[0]
extraTime = estimate[1]
simtime=data.size+int(extraTime)-1
method = 'none'
# inital condition
initN = initN
initE = np.ones(Nc.shape[0])
initSM = np.zeros(Nc.shape[0])
initM = np.zeros(Nc.shape[0])
initH = np.zeros(Nc.shape[0])
initC = np.zeros(Nc.shape[0])
initHH = np.zeros(Nc.shape[0])
initCH = np.zeros(Nc.shape[0])
initR = np.zeros(Nc.shape[0])
initF = np.zeros(Nc.shape[0])
initSQ = np.zeros(Nc.shape[0])
initEQ = np.zeros(Nc.shape[0])
initSMQ = np.zeros(Nc.shape[0])
initMQ = np.zeros(Nc.shape[0])
initRQ = np.zeros(Nc.shape[0])
# --------------
# Run simulation
# --------------
simout = cH.simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,
theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,
initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,
n_samples,method,modelType,checkpoints)
# -----------
# Plot result
# -----------
t=pd.date_range('2020-03-13', freq='D', periods=data.size)
tacc=pd.date_range('2020-03-13', freq='D', periods=data.size+int(extraTime))-datetime.timedelta(days=int(extraTime)-1)
fig=plt.figure(1)
plt.figure(figsize=(7,5),dpi=100)
plt.scatter(t,data_belgie.iloc[:idx,1],color="black",marker="v")
plt.scatter(t,data_belgie.iloc[:idx,2],color="black",marker="o")
plt.scatter(t,data_belgie.iloc[:idx,0],color="black",marker="s")
plt.plot(tacc,np.mean(simout['HH']+simout['CH'],axis=1),'--',color="green")
plt.fill_between(tacc,np.percentile(simout['HH']+simout['CH'],95,axis=1),
np.percentile(simout['HH']+simout['CH'],5,axis=1),color="green",alpha=0.2)
plt.plot(tacc,np.mean(simout['CH'],axis=1),'--',color="orange")
plt.fill_between(tacc,np.percentile(simout['CH'],95,axis=1),
np.percentile(simout['CH'],5,axis=1),color="orange",alpha=0.2)
plt.plot(tacc,np.mean(simout['F'],axis=1),'--',color="red")
plt.fill_between(tacc,np.percentile(simout['F'],95,axis=1),
np.percentile(simout['F'],5,axis=1),color="red",alpha=0.20)
plt.legend(('Hospital (model)','ICU (model)','Deaths (model)'),loc='upper left')
plt.xlim(pd.to_datetime(tacc[24]),pd.to_datetime(tacc[-1]))
plt.title('Belgium',{'fontsize':18})
plt.gca().xaxis.set_major_locator(mdates.DayLocator())
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d-%m-%Y'))
plt.setp(plt.gca().xaxis.get_majorticklabels(),
'rotation', 90)
plt.ylabel('number of patients')
plt.savefig('belgiumFit.svg',dpi=100,bbox_inches='tight')
```
## Model predictive control (use deterministic model)
### Bring model to current outbreak state
```python
# -----------------------
# Define model parameters
# -----------------------
# Clinical parameters
#beta = 0.032 # already calibrated for the deterministic model, change to 0.29 for stochastic
Nc = np.array([[11.2]]) # average number of human-to-human contacts (USE SOCRATES TO DETERMINE VALUE)
zeta = 0 # re-susceptibility parameter (0 = permanent immunity)
dsm = 14 # length of disease for asymptomatic (SM) infections
dm = 14 # length of disease for mildly symptomatic infections
dhospital = 9.1 # average time from symptom onset to hospitalisation for H and C infections
dh = 21 # average recovery time for heavy infection
mc0 = 0.49 # mortality for critical cases when they receive care
ICU = 2600 # number of available ICU beds in Belgium
# Testing parameters
totalTests = 0
theta_S = 0 # no. of daily tests of susceptibles
theta_E = 0 # no. of daily tests of exposed
theta_SM = 0 # no. of daily tests of SM infected
theta_M = 0 # no. of daily tests of M infected
theta_R = 0 # no. of daily tests of recovered patients
phi_S = 0 # backtracking of susceptibles
phi_E = 0 # backtracking of exposed
phi_SM = 0 # backtracking of supermild cases
phi_R = 0 # backtracking of recovered cases
psi_FP = 0 # probability of a false positive test
psi_PP = 1 # probability of a correct test
dq = 14 # length of quarantine for false positives
# Interesting article about serological tests:
#https://www.tijd.be/dossiers/coronavirus/kunnen-bloedtests-onze-maatschappij-snel-weer-in-gang-trappen/10218259.html
monteCarlo = False # perform Monte-Carlo sampling (recommended to disable for stochastic simulations)
n_samples = 400 # From 200 upwards no observable change
# ------------------------
# Define initial condition
# ------------------------
initN = np.array([11.43e6])
initE= np.array([1])
initSM=np.zeros(1)
initM=np.zeros(1)
initH=np.zeros(1)
initC=np.zeros(1)
initHH=np.zeros(1)
initCH=np.zeros(1)
initR=np.zeros(1)
initF=np.zeros(1)
initSQ=np.zeros(1)
initEQ=np.zeros(1)
initSMQ=np.zeros(1)
initMQ=np.zeros(1)
initRQ=np.zeros(1)
# ----------------------------
# Define simulation parameters
# ----------------------------
simtime=ICUvect.size+int(extraTime)-1 # simulate until today
modelType = 'deterministic' # alternative: stochastic
method = 'none' # default
checkpoints={
't': [39+3],
'Nc': [np.array([0.3])] #1.78
}
# --------------
# Run simulation
# --------------
simout=cH.simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,
psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,
initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,
modelType,checkpoints,**stoArgs)
# -----------
# Plot result
# -----------
t=pd.date_range('2020-03-13', freq='D', periods=ICUvect.size)
tacc=pd.date_range('2020-03-13', freq='D', periods=ICUvect.size+int(extraTime))-datetime.timedelta(days=int(extraTime))
fig=plt.figure(1)
plt.figure(figsize=(7,5),dpi=100)
plt.scatter(t,data_belgie.iloc[:,1],color="black",marker="v")
plt.plot(tacc,np.mean(simout['CH'],axis=1),'--',color="orange")
plt.fill_between(tacc,np.percentile(simout['CH'],95,axis=1),
np.percentile(simout['CH'],5,axis=1),color="orange",alpha=0.2)
plt.legend(('ICU (model)','ICU (data)'),loc='upper left')
plt.xlim(pd.to_datetime(tacc[24]),pd.to_datetime(tacc[-1]))
plt.title('Belgium',{'fontsize':18})
plt.gca().xaxis.set_major_locator(mdates.DayLocator())
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d-%m-%Y'))
plt.setp(plt.gca().xaxis.get_majorticklabels(),
'rotation', 90)
plt.ylabel('number of patients')
```
```python
# Extract initial condition
initN = np.array([11.43e6])
initE= np.ones(1)*np.mean(simout['E'],axis=1)[-1]
initSM=np.ones(1)*np.mean(simout['SM'],axis=1)[-1]
initM=np.ones(1)*np.mean(simout['M'],axis=1)[-1]
initH=np.ones(1)*np.mean(simout['H'],axis=1)[-1]
initC=np.ones(1)*np.mean(simout['C'],axis=1)[-1]
initHH=np.ones(1)*np.mean(simout['HH'],axis=1)[-1]
initCH=np.ones(1)*np.mean(simout['CH'],axis=1)[-1]
initR=np.ones(1)*np.mean(simout['R'],axis=1)[-1]
initF=np.ones(1)*np.mean(simout['F'],axis=1)[-1]
initSQ=np.ones(1)*np.mean(simout['SQ'],axis=1)[-1]
initEQ=np.ones(1)*np.mean(simout['EQ'],axis=1)[-1]
initSMQ=np.ones(1)*np.mean(simout['SMQ'],axis=1)[-1]
initMQ=np.ones(1)*np.mean(simout['MQ'],axis=1)[-1]
initRQ=np.ones(1)*np.mean(simout['RQ'],axis=1)[-1]
print(initCH)
print(initR)
```
[1650.47821057]
[409768.92313793]
### Optimise the controller
```python
modelType = 'deterministic'
method = 'none'
```
```python
monteCarlo = False
n_samples=100
period = 7
P = 20
N = 16
discrete=False
roundOff = (0,1,2)
ICU = initCH
```
```python
polish=True
disp = True
maxiter = 100
popsize = 10
phi_S = 0
phi_E = 0
phi_SM = 0
phi_R = 0
totalTests = 0
policy = cH.MPCoptimize(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,
psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,
initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,
modelType,discrete,roundOff,period,P,N,disp,polish,maxiter,popsize)
```
/home/twallema/anaconda3/lib/python3.7/site-packages/scipy/optimize/_differentialevolution.py:494: UserWarning: differential_evolution: the 'workers' keyword has overridden updating='immediate' to updating='deferred'
" updating='deferred'", UserWarning)
differential_evolution step 1: f(x)= 3.99606e+08
differential_evolution step 2: f(x)= 6.66733e+06
differential_evolution step 3: f(x)= 6.66733e+06
differential_evolution step 4: f(x)= 6.66733e+06
differential_evolution step 5: f(x)= 6.66733e+06
differential_evolution step 6: f(x)= 6.66733e+06
differential_evolution step 7: f(x)= 6.66733e+06
differential_evolution step 8: f(x)= 5.80729e+06
differential_evolution step 9: f(x)= 5.80729e+06
differential_evolution step 10: f(x)= 5.80729e+06
differential_evolution step 11: f(x)= 5.80729e+06
differential_evolution step 12: f(x)= 5.80729e+06
differential_evolution step 13: f(x)= 5.80729e+06
differential_evolution step 14: f(x)= 5.80729e+06
differential_evolution step 15: f(x)= 5.80729e+06
differential_evolution step 16: f(x)= 5.80729e+06
differential_evolution step 17: f(x)= 5.80729e+06
differential_evolution step 18: f(x)= 5.80729e+06
differential_evolution step 19: f(x)= 5.80729e+06
differential_evolution step 20: f(x)= 5.80729e+06
differential_evolution step 21: f(x)= 5.80729e+06
differential_evolution step 22: f(x)= 5.80729e+06
differential_evolution step 23: f(x)= 5.80729e+06
differential_evolution step 24: f(x)= 5.80729e+06
differential_evolution step 25: f(x)= 5.80729e+06
differential_evolution step 26: f(x)= 5.80729e+06
differential_evolution step 27: f(x)= 5.80729e+06
differential_evolution step 28: f(x)= 5.80729e+06
differential_evolution step 29: f(x)= 5.80729e+06
differential_evolution step 30: f(x)= 5.80729e+06
differential_evolution step 31: f(x)= 5.80729e+06
differential_evolution step 32: f(x)= 5.80729e+06
differential_evolution step 33: f(x)= 5.80729e+06
differential_evolution step 34: f(x)= 5.80729e+06
differential_evolution step 35: f(x)= 5.80729e+06
differential_evolution step 36: f(x)= 5.80729e+06
differential_evolution step 37: f(x)= 5.80729e+06
differential_evolution step 38: f(x)= 5.80729e+06
differential_evolution step 39: f(x)= 5.80729e+06
differential_evolution step 40: f(x)= 5.80729e+06
differential_evolution step 41: f(x)= 5.80729e+06
differential_evolution step 42: f(x)= 5.80729e+06
differential_evolution step 43: f(x)= 5.80729e+06
differential_evolution step 44: f(x)= 5.80729e+06
differential_evolution step 45: f(x)= 5.80729e+06
differential_evolution step 46: f(x)= 5.80729e+06
differential_evolution step 47: f(x)= 5.80729e+06
differential_evolution step 48: f(x)= 5.80729e+06
differential_evolution step 49: f(x)= 5.80729e+06
differential_evolution step 50: f(x)= 5.80729e+06
differential_evolution step 51: f(x)= 5.80729e+06
differential_evolution step 52: f(x)= 5.80729e+06
differential_evolution step 53: f(x)= 5.80729e+06
differential_evolution step 54: f(x)= 5.80729e+06
differential_evolution step 55: f(x)= 5.80729e+06
differential_evolution step 56: f(x)= 5.80729e+06
differential_evolution step 57: f(x)= 5.80729e+06
differential_evolution step 58: f(x)= 5.80729e+06
differential_evolution step 59: f(x)= 5.80729e+06
differential_evolution step 60: f(x)= 5.80729e+06
differential_evolution step 61: f(x)= 5.80729e+06
differential_evolution step 62: f(x)= 5.80729e+06
differential_evolution step 63: f(x)= 5.80729e+06
differential_evolution step 64: f(x)= 5.80729e+06
differential_evolution step 65: f(x)= 5.80729e+06
differential_evolution step 66: f(x)= 5.80729e+06
differential_evolution step 67: f(x)= 5.80729e+06
differential_evolution step 68: f(x)= 5.80729e+06
differential_evolution step 69: f(x)= 5.80729e+06
differential_evolution step 70: f(x)= 5.80729e+06
differential_evolution step 71: f(x)= 5.80729e+06
differential_evolution step 72: f(x)= 5.80729e+06
differential_evolution step 73: f(x)= 5.80729e+06
differential_evolution step 74: f(x)= 5.80729e+06
differential_evolution step 75: f(x)= 5.80729e+06
differential_evolution step 76: f(x)= 5.80729e+06
differential_evolution step 77: f(x)= 5.80729e+06
differential_evolution step 78: f(x)= 5.80729e+06
differential_evolution step 79: f(x)= 5.80729e+06
differential_evolution step 80: f(x)= 5.80729e+06
differential_evolution step 81: f(x)= 5.80729e+06
differential_evolution step 82: f(x)= 5.80729e+06
differential_evolution step 83: f(x)= 5.80729e+06
differential_evolution step 84: f(x)= 5.80729e+06
differential_evolution step 85: f(x)= 5.80729e+06
differential_evolution step 86: f(x)= 5.80729e+06
differential_evolution step 87: f(x)= 5.80729e+06
differential_evolution step 88: f(x)= 5.80729e+06
[2.62598999 5.34071978 3.42186642 2.82354412 2.61860091 3.91813672
4.15741651 6.73629882 5.59529611 4.64341206 2.36575502 3.42227157
2.17402135 3.81241576 2.80546971 0.49376903]
```python
controlDoF = 1
if controlDoF == 1:
thetas1 = policy
#Discretise thetas
thetas1[thetas1<5.6] = 1.8
thetas1[(thetas1>=5.6)&(thetas1<8)] = 6
thetas1[thetas1>=8] = 11.2
Ncs=[]
for i in range(policy.size):
Ncs.append(np.array([policy[i]]))
checkpoints = cH.constructHorizon(Ncs,period)
policyVect = cH.constructHorizonPlot(policy,period)
simtime = policy.size*period
elif controlDoF == 2:
# split policy vector in two
length = policy.size
middle_index = length//2
thetas1 = policy[:middle_index]
#Discretise thetas
thetas1[thetas1<5.6] = 1.8
thetas1[(thetas1>=5.6)&(thetas1<8)] = 6
thetas1[thetas1>=8] = 11.2
thetas2 = policy[middle_index:]
# add to separate lists
Ncs1=[]
for i in range(thetas1.size):
Ncs1.append(np.array([thetas1[i]]))
Ncs2=[]
for i in range(thetas2.size):
Ncs2.append(np.array([thetas2[i]]))
checkpoints = cH.constructHorizonTesting(Ncs1,Ncs2,period)
Ncvect,totalTestsvect = cH.constructHorizonTestingPlot(thetas1,thetas2,period)
simtime = thetas1.size*period
print(checkpoints)
```
{'t': array([ 7., 14., 21., 28., 35., 42., 49., 56., 63., 70., 77.,
84., 91., 98., 105.]), 'Nc': [array([1.8]), array([1.8]), array([1.8]), array([1.8]), array([1.8]), array([1.8]), array([6.]), array([1.8]), array([1.8]), array([1.8]), array([1.8]), array([1.8]), array([1.8]), array([1.8]), array([1.8])]}
```python
# # --------------
# # Run simulation
# # --------------
# Nc = np.array([thetas1[0]])
# totalTests = thetas2[0]
# simout=cH.simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,
# dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,
# initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints)
```
```python
# -------------------------------------------------------
# Plot Belgian results first, from March 13th until today
# -------------------------------------------------------
plt.figure(1)
t=pd.date_range('2020-03-13', freq='D', periods=ICUvect.size+int(extraTime)+simtime+1)-datetime.timedelta(days=int(extraTime))
t = np.linspace(0,t.size,t.size+1)
plt.figure(figsize=(7,5),dpi=100)
plt.scatter(t[int(extraTime):int(extraTime)+27],data_belgie.iloc[:,1],color="black",marker="v")
plt.plot(t[0:int(extraTime)+27],np.mean(simout['CH'],axis=1),color="black")
# --------------
# Run simulation
# --------------
Nc = np.array([thetas1[0]])
if controlDoF == 1:
totalTests = 0
elif controlDoF == 2:
totalTests = thetas2[0]
simout=cH.simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,
dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,
initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints)
# --------------------------
# Plot controller prediction
# --------------------------
plt.plot(t[int(extraTime)+27:-1],np.mean(simout['CH'],axis=1),color="red")
plt.fill_between(t[int(extraTime)+27:-1], np.percentile(simout['CH'],90,axis=1), np.percentile(simout['CH'],10,axis=1),color="red",alpha=0.2)
plt.legend(('ICU (actual)','ICU (prediction)'),loc='center left')
plt.xlabel('days')
plt.ylabel('number of patients')
thetas1[thetas1<5.6] = 0
thetas1[(thetas1>=5.6)&(thetas1<8)] = 1
thetas1[thetas1>=8] = 2
a = [0.3,0.10,0]
if period == 3:
plt.axvspan(int(extraTime)+4, int(extraTime)+27, alpha=0.3, color='black')
plt.axvspan(int(extraTime)+27, int(extraTime)+27+3, alpha=a[int(thetas1[0])], color='black')
plt.axvspan(int(extraTime)+27+3, int(extraTime)+27+6, alpha=a[int(thetas1[1])], color='black')
plt.axvspan(int(extraTime)+27+6, int(extraTime)+27+9, alpha=a[int(thetas1[2])], color='black')
plt.axvspan(int(extraTime)+27+9, int(extraTime)+27+12, alpha=a[int(thetas1[3])], color='black')
plt.axvspan(int(extraTime)+27+12, int(extraTime)+27+15, alpha=a[int(thetas1[4])], color='black')
plt.axvspan(int(extraTime)+27+15, int(extraTime)+27+18, alpha=a[int(thetas1[5])], color='black')
# if there are nine intervals
plt.axvspan(int(extraTime)+27+18, int(extraTime)+27+21, alpha=a[int(thetas1[6])], color='black')
plt.axvspan(int(extraTime)+27+21, int(extraTime)+27+24, alpha=a[int(thetas1[7])], color='black')
plt.axvspan(int(extraTime)+27+24, int(extraTime)+27+27, alpha=a[int(thetas1[8])], color='black')
elif period == 7:
plt.axvspan(int(extraTime)+4, int(extraTime)+27, alpha=0.3, color='black')
plt.axvspan(int(extraTime)+27, int(extraTime)+27+7, alpha=a[int(thetas1[0])], color='black')
plt.axvspan(int(extraTime)+27+7, int(extraTime)+27+14, alpha=a[int(thetas1[1])], color='black')
plt.axvspan(int(extraTime)+27+14, int(extraTime)+27+21, alpha=a[int(thetas1[2])], color='black')
plt.axvspan(int(extraTime)+27+21, int(extraTime)+27+28, alpha=a[int(thetas1[3])], color='black')
plt.axvspan(int(extraTime)+27+28, int(extraTime)+27+35, alpha=a[int(thetas1[4])], color='black')
plt.axvspan(int(extraTime)+27+35, int(extraTime)+27+42, alpha=a[int(thetas1[5])], color='black')
# if there are nine intervals
plt.axvspan(int(extraTime)+27+42, int(extraTime)+27+49, alpha=a[int(thetas1[6])], color='black')
plt.axvspan(int(extraTime)+27+49, int(extraTime)+27+56, alpha=a[int(thetas1[7])], color='black')
plt.axvspan(int(extraTime)+27+56, int(extraTime)+27+63, alpha=a[int(thetas1[8])], color='black')
# if there are sixteen intervals
plt.axvspan(int(extraTime)+27+63, int(extraTime)+27+70, alpha=a[int(thetas1[9])], color='black')
plt.axvspan(int(extraTime)+27+70, int(extraTime)+27+77, alpha=a[int(thetas1[10])], color='black')
plt.axvspan(int(extraTime)+27+77, int(extraTime)+27+84, alpha=a[int(thetas1[11])], color='black')
plt.axvspan(int(extraTime)+27+84, int(extraTime)+27+91, alpha=a[int(thetas1[12])], color='black')
plt.axvspan(int(extraTime)+27+91, int(extraTime)+27+98, alpha=a[int(thetas1[13])], color='black')
plt.axvspan(int(extraTime)+27+98, int(extraTime)+27+105, alpha=a[int(thetas1[14])], color='black')
plt.axvspan(int(extraTime)+27+105, int(extraTime)+27+112, alpha=a[int(thetas1[15])], color='black')
elif period == 14:
plt.axvspan(int(extraTime)+4, int(extraTime)+27, alpha=0.3, color='black')
plt.axvspan(int(extraTime)+27, int(extraTime)+27+14, alpha=a[int(thetas1[0])], color='black')
plt.axvspan(int(extraTime)+27+14, int(extraTime)+27+28, alpha=a[int(thetas1[1])], color='black')
plt.axvspan(int(extraTime)+27+28, int(extraTime)+27+42, alpha=a[int(thetas1[2])], color='black')
plt.axvspan(int(extraTime)+27+42, int(extraTime)+27+56, alpha=a[int(thetas1[3])], color='black')
plt.axvspan(int(extraTime)+27+56, int(extraTime)+27+70, alpha=a[int(thetas1[4])], color='black')
plt.axvspan(int(extraTime)+27+70, int(extraTime)+27+84, alpha=a[int(thetas1[5])], color='black')
# if there are nine intervals
plt.axvspan(int(extraTime)+27+84, int(extraTime)+27+98, alpha=a[int(thetas1[6])], color='black')
plt.axvspan(int(extraTime)+27+98, int(extraTime)+27+112, alpha=a[int(thetas1[7])], color='black')
plt.axvspan(int(extraTime)+27+112, int(extraTime)+27+126, alpha=a[int(thetas1[8])], color='black')
ax2 = plt.twinx()
totalTeststar = np.zeros(t.size)
totalTeststar[0:int(extraTime)+27] = 0
totalTeststar[int(extraTime)+27:-1] = totalTestsvect
plt.plot(t[0:-2],totalTeststar[0:-2],'--',color='black')
ax2.set_ylabel("Number of daily tracked infections")
plt.savefig('belgiumController.svg',dpi=100,bbox_inches='tight')
```
```python
#A biotech company in Iceland that has tested more than 9,000 people found that around 50 percent
#of those who tested positive said they were asymptomatic, the researchers told CNN.
model = models.SEIRSAgeModel(initN = np.array([11.43e6]),
beta = 0.032,
sigma = 5.2,
Nc = np.array([11.2]),
sm = 0.50,
m = (1-0.50)*0.81,
h = (1-0.50)*0.15,
c = (1-0.50)*0.04,
dsm = 14,
dm = 14,
dhospital = 1,
dh = 21,
dcf = 18.5,
dcr = 22.0,
mc0 = 0.49,
ICU = 2000,
totalTests = 0,
psi_FP = 0,
psi_PP = 1,
dq = 14,
initE = np.array([1]),
initSM = np.zeros(1),
initM = np.zeros(1),
initH = np.zeros(1),
initC = np.zeros(1),
initHH = np.zeros(1),
initCH = np.zeros(1),
initR = np.zeros(1),
initF = np.zeros(1),
initSQ = np.zeros(1),
initEQ = np.zeros(1),
initSMQ = np.zeros(1),
initMQ = np.zeros(1),
initRQ = np.zeros(1),
monteCarlo = False,
n_samples = 50
)
y = model.sim(70)
```
```python
model.plotPopulationStatus()
model.plotInfected()
```
```python
# make data vector
ICUvect= np.array([[5,24,33,53,79,100,130,164,238,290,322,381,474,605,690,789,867,927,1021,1088,1144,1205,1245,1261,1257,1260,1276,1285,1278,1262,1232,1234,1223]])
hospital = np.array([[58,97,163,264,368,496,648,841,1096,1380,1643,1881,2137,2715,3068,3640,4077,4468,4884,4975,5206,5358,5492,5509,5600,5738,5692,5590,5610,5635,5409,5393,5536]])
index=pd.date_range('2020-03-13', freq='D', periods=ICUvect.size)
idx=-26
index = index[0:idx]
print(index)
data=[np.transpose(ICUvect[:,0:idx]),np.transpose(hospital[:,0:idx])]
# set optimisation settings
bounds=[(10,60),(0.01,0.035)]
positions = [np.array([7]),np.array([6,7])]
# run optimisation
theta = model.fit(data,['beta'],positions,bounds,np.array([1,1]),setvar=True,maxiter=100)
model.plotFit(index,data,positions,modelClr=['red','orange'],legendText=('ICU (model)','ICU (data)','Hospital (model)','Hospital (data)'),titleText='Belgium')
```
```python
```
|
= = = Conspirators = = =
|
#include <gsl/gsl_spline.h>
typedef struct FastPMFDInterp{
size_t size;
gsl_interp * F;
gsl_interp * DF;
gsl_interp * DDF;
gsl_interp_accel * acc;
} FastPMFDInterp;
void fastpm_fd_interp_init(FastPMFDInterp * FDinterp);
double fastpm_do_fd_interp(FastPMFDInterp * FDinterp, int F_id, double y);
void fastpm_fd_interp_destroy(FastPMFDInterp * FDinterp);
|
Require Import Coq.Unicode.Utf8_core.
Require Import Coq.Program.Tactics.
Require Export Homotopy.Core.
Set Automatic Introduction.
Set Implicit Arguments.
Set Shrink Obligations.
Set Universe Polymorphism.
(** H-Levels *)
(* h-levels 0..2 *)
Definition is_contractible (A : Type) := {x : A & ∀ y : A, y ~ x}.
Definition is_prop (A : Type) := ∀ (x y : A), is_contractible (x ~ y).
Definition is_set (A : Type) := ∀ (x y : A), is_prop (x ~ y).
Program Fixpoint is_level (n: nat) (A: Type) : Type :=
match n with
| O => is_contractible A
| S n => ∀ (x y: A), is_level n (paths x y)
end.
Program Fixpoint n_path (n : nat) (A: Type) : Type :=
match n with
| O => ∀ (x y : A), x ~ y
| S n => ∀ (x y : A), n_path n (paths x y)
end.
Definition contractible := sigT is_contractible.
Definition prop := sigT is_prop.
Definition set := sigT is_set.
Definition level (n: nat) := sigT (is_level n).
Definition contractible_Type (p : contractible) := p.1.
Coercion contractible_Type : contractible >-> Sortclass.
Definition prop_Type (p : prop) := p.1.
Coercion prop_Type : prop >-> Sortclass.
Definition set_Type (p : set) := p.1.
Coercion set_Type : set >-> Sortclass.
Definition level_Type {n} (p : level n) := p.1.
Coercion level_Type : level >-> Sortclass.
Class is_category1 (C : category) :=
is_category1_prop : ∀ {x y : C}, is_set (x ~> y).
Class is_thin (C : category) :=
is_thin_prop : ∀ {x y : C}, is_prop (x ~> y).
Class is_strict (C : category) :=
is_strict_prop : is_set C.
|
prelude
import init.core init.system.io init.data.ordering
universe u v w
inductive Rbcolor
| red | black
inductive Rbnode (α : Type u) (β : α → Type v)
| leaf {} : Rbnode
| Node (c : Rbcolor) (lchild : Rbnode) (key : α) (val : β key) (rchild : Rbnode) : Rbnode
instance Rbcolor.DecidableEq : DecidableEq Rbcolor :=
{decEq := fun a b => Rbcolor.casesOn a
(Rbcolor.casesOn b (isTrue rfl) (isFalse (fun h => Rbcolor.noConfusion h)))
(Rbcolor.casesOn b (isFalse (fun h => Rbcolor.noConfusion h)) (isTrue rfl))}
namespace Rbnode
variable {α : Type u} {β : α → Type v} {σ : Type w}
open Rbcolor
def depth (f : Nat → Nat → Nat) : Rbnode α β → Nat
| leaf => 0
| Node _ l _ _ r => (f (depth l) (depth r)) + 1
protected def min : Rbnode α β → Option (Sigma (fun k => β k))
| leaf => none
| Node _ leaf k v _ => some ⟨k, v⟩
| Node _ l k v _ => min l
protected def max : Rbnode α β → Option (Sigma (fun k => β k))
| leaf => none
| Node _ _ k v leaf => some ⟨k, v⟩
| Node _ _ k v r => max r
@[specialize] def fold (f : ∀ (k : α), β k → σ → σ) : Rbnode α β → σ → σ
| leaf, b => b
| Node _ l k v r, b => fold r (f k v (fold l b))
@[specialize] def revFold (f : ∀ (k : α), β k → σ → σ) : Rbnode α β → σ → σ
| leaf, b => b
| Node _ l k v r, b => revFold l (f k v (revFold r b))
@[specialize] def all (p : ∀ (k : α), β k → Bool) : Rbnode α β → Bool
| leaf => true
| Node _ l k v r => p k v && all l && all r
@[specialize] def any (p : ∀ (k : α), β k → Bool) : Rbnode α β → Bool
| leaf => false
| Node _ l k v r => p k v || any l || any r
def isRed : Rbnode α β → Bool
| Node red _ _ _ _ => true
| _ => false
def rotateLeft : ∀ (n : Rbnode α β), n ≠ leaf → Rbnode α β
| n@(Node hc hl hk hv (Node red xl xk xv xr)), _ =>
if !isRed hl
then (Node hc (Node red hl hk hv xl) xk xv xr)
else n
| leaf, h => absurd rfl h
| e, _ => e
theorem ifNodeNodeNeLeaf {c : Prop} [Decidable c] {l1 l2 : Rbnode α β} {c1 k1 v1 r1 c2 k2 v2 r2} : (if c then Node c1 l1 k1 v1 r1 else Node c2 l2 k2 v2 r2) ≠ leaf :=
fun h => if hc : c
then have h1 : (if c then Node c1 l1 k1 v1 r1 else Node c2 l2 k2 v2 r2) = Node c1 l1 k1 v1 r1 from ifPos hc;
Rbnode.noConfusion (Eq.trans h1.symm h)
else have h1 : (if c then Node c1 l1 k1 v1 r1 else Node c2 l2 k2 v2 r2) = Node c2 l2 k2 v2 r2 from ifNeg hc;
Rbnode.noConfusion (Eq.trans h1.symm h)
theorem rotateLeftNeLeaf : ∀ (n : Rbnode α β) (h : n ≠ leaf), rotateLeft n h ≠ leaf
| Node _ hl _ _ (Node red _ _ _ _), _, h => ifNodeNodeNeLeaf h
| leaf, h, _ => absurd rfl h
| Node _ _ _ _ (Node black _ _ _ _), _, h => Rbnode.noConfusion h
def rotateRight : ∀ (n : Rbnode α β), n ≠ leaf → Rbnode α β
| n@(Node hc (Node red xl xk xv xr) hk hv hr), _ =>
if isRed xl
then (Node hc xl xk xv (Node red xr hk hv hr))
else n
| leaf, h => absurd rfl h
| e, _ => e
theorem rotateRightNeLeaf : ∀ (n : Rbnode α β) (h : n ≠ leaf), rotateRight n h ≠ leaf
| Node _ (Node red _ _ _ _) _ _ _, _, h => ifNodeNodeNeLeaf h
| leaf, h, _ => absurd rfl h
| Node _ (Node black _ _ _ _) _ _ _, _, h => Rbnode.noConfusion h
def flip : Rbcolor → Rbcolor
| red => black
| black => red
def flipColor : Rbnode α β → Rbnode α β
| Node c l k v r => Node (flip c) l k v r
| leaf => leaf
def flipColors : ∀ (n : Rbnode α β), n ≠ leaf → Rbnode α β
| n@(Node c l k v r), _ =>
if isRed l ∧ isRed r
then Node (flip c) (flipColor l) k v (flipColor r)
else n
| leaf, h => absurd rfl h
def fixup (n : Rbnode α β) (h : n ≠ leaf) : Rbnode α β :=
let n₁ := rotateLeft n h;
let h₁ := (rotateLeftNeLeaf n h);
let n₂ := rotateRight n₁ h₁;
let h₂ := (rotateRightNeLeaf n₁ h₁);
flipColors n₂ h₂
def setBlack : Rbnode α β → Rbnode α β
| Node red l k v r => Node black l k v r
| n => n
section insert
variable (lt : α → α → Prop) [DecidableRel lt]
def ins (x : α) (vx : β x) : Rbnode α β → Rbnode α β
| leaf => Node red leaf x vx leaf
| Node c l k v r =>
if lt x k then fixup (Node c (ins l) k v r) (fun h => Rbnode.noConfusion h)
else if lt k x then fixup (Node c l k v (ins r)) (fun h => Rbnode.noConfusion h)
else Node c l x vx r
def insert (t : Rbnode α β) (k : α) (v : β k) : Rbnode α β :=
setBlack (ins lt k v t)
end insert
section membership
variable (lt : α → α → Prop)
variable [DecidableRel lt]
def findCore : Rbnode α β → ∀ (k : α), Option (Sigma (fun k => β k))
| leaf, x => none
| Node _ a ky vy b, x =>
(match cmpUsing lt x ky with
| Ordering.lt => findCore a x
| Ordering.Eq => some ⟨ky, vy⟩
| Ordering.gt => findCore b x)
def find {β : Type v} : Rbnode α (fun _ => β) → α → Option β
| leaf, x => none
| Node _ a ky vy b, x =>
(match cmpUsing lt x ky with
| Ordering.lt => find a x
| Ordering.Eq => some vy
| Ordering.gt => find b x)
def lowerBound : Rbnode α β → α → Option (Sigma β) → Option (Sigma β)
| leaf, x, lb => lb
| Node _ a ky vy b, x, lb =>
(match cmpUsing lt x ky with
| Ordering.lt => lowerBound a x lb
| Ordering.Eq => some ⟨ky, vy⟩
| Ordering.gt => lowerBound b x (some ⟨ky, vy⟩))
end membership
inductive WellFormed (lt : α → α → Prop) : Rbnode α β → Prop
| leafWff : WellFormed leaf
| insertWff {n n' : Rbnode α β} {k : α} {v : β k} [DecidableRel lt] : WellFormed n → n' = insert lt n k v → WellFormed n'
end Rbnode
open Rbnode
/- TODO(Leo): define dRbmap -/
def Rbmap (α : Type u) (β : Type v) (lt : α → α → Prop) : Type (max u v) :=
{t : Rbnode α (fun _ => β) // t.WellFormed lt }
@[inline] def mkRbmap (α : Type u) (β : Type v) (lt : α → α → Prop) : Rbmap α β lt :=
⟨leaf, WellFormed.leafWff lt⟩
namespace Rbmap
variable {α : Type u} {β : Type v} {σ : Type w} {lt : α → α → Prop}
def depth (f : Nat → Nat → Nat) (t : Rbmap α β lt) : Nat :=
t.val.depth f
@[inline] def fold (f : α → β → σ → σ) : Rbmap α β lt → σ → σ
| ⟨t, _⟩, b => t.fold f b
@[inline] def revFold (f : α → β → σ → σ) : Rbmap α β lt → σ → σ
| ⟨t, _⟩, b => t.revFold f b
@[inline] def empty : Rbmap α β lt → Bool
| ⟨leaf, _⟩ => true
| _ => false
@[specialize] def toList : Rbmap α β lt → List (α × β)
| ⟨t, _⟩ => t.revFold (fun k v ps => (k, v)::ps) []
@[inline] protected def min : Rbmap α β lt → Option (α × β)
| ⟨t, _⟩ =>
match t.min with
| some ⟨k, v⟩ => some (k, v)
| none => none
@[inline] protected def max : Rbmap α β lt → Option (α × β)
| ⟨t, _⟩ =>
match t.max with
| some ⟨k, v⟩ => some (k, v)
| none => none
instance [Repr α] [Repr β] : Repr (Rbmap α β lt) :=
⟨fun t => "rbmapOf " ++ repr t.toList⟩
variable [DecidableRel lt]
def insert : Rbmap α β lt → α → β → Rbmap α β lt
| ⟨t, w⟩, k, v => ⟨t.insert lt k v, WellFormed.insertWff w rfl⟩
@[specialize] def ofList : List (α × β) → Rbmap α β lt
| [] => mkRbmap _ _ _
| ⟨k,v⟩::xs => (ofList xs).insert k v
def findCore : Rbmap α β lt → α → Option (Sigma (fun (k : α) => β))
| ⟨t, _⟩, x => t.findCore lt x
def find : Rbmap α β lt → α → Option β
| ⟨t, _⟩, x => t.find lt x
/-- (lowerBound k) retrieves the kv pair of the largest key smaller than or equal to `k`,
if it exists. -/
def lowerBound : Rbmap α β lt → α → Option (Sigma (fun (k : α) => β))
| ⟨t, _⟩, x => t.lowerBound lt x none
@[inline] def contains (t : Rbmap α β lt) (a : α) : Bool :=
(t.find a).isSome
def fromList (l : List (α × β)) (lt : α → α → Prop) [DecidableRel lt] : Rbmap α β lt :=
l.foldl (fun r p => r.insert p.1 p.2) (mkRbmap α β lt)
@[inline] def all : Rbmap α β lt → (α → β → Bool) → Bool
| ⟨t, _⟩, p => t.all p
@[inline] def any : Rbmap α β lt → (α → β → Bool) → Bool
| ⟨t, _⟩, p => t.any p
end Rbmap
def rbmapOf {α : Type u} {β : Type v} (l : List (α × β)) (lt : α → α → Prop) [DecidableRel lt] : Rbmap α β lt :=
Rbmap.fromList l lt
/- Test -/
@[reducible] def map : Type := Rbmap Nat Bool Less.Less
def mkMapAux : Nat → map → map
| 0, m => m
| n+1, m => mkMapAux n (m.insert n (n % 10 = 0))
def mkMap (n : Nat) :=
mkMapAux n (mkRbmap Nat Bool Less.Less)
def main (xs : List String) : IO UInt32 :=
let m := mkMap xs.head.toNat;
let v := Rbmap.fold (fun (k : Nat) (v : Bool) (r : Nat) => if v then r + 1 else r) m 0;
IO.println (toString v) *>
pure 0
|
Hey Johnathon, and welcome to the wiki! My name's Evan, pleased to meet you. Thanks for contributing all the reviews, but keep in mind that you're welcome to add your opinions directly to the text of the entry. The wiki incorporates all kinds of viewpoints from the folk of Davis, and you can edit anything on the wiki (as can everybody else). Just click the Edit icon next to the page title. Once again, welcome to the wiki! Users/JabberWokky Evan JabberWokky Edwards
|
/-
Copyright (c) 2022 Adam Topaz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Adam Topaz, Junyan Xu
-/
import ring_theory.localization.localization_localization
/-!
# Localizations of domains as subalgebras of the fraction field.
Given a domain `A` with fraction field `K`, and a submonoid `S` of `A` which
does not contain zero, this file constructs the localization of `A` at `S`
as a subalgebra of the field `K` over `A`.
-/
namespace localization
open_locale non_zero_divisors
variables {A : Type*} (K : Type*) [comm_ring A] (S : submonoid A) (hS : S ≤ A⁰)
section comm_ring
variables [comm_ring K] [algebra A K] [is_fraction_ring A K]
lemma map_is_unit_of_le (hS : S ≤ A⁰) (s : S) : is_unit (algebra_map A K s) :=
by apply is_localization.map_units K (⟨s.1, hS s.2⟩ : A⁰)
/-- The canonical map from a localization of `A` at `S` to the fraction ring
of `A`, given that `S ≤ A⁰`. -/
noncomputable
def map_to_fraction_ring (B : Type*) [comm_ring B] [algebra A B]
[is_localization S B] (hS : S ≤ A⁰) :
B →ₐ[A] K :=
{ commutes' := λ a, by simp,
..is_localization.lift (map_is_unit_of_le K S hS) }
@[simp]
lemma map_to_fraction_ring_apply {B : Type*} [comm_ring B] [algebra A B]
[is_localization S B] (hS : S ≤ A⁰) (b : B) :
map_to_fraction_ring K S B hS b = is_localization.lift (map_is_unit_of_le K S hS) b := rfl
lemma mem_range_map_to_fraction_ring_iff (B : Type*) [comm_ring B] [algebra A B]
[is_localization S B] (hS : S ≤ A⁰) (x : K) :
x ∈ (map_to_fraction_ring K S B hS).range ↔
∃ (a s : A) (hs : s ∈ S), x = is_localization.mk' K a ⟨s, hS hs⟩ :=
⟨ by { rintro ⟨x,rfl⟩, obtain ⟨a,s,rfl⟩ := is_localization.mk'_surjective S x,
use [a, s, s.2], apply is_localization.lift_mk' },
by { rintro ⟨a,s,hs,rfl⟩, use is_localization.mk' _ a ⟨s,hs⟩,
apply is_localization.lift_mk' } ⟩
instance is_localization_range_map_to_fraction_ring (B : Type*) [comm_ring B] [algebra A B]
[is_localization S B] (hS : S ≤ A⁰) :
is_localization S (map_to_fraction_ring K S B hS).range :=
is_localization.is_localization_of_alg_equiv S $ show B ≃ₐ[A] _, from alg_equiv.of_bijective
(map_to_fraction_ring K S B hS).range_restrict
begin
refine ⟨λ a b h, _, set.surjective_onto_range⟩,
refine (is_localization.lift_injective_iff _).2 (λ a b, _) (subtype.ext_iff.1 h),
exact ⟨λ h, congr_arg _ (is_localization.injective _ hS h),
λ h, congr_arg _ (is_fraction_ring.injective A K h)⟩,
end
instance is_fraction_ring_range_map_to_fraction_ring
(B : Type*) [comm_ring B] [algebra A B]
[is_localization S B] (hS : S ≤ A⁰) :
is_fraction_ring (map_to_fraction_ring K S B hS).range K :=
is_fraction_ring.is_fraction_ring_of_is_localization S _ _ hS
/--
Given a commutative ring `A` with fraction ring `K`, and a submonoid `S` of `A` which
contains no zero divisor, this is the localization of `A` at `S`, considered as
a subalgebra of `K` over `A`.
The carrier of this subalgebra is defined as the set of all `x : K` of the form
`is_localization.mk' K a ⟨s, _⟩`, where `s ∈ S`.
-/
noncomputable
def subalgebra (hS : S ≤ A⁰) : subalgebra A K :=
(map_to_fraction_ring K S (localization S) hS).range.copy
{ x | ∃ (a s : A) (hs : s ∈ S), x = is_localization.mk' K a ⟨s, hS hs⟩ } $
by { ext, symmetry, apply mem_range_map_to_fraction_ring_iff }
namespace subalgebra
instance is_localization_subalgebra :
is_localization S (subalgebra K S hS) :=
by { dunfold localization.subalgebra, rw subalgebra.copy_eq, apply_instance }
instance is_fraction_ring : is_fraction_ring (subalgebra K S hS) K :=
is_fraction_ring.is_fraction_ring_of_is_localization S _ _ hS
end subalgebra
end comm_ring
section field
variables [field K] [algebra A K] [is_fraction_ring A K]
namespace subalgebra
lemma mem_range_map_to_fraction_ring_iff_of_field
(B : Type*) [comm_ring B] [algebra A B] [is_localization S B] (x : K) :
x ∈ (map_to_fraction_ring K S B hS).range ↔
∃ (a s : A) (hs : s ∈ S), x = algebra_map A K a * (algebra_map A K s)⁻¹ :=
begin
rw mem_range_map_to_fraction_ring_iff,
iterate 3 { congr' with }, convert iff.rfl, rw units.coe_inv, refl,
end
/--
Given a domain `A` with fraction field `K`, and a submonoid `S` of `A` which
contains no zero divisor, this is the localization of `A` at `S`, considered as
a subalgebra of `K` over `A`.
The carrier of this subalgebra is defined as the set of all `x : K` of the form
`algebra_map A K a * (algebra_map A K s)⁻¹` where `a s : A` and `s ∈ S`.
-/
noncomputable
def of_field : _root_.subalgebra A K :=
(map_to_fraction_ring K S (localization S) hS).range.copy
{ x | ∃ (a s : A) (hs : s ∈ S), x = algebra_map A K a * (algebra_map A K s)⁻¹ } $
by { ext, symmetry, apply mem_range_map_to_fraction_ring_iff_of_field }
instance is_localization_of_field :
is_localization S (subalgebra.of_field K S hS) :=
by { dunfold localization.subalgebra.of_field, rw subalgebra.copy_eq, apply_instance }
instance is_fraction_ring_of_field : is_fraction_ring (subalgebra.of_field K S hS) K :=
is_fraction_ring.is_fraction_ring_of_is_localization S _ _ hS
end subalgebra
end field
end localization
|
Comparative study between clinical and non-clinical isolates of S. cerevisiae and its role as an emerging pathogen.
Saccharomyces cerevisiae has traditionally been used in industrial fermentative processes, beer and wine production, baking, and as a nutritional supplement or even as a probiotic. This is the case of the probiotic strain of this species, S. cerevisiae var. boulardii, which has been widely used in Europe to treat several types of diarrhoea. Although S. cerevisiae and S. boulardii have commonly been considered safe microorganisms, this view has changed due to an increased number of human infections. Consequently, S. cerevisiae is now considered an emerging opportunistic pathogen that can cause clinically relevant infections, mainly associated with immunocompromised patients. With the aim of understanding the possible role of this yeast as an opportunistic pathogen, the present work undertakes a comparative study between clinical and non-clinical isolates of S. cerevisiae from different points of view. This study could be of great interest to any food industry that includes S. cerevisiae in its preparations, as well as to the hospital environment, warning of the risk entailed by administering products containing this yeast to immunosuppressed persons.
[STATEMENT]
lemma [simp]: "fvs(map Val vs) = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fvs (map Val vs) = {}
[PROOF STEP]
by (induct vs) auto |
/-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import logic.equiv.basic
/-!
# Extra lemmas about `ulift` and `plift`
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we provide `subsingleton`, `unique`, `decidable_eq`, and `is_empty` instances for
`ulift α` and `plift α`. We also prove `ulift.forall`, `ulift.exists`, `plift.forall`, and
`plift.exists`.
-/
universes u v
open function
namespace plift
variables {α : Sort u} {β : Sort v}
instance [subsingleton α] : subsingleton (plift α) := equiv.plift.subsingleton
instance [nonempty α] : nonempty (plift α) := equiv.plift.nonempty
instance [unique α] : unique (plift α) := equiv.plift.unique
instance [decidable_eq α] : decidable_eq (plift α) := equiv.plift.decidable_eq
instance [is_empty α] : is_empty (plift α) := equiv.plift.is_empty
lemma up_injective : injective (@up α) := equiv.plift.symm.injective
lemma up_surjective : surjective (@up α) := equiv.plift.symm.surjective
lemma up_bijective : bijective (@up α) := equiv.plift.symm.bijective
@[simp] lemma up_inj {x y : α} : up x = up y ↔ x = y := up_injective.eq_iff
lemma down_surjective : surjective (@down α) := equiv.plift.surjective
lemma down_bijective : bijective (@down α) := equiv.plift.bijective
@[simp] lemma «forall» {p : plift α → Prop} : (∀ x, p x) ↔ ∀ x : α, p (plift.up x) :=
up_surjective.forall
@[simp] lemma «exists» {p : plift α → Prop} : (∃ x, p x) ↔ ∃ x : α, p (plift.up x) :=
up_surjective.exists
end plift
namespace ulift
variables {α : Type u} {β : Type v}
instance [subsingleton α] : subsingleton (ulift α) := equiv.ulift.subsingleton
instance [nonempty α] : nonempty (ulift α) := equiv.ulift.nonempty
instance [unique α] : unique (ulift α) := equiv.ulift.unique
instance [decidable_eq α] : decidable_eq (ulift α) := equiv.ulift.decidable_eq
instance [is_empty α] : is_empty (ulift α) := equiv.ulift.is_empty
lemma up_injective : injective (@up α) := equiv.ulift.symm.injective
lemma up_surjective : surjective (@up α) := equiv.ulift.symm.surjective
lemma up_bijective : bijective (@up α) := equiv.ulift.symm.bijective
@[simp] lemma up_inj {x y : α} : up x = up y ↔ x = y := up_injective.eq_iff
lemma down_surjective : surjective (@down α) := equiv.ulift.surjective
lemma down_bijective : bijective (@down α) := equiv.ulift.bijective
@[simp] lemma «forall» {p : ulift α → Prop} : (∀ x, p x) ↔ ∀ x : α, p (ulift.up x) :=
up_surjective.forall
@[simp] lemma «exists» {p : ulift α → Prop} : (∃ x, p x) ↔ ∃ x : α, p (ulift.up x) :=
up_surjective.exists
end ulift
|
-- Idris2
import System
import System.Concurrency
||| Test releasing without acquiring errors correctly
main : IO ()
main =
do m <- makeMutex
mutexRelease m
putStrLn "Released w/o acquiring (SHOULDN'T HAPPEN)"
|
\section{Global definitions}
\label{sec:formalisations:global_definitions}
This section defines a multiplicity, which is a two-tuple consisting of a lower and an upper bound. In Ecore, the notion of a multiplicity is used within a field signature (\cref{defin:formalisations:ecore_formalisation:type_models:type_model}) in order to specify a limit on the allowed number of values for a field. In GROOVE, multiplicities are used to bound the number of incoming and outgoing edges for each node type via multiplicity pairs (\cref{defin:formalisations:groove_formalisation:type_graphs:multiplicity_pair}).
\begin{defin}[Multiplicity]
\label{defin:formalisations:global_definitions:multiplicity}
A multiplicity is a two-tuple consisting of a lower bound (which is any natural number) and an upper bound (which is possibly unbounded).
\begin{equation*}
\mathbb{M} \subseteq (\mathbb{N} \times (\mathbb{N^+} \cup \{\mstar\}))\ \cap \leq
\end{equation*}
The first value represents the lower bound, the second value of the tuple represents the upper bound. The set of multiplicities $\mathbb{M}$ is formally defined as
\begin{equation*}
\mathbb{M} = \{ (l, u) \mid l \in \mathbb{N} \land u \in (\mathbb{N^+} \cup \{\mstar\}) \land l \leq u \}
\end{equation*}
It holds that $\mstar$ is larger than each natural number, so $\forall n \in \mathbb{N}: n < \mstar$. Furthermore, the notation $l..u$ is used to denote $(l, u) \in \mathbb{M}$.
Finally, any natural number $n$ is said to be part of a multiplicity if it is within bounds, meaning:
\begin{equation*}
\forall m = l..u \in \mathbb{M}, n \in \mathbb{N} : n \in m \Leftrightarrow l \leq n \leq u
\end{equation*}
\isabellelref{multiplicity}{Ecore.Multiplicity}
\end{defin} |
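For example, $3 \in 1..5$ since $1 \leq 3 \leq 5$, whereas $3 \notin 0..2$; moreover, since $n < \mstar$ for every natural number $n$, every $n \in \mathbb{N}$ satisfies $n \in 0..\mstar$.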
/*
Copyright (C) 2003-2013 by David White <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "graphics.hpp"
#include <boost/shared_ptr.hpp>
#include <map>
#include <vector>
#include <sstream>
#include "asserts.hpp"
#include "custom_object_type.hpp"
#include "filesystem.hpp"
#include "foreach.hpp"
#include "formatter.hpp"
#include "frame.hpp"
#include "geometry.hpp"
#include "json_parser.hpp"
#include "module.hpp"
#include "string_utils.hpp"
#include "surface.hpp"
#include "surface_cache.hpp"
#include "unit_test.hpp"
#include "variant_utils.hpp"
#include "IMG_savepng.h"
void UTILITY_query(const std::vector<std::string>& args);
namespace {
const int TextureImageSize = 1024;
struct animation_area {
explicit animation_area(variant node) : anim(new frame(node)), is_particle(false)
{
width = 0;
height = 0;
foreach(const frame::frame_info& f, anim->frame_layout()) {
width += f.area.w();
if(f.area.h() > height) {
height = f.area.h();
}
}
src_image = node["image"].as_string();
dst_image = -1;
}
boost::intrusive_ptr<frame> anim;
int width, height;
std::string src_image;
int dst_image;
rect dst_area;
bool is_particle;
};
typedef boost::shared_ptr<animation_area> animation_area_ptr;
bool operator==(const animation_area& a, const animation_area& b)
{
return a.src_image == b.src_image && a.anim->area() == b.anim->area() && a.anim->pad() == b.anim->pad() && a.anim->num_frames() == b.anim->num_frames() && a.anim->num_frames_per_row() == b.anim->num_frames_per_row();
}
std::set<animation_area_ptr> animation_areas_with_alpha;
bool animation_area_height_compare(animation_area_ptr a, animation_area_ptr b)
{
if(a->is_particle != b->is_particle) {
return a->is_particle;
}
if(animation_areas_with_alpha.count(a) != animation_areas_with_alpha.count(b)) {
return animation_areas_with_alpha.count(a) != 0;
}
return a->height > b->height;
}
struct output_area {
explicit output_area(int n) : image_id(n)
{
area = rect(0, 0, TextureImageSize, TextureImageSize);
}
int image_id;
rect area;
};
rect use_output_area(const output_area& input, int width, int height, std::vector<output_area>& areas)
{
ASSERT_LE(width, input.area.w());
ASSERT_LE(height, input.area.h());
rect result(input.area.x(), input.area.y(), width, height);
if(input.area.h() > height) {
areas.push_back(output_area(input.image_id));
areas.back().area = rect(input.area.x(), input.area.y() + height, input.area.w(), input.area.h() - height);
}
if(input.area.w() > width) {
areas.push_back(output_area(input.image_id));
areas.back().area = rect(input.area.x() + width, input.area.y(), input.area.w() - width, height);
}
return result;
}
}
namespace graphics {
void set_alpha_for_transparent_colors_in_rgba_surface(SDL_Surface* s, int options=0);
}
namespace {
bool animation_area_has_alpha_channel(animation_area_ptr anim)
{
using namespace graphics;
surface surf = graphics::surface_cache::get(anim->src_image);
if(!surf || surf->format->BytesPerPixel != 4) {
return false;
}
const uint32_t* pixels = reinterpret_cast<const uint32_t*>(surf->pixels);
for(int f = 0; f != anim->anim->num_frames(); ++f) {
const frame::frame_info& info = anim->anim->frame_layout()[f];
const int x = f%anim->anim->num_frames_per_row();
const int y = f/anim->anim->num_frames_per_row();
const rect& base_area = anim->anim->area();
const int xpos = base_area.x() + (base_area.w()+anim->anim->pad())*x;
const int ypos = base_area.y() + (base_area.h()+anim->anim->pad())*y;
SDL_Rect blit_src = {xpos + info.x_adjust, ypos + info.y_adjust, info.area.w(), info.area.h()};
for(int x = 0; x != blit_src.w; ++x) {
for(int y = 0; y != blit_src.h; ++y) {
const int index = (blit_src.y + y)*surf->w + (blit_src.x + x);
const uint32_t pixel = pixels[index];
const uint32_t mask = (pixels[index]&surf->format->Amask);
if(mask != 0 && mask != surf->format->Amask) {
return true;
}
}
}
}
return false;
}
}
UTILITY(compile_objects)
{
#ifndef IMPLEMENT_SAVE_PNG
std::cerr
<< "This build wasn't done with IMPLEMENT_SAVE_PNG defined. "
<< "Consquently image files will not be written, aborting requested operation."
<< std::endl;
return;
#endif
using graphics::surface;
int num_output_images = 0;
std::vector<output_area> output_areas;
output_areas.push_back(output_area(num_output_images++));
std::map<variant, std::string> nodes_to_files;
std::vector<variant> objects;
std::vector<animation_area_ptr> animation_areas;
std::map<variant, animation_area_ptr> nodes_to_animation_areas;
std::vector<variant> animation_containing_nodes;
std::vector<std::string> no_compile_images;
variant gui_node = json::parse_from_file("data/gui.cfg");
animation_containing_nodes.push_back(gui_node);
std::map<std::string, variant> gui_nodes;
std::vector<std::string> gui_files;
module::get_files_in_dir("data/gui", &gui_files);
foreach(const std::string& gui, gui_files) {
if(gui[0] == '.') {
continue;
}
gui_nodes[gui] = json::parse_from_file("data/gui/" + gui);
animation_containing_nodes.push_back(gui_nodes[gui]);
if(gui_nodes[gui].has_key("no_compile_image")) {
std::vector<std::string> images = util::split(gui_nodes[gui][variant("no_compile_image")].as_string());
no_compile_images.insert(no_compile_images.end(), images.begin(), images.end());
}
}
std::vector<const_custom_object_type_ptr> types = custom_object_type::get_all();
foreach(const_custom_object_type_ptr type, types) {
const std::string* path = custom_object_type::get_object_path(type->id() + ".cfg");
//skip any experimental stuff so it isn't compiled
const std::string Experimental = "experimental";
if(std::search(path->begin(), path->end(), Experimental.begin(), Experimental.end()) != path->end()) {
continue;
}
std::cerr << "OBJECT: " << type->id() << " -> " << *path << "\n";
variant obj_node = json::parse_from_file(*path);
obj_node = custom_object_type::merge_prototype(obj_node);
obj_node.remove_attr(variant("prototype"));
if(obj_node["editor_info"].is_map() && obj_node["editor_info"]["var"].is_list()) {
std::vector<std::string> names;
foreach(variant entry, obj_node["editor_info"]["var"].as_list()) {
names.push_back(entry["name"].as_string());
}
if(names.empty() == false) {
std::map<variant, variant> m;
if(obj_node["vars"].is_map()) {
m = obj_node["vars"].as_map();
}
foreach(const std::string& name, names) {
variant v(name);
if(m.count(v) == 0) {
m[v] = variant();
}
}
obj_node.add_attr(variant("vars"), variant(&m));
}
}
objects.push_back(obj_node);
nodes_to_files[obj_node] = "data/compiled/objects/" + type->id() + ".cfg";
if(obj_node.has_key("no_compile_image")) {
std::vector<std::string> images = util::split(obj_node["no_compile_image"].as_string());
no_compile_images.insert(no_compile_images.end(), images.begin(), images.end());
}
animation_containing_nodes.push_back(obj_node);
foreach(variant v, obj_node["particle_system"].as_list()) {
animation_containing_nodes.push_back(v);
}
//add nested objects -- disabled for now until we find bugs in it.
/*
for(wml::node::child_iterator i = obj_node->begin_child("object_type"); i != obj_node->end_child("object_type"); ++i) {
animation_containing_nodes.push_back(i->second);
}
*/
}
foreach(variant node, animation_containing_nodes) {
foreach(const variant_pair& p, node.as_map()) {
std::string attr_name = p.first.as_string();
if(attr_name != "animation" && attr_name != "framed_gui_element" && attr_name != "section") {
continue;
}
foreach(const variant& v, p.second.as_list()) {
animation_area_ptr anim(new animation_area(v));
if(anim->src_image.empty() || v.has_key(variant("palettes")) || std::find(no_compile_images.begin(), no_compile_images.end(), anim->src_image) != no_compile_images.end()) {
continue;
}
animation_areas.push_back(anim);
foreach(animation_area_ptr area, animation_areas) {
if(*area == *anim) {
anim = area;
break;
}
}
if(attr_name == "particle_system") {
anim->is_particle = true;
}
if(anim != animation_areas.back()) {
animation_areas.pop_back();
}
nodes_to_animation_areas[v] = anim;
if(animation_area_has_alpha_channel(anim)) {
animation_areas_with_alpha.insert(anim);
}
}
}
}
std::sort(animation_areas.begin(), animation_areas.end(), animation_area_height_compare);
{
std::vector<animation_area_ptr> animation_areas_alpha;
}
foreach(animation_area_ptr anim, animation_areas) {
ASSERT_LOG(anim->width <= 1024 && anim->height <= 1024,
"Bad animation area " << anim->width << "x" << anim->height << " for " << anim->src_image << ". Must be 1024x1024 or less.");
int match = -1;
int match_diff = -1;
for(int n = 0; n != output_areas.size(); ++n) {
if(anim->width <= output_areas[n].area.w() && anim->height <= output_areas[n].area.h()) {
const int diff = output_areas[n].area.w()*output_areas[n].area.h() - anim->width*anim->height;
if(match == -1 || diff < match_diff) {
match = n;
match_diff = diff;
}
}
}
if(match == -1) {
match = output_areas.size();
output_areas.push_back(output_area(num_output_images++));
}
output_area match_area = output_areas[match];
output_areas.erase(output_areas.begin() + match);
rect area = use_output_area(match_area, anim->width, anim->height, output_areas);
anim->dst_image = match_area.image_id;
anim->dst_area = area;
}
std::vector<surface> surfaces;
for(int n = 0; n != num_output_images; ++n) {
surfaces.push_back(surface(SDL_CreateRGBSurface(0,TextureImageSize,TextureImageSize,32,SURFACE_MASK)));
}
foreach(animation_area_ptr anim, animation_areas) {
foreach(animation_area_ptr other, animation_areas) {
if(anim == other || anim->dst_image != other->dst_image) {
continue;
}
ASSERT_LOG(rects_intersect(anim->dst_area, other->dst_area) == false, "RECTANGLES CLASH: " << anim->dst_image << " " << anim->dst_area << " vs " << other->dst_area);
}
ASSERT_INDEX_INTO_VECTOR(anim->dst_image, surfaces);
surface dst = surfaces[anim->dst_image];
surface src = graphics::surface_cache::get(anim->src_image);
ASSERT_LOG(src.get() != NULL, "COULD NOT LOAD IMAGE: '" << anim->src_image << "'");
int xdst = 0;
for(int f = 0; f != anim->anim->num_frames(); ++f) {
const frame::frame_info& info = anim->anim->frame_layout()[f];
const int x = f%anim->anim->num_frames_per_row();
const int y = f/anim->anim->num_frames_per_row();
const rect& base_area = anim->anim->area();
const int xpos = base_area.x() + (base_area.w()+anim->anim->pad())*x;
const int ypos = base_area.y() + (base_area.h()+anim->anim->pad())*y;
SDL_Rect blit_src = {xpos + info.x_adjust, ypos + info.y_adjust, info.area.w(), info.area.h()};
SDL_Rect blit_dst = {anim->dst_area.x() + xdst,
anim->dst_area.y(),
info.area.w(), info.area.h()};
xdst += info.area.w();
ASSERT_GE(blit_dst.x, anim->dst_area.x());
ASSERT_GE(blit_dst.y, anim->dst_area.y());
ASSERT_LE(blit_dst.x + blit_dst.w, anim->dst_area.x() + anim->dst_area.w());
ASSERT_LE(blit_dst.y + blit_dst.h, anim->dst_area.y() + anim->dst_area.h());
SDL_SetSurfaceBlendMode(src.get(), SDL_BLENDMODE_NONE);
SDL_BlitSurface(src.get(), &blit_src, dst.get(), &blit_dst);
}
}
for(int n = 0; n != num_output_images; ++n) {
std::ostringstream fname;
fname << "images/compiled-" << n << ".png";
graphics::set_alpha_for_transparent_colors_in_rgba_surface(surfaces[n].get());
IMG_SavePNG((module::get_module_path() + fname.str()).c_str(), surfaces[n].get(), -1);
}
typedef std::pair<variant, animation_area_ptr> anim_pair;
foreach(const anim_pair& a, nodes_to_animation_areas) {
variant node = a.first;
animation_area_ptr anim = a.second;
std::ostringstream fname;
fname << "compiled-" << anim->dst_image << ".png";
node.add_attr_mutation(variant("image"), variant(fname.str()));
node.remove_attr_mutation(variant("x"));
node.remove_attr_mutation(variant("y"));
node.remove_attr_mutation(variant("w"));
node.remove_attr_mutation(variant("h"));
node.remove_attr_mutation(variant("pad"));
const frame::frame_info& first_frame = anim->anim->frame_layout().front();
rect r(anim->dst_area.x() - first_frame.x_adjust, anim->dst_area.y() - first_frame.y_adjust, anim->anim->area().w(), anim->anim->area().h());
node.add_attr_mutation(variant("rect"), r.write());
int xpos = anim->dst_area.x();
std::vector<int> v;
foreach(const frame::frame_info& f, anim->anim->frame_layout()) {
ASSERT_EQ(f.area.w() + f.x_adjust + f.x2_adjust, anim->anim->area().w());
ASSERT_EQ(f.area.h() + f.y_adjust + f.y2_adjust, anim->anim->area().h());
v.push_back(f.x_adjust);
v.push_back(f.y_adjust);
v.push_back(f.x2_adjust);
v.push_back(f.y2_adjust);
v.push_back(xpos);
v.push_back(anim->dst_area.y());
v.push_back(f.area.w());
v.push_back(f.area.h());
xpos += f.area.w();
}
std::vector<variant> vs;
foreach(int n, v) {
vs.push_back(variant(n));
}
node.add_attr_mutation(variant("frame_info"), variant(&vs));
}
for(std::map<variant, std::string>::iterator i = nodes_to_files.begin(); i != nodes_to_files.end(); ++i) {
variant node = i->first;
module::write_file(i->second, node.write_json());
}
module::write_file("data/compiled/gui.cfg", gui_node.write_json());
for(std::map<std::string, variant>::iterator i = gui_nodes.begin();
i != gui_nodes.end(); ++i) {
module::write_file("data/compiled/gui/" + i->first, i->second.write_json());
}
if(sys::file_exists("./compile-objects.cfg")) {
variant script = json::parse(sys::read_file("./compile-objects.cfg"));
if(script["query"].is_list()) {
foreach(variant query, script["query"].as_list()) {
std::vector<std::string> args;
foreach(variant arg, query.as_list()) {
args.push_back(arg.as_string());
}
UTILITY_query(args);
}
}
}
}
namespace {
struct SpritesheetCell {
int begin_col, end_col;
};
struct SpritesheetRow {
int begin_row, end_row;
std::vector<SpritesheetCell> cells;
};
struct SpritesheetAnimation {
std::vector<rect> frames;
variant node;
rect target_area;
int cell_width() const {
int result = 0;
for(const rect& r : frames) {
result = std::max<int>(r.w(), result);
}
return result;
}
int cell_height() const {
int result = 0;
for(const rect& r : frames) {
result = std::max<int>(r.h(), result);
}
return result;
}
int height() const {
return cell_height() + 4;
}
int width() const {
return (cell_width()+3)*frames.size() + 4;
}
};
bool is_row_blank(graphics::surface surf, const unsigned char* pixels)
{
for(int x = 0; x < surf->w; ++x) {
if(pixels[3] > 64) {
return false;
}
pixels += 4;
}
return true;
}
bool is_col_blank(graphics::surface surf, const SpritesheetRow& row, int col)
{
if(col >= surf->w) {
return true;
}
const unsigned char* pixels = (const unsigned char*)surf->pixels;
pixels += row.begin_row*surf->w*4 + col*4;
for(int y = row.begin_row; y < row.end_row; ++y) {
if(pixels[3] > 64) {
return false;
}
pixels += surf->w*4;
}
return true;
}
std::vector<SpritesheetRow> get_cells(graphics::surface surf)
{
std::vector<SpritesheetRow> rows;
const unsigned char* pixels = (const unsigned char*)surf->pixels;
int start_row = -1;
for(int row = 0; row <= surf->h; ++row) {
const bool blank = row == surf->h || is_row_blank(surf, pixels);
if(blank) {
if(start_row != -1) {
SpritesheetRow new_row;
new_row.begin_row = start_row;
new_row.end_row = row;
rows.push_back(new_row);
start_row = -1;
}
} else {
if(start_row == -1) {
start_row = row;
}
}
pixels += surf->w*4;
}
for(SpritesheetRow& sprite_row : rows) {
int start_col = -1;
for(int col = 0; col <= surf->w; ++col) {
const bool blank = is_col_blank(surf, sprite_row, col);
if(blank) {
if(start_col != -1) {
SpritesheetCell new_cell = { start_col, col };
sprite_row.cells.push_back(new_cell);
start_col = -1;
}
} else {
if(start_col == -1) {
start_col = col;
}
}
}
std::cerr << "ROW: " << sprite_row.begin_row << ", " << sprite_row.end_row << " -> " << sprite_row.cells.size() << "\n";
}
return rows;
}
void write_pixel_surface(graphics::surface surf, int x, int y, int r, int g, int b, int a)
{
if(x < 0 || y < 0 || x >= surf->w || y >= surf->h) {
return;
}
unsigned char* pixels = (unsigned char*)surf->pixels;
pixels += y * surf->w * 4 + x * 4;
*pixels++ = r;
*pixels++ = g;
*pixels++ = b;
*pixels++ = a;
}
void write_spritesheet_frame(graphics::surface src, const rect& src_area, graphics::surface dst, int target_x, int target_y)
{
const unsigned char* alpha_colors = graphics::get_alpha_pixel_colors();
std::vector<unsigned char*> border_pixels;
for(int xpos = target_x; xpos < target_x + src_area.w() + 2; ++xpos) {
unsigned char* p = (unsigned char*)dst->pixels + (target_y*dst->w + xpos)*4;
border_pixels.push_back(p);
p += (src_area.h()+1)*dst->w*4;
border_pixels.push_back(p);
}
for(int ypos = target_y; ypos < target_y + src_area.h() + 2; ++ypos) {
unsigned char* p = (unsigned char*)dst->pixels + (ypos*dst->w + target_x)*4;
border_pixels.push_back(p);
p += (src_area.w()+1)*4;
border_pixels.push_back(p);
}
for(unsigned char* p : border_pixels) {
memcpy(p, alpha_colors+3, 3);
p[3] = 255;
}
}
bool rect_in_surf_empty(graphics::surface surf, rect area)
{
const unsigned char* p = (const unsigned char*)surf->pixels;
p += (area.y()*surf->w + area.x())*4;
for(int y = 0; y < area.h(); ++y) {
for(int x = 0; x < area.w(); ++x) {
if(p[x*4 + 3]) {
return false;
}
}
p += surf->w*4;
}
return true;
}
int goodness_of_fit(graphics::surface surf, rect areaa, rect areab)
{
if(areaa.h() > areab.h()) {
std::swap(areaa, areab);
}
bool can_slice = true;
while(areaa.h() < areab.h() && can_slice) {
can_slice = false;
if(rect_in_surf_empty(surf, rect(areab.x(), areab.y(), areab.w(), 1))) {
std::cerr << "SLICE: " << areab << " -> ";
areab = rect(areab.x(), areab.y()+1, areab.w(), areab.h()-1);
std::cerr << areab << "\n";
can_slice = true;
}
if(areaa.h() < areab.h() && rect_in_surf_empty(surf, rect(areab.x(), areab.y()+areab.h()-1, areab.w(), 1))) {
std::cerr << "SLICE: " << areab << " -> ";
areab = rect(areab.x(), areab.y(), areab.w(), areab.h()-1);
std::cerr << areab << "\n";
can_slice = true;
}
if(areaa.h() == areab.h()) {
std::cerr << "SLICED DOWN: " << areab << "\n";
}
}
if(areaa.h() < areab.h() && areab.h() - areaa.h() <= 4) {
const int diff = areab.h() - areaa.h();
areab = rect(areab.x(), areab.y() + diff/2, areab.w(), areab.h() - diff);
}
if(areaa.w() != areab.w() && areaa.h() == areab.h()) {
rect a = areaa;
rect b = areab;
if(a.w() > b.w()) {
std::swap(a,b);
}
int best_score = INT_MAX;
for(int xoffset = 0; xoffset < b.w() - a.w(); ++xoffset) {
rect r(b.x() + xoffset, b.y(), a.w(), b.h());
const int score = goodness_of_fit(surf, r, a);
if(score < best_score) {
best_score = score;
}
}
return best_score;
}
if(areaa.w() != areab.w() || areaa.h() != areab.h()) {
return INT_MAX;
}
int errors = 0;
for(int y = 0; y < areaa.h(); ++y) {
const int ya = areaa.y() + y;
const int yb = areab.y() + y;
for(int x = 0; x < areaa.w(); ++x) {
const int xa = areaa.x() + x;
const int xb = areab.x() + x;
const unsigned char* pa = (const unsigned char*)surf->pixels + (ya*surf->w + xa)*4;
const unsigned char* pb = (const unsigned char*)surf->pixels + (yb*surf->w + xb)*4;
if((pa[3] > 32) != (pb[3] > 32)) {
++errors;
}
}
}
return errors;
}
int score_offset_fit(graphics::surface surf, const rect& big_area, const rect& lit_area, int offsetx, int offsety)
{
int score = 0;
for(int y = 0; y < big_area.h(); ++y) {
for(int x = 0; x < big_area.w(); ++x) {
const unsigned char* big_p = (const unsigned char*)surf->pixels + ((big_area.y() + y)*surf->w + (big_area.x() + x))*4;
const int xadj = x - offsetx;
const int yadj = y - offsety;
if(xadj < 0 || yadj < 0 || xadj >= lit_area.w() || yadj >= lit_area.h()) {
if(big_p[3] >= 32) {
++score;
}
continue;
}
const unsigned char* lit_p = (const unsigned char*)surf->pixels + ((lit_area.y() + yadj)*surf->w + (lit_area.x() + xadj))*4;
if((big_p[3] >= 32) != (lit_p[3] >= 32)) {
++score;
}
}
}
return score;
}
void get_best_offset(graphics::surface surf, const rect& big_area, const rect& lit_area, int* xoff, int* yoff)
{
std::cerr << "CALC BEST OFFSET...\n";
*xoff = *yoff = 0;
int best_score = -1;
for(int y = 0; y <= (big_area.h() - lit_area.h()); ++y) {
for(int x = 0; x <= (big_area.w() - lit_area.w()); ++x) {
const int score = score_offset_fit(surf, big_area, lit_area, x, y);
std::cerr << "OFFSET " << x << ", " << y << " SCORES " << score << "\n";
if(best_score == -1 || score < best_score) {
*xoff = x;
*yoff = y;
best_score = score;
}
}
}
std::cerr << "BEST OFFSET: " << *xoff << ", " << *yoff << "\n";
}
int find_distance_to_pixel(graphics::surface surf, const rect& area, int xoffset, int yoffset)
{
const int SearchDistance = 4;
int best_distance = SearchDistance+1;
for(int y = -SearchDistance; y <= SearchDistance; ++y) {
for(int x = -SearchDistance; x <= SearchDistance; ++x) {
const int distance = abs(x) + abs(y);
if(distance >= best_distance) {
continue;
}
int xpos = xoffset + x;
int ypos = yoffset + y;
if(xpos >= 0 && ypos >= 0 && xpos < area.w() && ypos < area.h()) {
const unsigned char* p = (const unsigned char*)surf->pixels + ((area.y() + ypos)*surf->w + (area.x() + xpos))*4;
if(p[3] >= 32) {
best_distance = distance;
}
}
}
}
return best_distance;
}
int score_spritesheet_area(graphics::surface surf, const rect& area_a, int xoff_a, int yoff_a, const rect& area_b, int xoff_b, int yoff_b, const rect& big_area)
{
unsigned char default_color[4] = {0,0,0,0};
int score = 0;
for(int y = 0; y < big_area.h(); ++y) {
for(int x = 0; x < big_area.w(); ++x) {
const int xadj_a = x - xoff_a;
const int yadj_a = y - yoff_a;
const int xadj_b = x - xoff_b;
const int yadj_b = y - yoff_b;
const unsigned char* pa = default_color;
const unsigned char* pb = default_color;
if(xadj_a >= 0 && xadj_a < area_a.w() && yadj_a >= 0 && yadj_a < area_a.h()) {
pa = (const unsigned char*)surf->pixels + ((area_a.y() + yadj_a)*surf->w + (area_a.x() + xadj_a))*4;
}
if(xadj_b >= 0 && xadj_b < area_b.w() && yadj_b >= 0 && yadj_b < area_b.h()) {
pb = (const unsigned char*)surf->pixels + ((area_b.y() + yadj_b)*surf->w + (area_b.x() + xadj_b))*4;
}
if((pa[3] >= 32) != (pb[3] >= 32)) {
if(pa[3] >= 32) {
score += find_distance_to_pixel(surf, area_b, xadj_b, yadj_b);
} else {
score += find_distance_to_pixel(surf, area_a, xadj_a, yadj_a);
}
}
}
}
return score;
}
void flip_surface_area(graphics::surface surf, const rect& area)
{
for(int y = area.y(); y < area.y() + area.h(); ++y) {
unsigned int* pixels = (unsigned int*)surf->pixels + y*surf->w + area.x();
std::reverse(pixels, pixels + area.w());
}
}
void write_spritesheet_animation(graphics::surface src, const SpritesheetAnimation& anim, graphics::surface dst, bool reorder)
{
int target_x = anim.target_area.x()+1;
int target_y = anim.target_area.y()+1;
const int cell_width = anim.cell_width();
const int cell_height = anim.cell_height();
rect biggest_rect = anim.frames.front();
for(const rect& f : anim.frames) {
std::cerr << "RECT SIZE: " << f.w() << "," << f.h() << "\n";
if(f.w()*f.h() > biggest_rect.w()*biggest_rect.h()) {
biggest_rect = f;
}
}
std::vector<int> xoffsets, yoffsets, new_xoffsets, new_yoffsets;
for(const rect& f : anim.frames) {
xoffsets.push_back(0);
yoffsets.push_back(0);
get_best_offset(src, biggest_rect, f, &xoffsets.back(), &yoffsets.back());
}
std::vector<rect> frames = anim.frames;
if(reorder) {
frames.clear();
frames.push_back(anim.frames.front());
new_xoffsets.push_back(xoffsets[0]);
new_yoffsets.push_back(yoffsets[0]);
while(frames.size() < anim.frames.size()) {
int best_frame = -1;
int best_score = INT_MAX;
for(int n = 0; n < anim.frames.size(); ++n) {
if(std::count(frames.begin(), frames.end(), anim.frames[n])) {
continue;
}
const int score = score_spritesheet_area(src, frames.back(), new_xoffsets.back(), new_yoffsets.back(), anim.frames[n], xoffsets[n], yoffsets[n], biggest_rect);
std::cerr << "SCORE: " << anim.frames[n] << " vs " << frames.back() << ": " << n << " -> " << score << "\n";
if(score < best_score || best_frame == -1) {
best_score = score;
best_frame = n;
}
}
std::cerr << "BEST : " << best_frame << ": " << best_score << "\n";
frames.push_back(anim.frames[best_frame]);
new_xoffsets.push_back(xoffsets[best_frame]);
new_yoffsets.push_back(yoffsets[best_frame]);
}
}
for(const rect& f : frames) {
int xoff = 0, yoff = 0;
get_best_offset(src, biggest_rect, f, &xoff, &yoff);
write_spritesheet_frame(src, f, dst, target_x, target_y);
SDL_Rect src_rect = { f.x(), f.y(), f.w(), f.h() };
SDL_Rect dst_rect = { target_x+1 + xoff, target_y+1 + yoff, f.w(), f.h() };
SDL_SetSurfaceBlendMode(src.get(), SDL_BLENDMODE_NONE);
SDL_BlitSurface(src.get(), &src_rect, dst.get(), &dst_rect);
flip_surface_area(dst, rect(target_x, target_y, cell_width, cell_height));
target_x += cell_width + 3;
}
}
}
COMMAND_LINE_UTILITY(bake_spritesheet)
{
std::deque<std::string> argv(args.begin(), args.end());
while(argv.empty() == false) {
std::string arg = argv.front();
argv.pop_front();
std::string cfg_fname = module::map_file(arg);
variant node;
try {
node = json::parse(sys::read_file(cfg_fname));
} catch(json::parse_error& e) {
ASSERT_LOG(false, "Parse error parsing " << arg << " -> " << cfg_fname << ": " << e.error_message());
}
variant baking_info = node["animation_baking"];
ASSERT_LOG(baking_info.is_map(), "baking info not found");
graphics::surface surf = graphics::surface_cache::get(baking_info["source_image"].as_string());
ASSERT_LOG(surf.get(), "No surface found");
std::cerr << "SURFACE SIZE: " << surf->w << "x" << surf->h << "\n";
std::cerr << "DEST SURFACE: " << module::map_file("images/" + baking_info["dest_image"].as_string()) << "\n";
ASSERT_LOG(surf->format->BytesPerPixel == 4, "Incorrect bpp: " << surf->format->BytesPerPixel);
std::vector<SpritesheetRow> rows = get_cells(surf);
unsigned char* pixels = (unsigned char*)surf->pixels;
for(const SpritesheetRow& row : rows) {
for(const SpritesheetCell& cell : row.cells) {
const int x1 = cell.begin_col - 1;
const int x2 = cell.end_col;
const int y1 = row.begin_row - 1;
const int y2 = row.end_row;
for(int x = x1; x <= x2; ++x) {
write_pixel_surface(surf, x, y1, 255, 255, 255, 255);
write_pixel_surface(surf, x, y2, 255, 255, 255, 255);
}
for(int y = y1; y <= y2; ++y) {
write_pixel_surface(surf, x1, y, 255, 255, 255, 255);
write_pixel_surface(surf, x2, y, 255, 255, 255, 255);
}
}
}
/*
typedef std::map<int, std::vector<std::pair<int,int> > > ScoresMap;
std::map<std::pair<int,int>, ScoresMap> all_scores;
for(int y = 0; y < rows.size(); ++y) {
const SpritesheetRow& row = rows[y];
for(int x = 0; x < row.cells.size(); ++x) {
const SpritesheetCell& cell = row.cells[x];
ScoresMap scores;
for(int yy = 0; yy < rows.size(); ++yy) {
const SpritesheetRow& r = rows[yy];
for(int xx = 0; xx < row.cells.size(); ++xx) {
const SpritesheetCell& c = row.cells[xx];
if(xx == x && yy == y) {
continue;
}
const rect areaa(cell.begin_col, row.begin_row, cell.end_col - cell.begin_col, row.end_row - row.begin_row);
const rect areab(c.begin_col, r.begin_row, c.end_col - c.begin_col, r.end_row - r.begin_row);
const int score = goodness_of_fit(surf, areaa, areab);
std::cerr << "SCORE: [" << y << "," << x << "] -> [" << yy << "," << xx << "]: " << score << "\n";
scores[score].resize(scores[score].size()+1);
scores[score].back().first = yy;
scores[score].back().second = xx;
}
}
auto itor = scores.begin();
std::vector<std::pair<int, int> > v = itor->second;
if(v.size() <= 1) {
++itor;
v.push_back(itor->second.front());
}
std::cerr << "BEST SCORES FOR [" << y << "," << x << "] " << (cell.end_col - cell.begin_col) << "x" << (row.end_row - row.begin_row) << ": [" << v[0].first << "," << v[0].second << "], [" << v[1].first << "," << v[1].second << "]\n";
all_scores[std::pair<int,int>(y, x)] = scores;
}
}
for(int y = 0; y < rows.size(); ++y) {
const SpritesheetRow& row = rows[y];
for(int x = 0; x < row.cells.size(); ++x) {
const SpritesheetCell& cell = row.cells[x];
std::set<std::pair<int,int> > seen;
seen.insert(std::pair<int,int>(y,x));
std::vector<std::pair<int,int> > sequence;
sequence.push_back(std::pair<int,int>(y,x));
for(;;) {
std::pair<int,int> value(-1,-1);
bool found = false;
const ScoresMap& scores = all_scores[sequence.back()];
for(auto i = scores.begin(); i != scores.end() && !found; ++i) {
for(auto j = i->second.begin(); j != i->second.end(); ++j) {
if(!seen.count(*j)) {
if(i->first < 1000) {
value = *j;
}
found = true;
break;
}
}
if(found) {
break;
}
}
if(value.first == -1) {
break;
}
seen.insert(value);
sequence.push_back(value);
}
std::cerr << "RECOMMENDED SEQUENCE: ";
for(auto p : sequence) {
std::cerr << "[" << p.first << "," << p.second << "], ";
}
std::cerr << "\n";
}
}
*/
const int TargetTextureSize = 4096;
std::vector<rect> available_space;
available_space.push_back(rect(0, 0, TargetTextureSize, TargetTextureSize));
std::vector<SpritesheetAnimation> animations;
for(const variant& anim : baking_info["animations"].as_list()) {
SpritesheetAnimation new_anim;
new_anim.node = anim;
std::vector<variant> frames = anim["frames"].as_list();
for(const variant& fr : frames) {
std::vector<int> loc = fr.as_list_int();
assert(loc.size() == 2);
ASSERT_LOG(loc[0] < rows.size(), "Invalid animation cell: " << loc[0] << "/" << rows.size());
ASSERT_LOG(loc[1] < rows[loc[0]].cells.size(), "Invalid animation cell: " << loc[1] << "/" << rows[loc[0]].cells.size());
const SpritesheetRow& r = rows[loc[0]];
const SpritesheetCell& c = r.cells[loc[1]];
const rect area(c.begin_col, r.begin_row, c.end_col - c.begin_col, r.end_row - r.begin_row);
new_anim.frames.push_back(area);
}
int best = -1;
int best_score = -1;
for(int n = 0; n != available_space.size(); ++n) {
const rect& area = available_space[n];
if(new_anim.width() <= area.w() && new_anim.height() <= area.h()) {
int score = area.w()*area.h();
fprintf(stderr, "MATCH: %dx%d %d\n", area.w(), area.h(), score);
if(best == -1 || score < best_score) {
best = n;
best_score = score;
}
break;
}
}
ASSERT_LOG(best != -1, "Could not find fit for animation " << new_anim.width() << "x" << new_anim.height() << ": " << animations.size());
new_anim.target_area = rect(available_space[best].x(), available_space[best].y(), new_anim.width(), new_anim.height());
const rect right_area(new_anim.target_area.x2(), new_anim.target_area.y(), available_space[best].w() - new_anim.target_area.w(), new_anim.target_area.h());
const rect bottom_area(new_anim.target_area.x(), new_anim.target_area.y2(), available_space[best].w(), available_space[best].h() - new_anim.target_area.h());
available_space.push_back(right_area);
available_space.push_back(bottom_area);
fprintf(stderr, "DIVIDE: %dx%d %dx%d\n", right_area.w(), right_area.h(), bottom_area.w(), bottom_area.h());
available_space.erase(available_space.begin() + best);
animations.push_back(new_anim);
fprintf(stderr, "FIT ANIM: %d, %d, %d, %d\n", new_anim.target_area.x(), new_anim.target_area.y(), new_anim.target_area.w(), new_anim.target_area.h());
}
graphics::surface target_surf(SDL_CreateRGBSurface(0,TargetTextureSize,TargetTextureSize,32,SURFACE_MASK));
const unsigned char* alpha_colors = graphics::get_alpha_pixel_colors();
unsigned char* target_pixels = (unsigned char*)target_surf->pixels;
for(int n = 0; n < target_surf->w*target_surf->h; ++n) {
memcpy(target_pixels, alpha_colors, 3);
target_pixels[3] = 255;
target_pixels += 4;
}
std::vector<variant> anim_nodes;
for(const SpritesheetAnimation& anim : animations) {
write_spritesheet_animation(surf, anim, target_surf, anim.node[variant("auto_adjust")].as_bool(false));
std::map<variant, variant> node = anim.node.as_map();
node.erase(variant("frames"));
rect area(anim.target_area.x()+2, anim.target_area.y()+2, anim.cell_width(), anim.cell_height());
node[variant("rect")] = area.write();
node[variant("image")] = baking_info["dest_image"];
node[variant("frames")] = variant(static_cast<int>(anim.frames.size()));
node[variant("pad")] = variant(3);
anim_nodes.push_back(variant(&node));
}
node.add_attr(variant("animation"), variant(&anim_nodes));
IMG_SavePNG((module::get_module_path() + "/images/" + baking_info["dest_image"].as_string()).c_str(), target_surf.get(), -1);
sys::write_file(cfg_fname, node.write_json());
}
}
COMMAND_LINE_UTILITY(build_spritesheet_from_images)
{
using namespace graphics;
std::vector<std::vector<surface> > surfaces;
surfaces.resize(surfaces.size()+1);
int row_width = 3;
int sheet_height = 3;
std::vector<int> cell_widths;
std::vector<int> row_heights;
cell_widths.push_back(0);
row_heights.push_back(0);
using namespace graphics;
for(auto img : args) {
if(img == "--newrow") {
surfaces.resize(surfaces.size()+1);
cell_widths.push_back(0);
row_heights.push_back(0);
row_width = 3;
sheet_height += 3;
continue;
}
surface s = surface_cache::get(img);
ASSERT_LOG(s.get(), "No image: " << img);
surfaces.back().push_back(s);
row_width += s->w + 3;
if(s->w > cell_widths.back()) {
cell_widths.back() = s->w;
}
if(s->h > row_heights.back()) {
sheet_height += s->h - row_heights.back();
row_heights.back() = s->h;
}
}
int sheet_width = 0;
for(int nrow = 0; nrow != surfaces.size(); ++nrow) {
const int row_width = 3 + (3+cell_widths[nrow])*surfaces[nrow].size();
if(row_width > sheet_width) {
sheet_width = row_width;
}
}
surface sheet = surface(SDL_CreateRGBSurface(0,sheet_width,sheet_height,32,SURFACE_MASK));
int ypos = 2;
int row_index = 0;
for(auto row : surfaces) {
int xpos = 2;
int max_height = 0;
for(auto src : row) {
SDL_Rect blit_src = {0, 0, src->w, src->h};
SDL_Rect blit_dst = {xpos, ypos, src->w, src->h};
SDL_Rect rect_top = {xpos-1, ypos-1, src->w+2, 1};
SDL_Rect rect_bot = {xpos-1, ypos + src->h, src->w+2, 1};
SDL_Rect rect_left = {xpos-1, ypos, 1, src->h};
SDL_Rect rect_right = {xpos + src->w, ypos, 1, src->h};
SDL_SetSurfaceBlendMode(src.get(), SDL_BLENDMODE_NONE);
SDL_BlitSurface(src.get(), &blit_src, sheet.get(), &blit_dst);
if(blit_src.h > max_height) {
max_height = blit_src.h;
}
Uint32 transparent = SDL_MapRGB(sheet->format, 0xf9, 0x30, 0x3d);
SDL_FillRect(sheet.get(), &rect_top, transparent);
SDL_FillRect(sheet.get(), &rect_bot, transparent);
SDL_FillRect(sheet.get(), &rect_left, transparent);
SDL_FillRect(sheet.get(), &rect_right, transparent);
xpos += cell_widths[row_index] + 3;
}
ypos += max_height + 3;
++row_index;
}
IMG_SavePNG("sheet.png", sheet.get(), -1);
}
//this is a template utility that can be modified to provide a nice utility
//for manipulating images.
COMMAND_LINE_UTILITY(manipulate_image_template)
{
using namespace graphics;
for(auto img : args) {
surface s = surface_cache::get(img);
uint8_t* p = (uint8_t*)s->pixels;
for(int i = 0; i != s->w*s->h; ++i) {
p[3] = p[0];
p[0] = p[1] = p[2] = 255;
p += 4;
}
IMG_SavePNG((module::get_module_path() + "/images/" + img).c_str(), s.get(), -1);
}
}
|
function term = sdlfmjMeanCompute(sdlfmKern, t , option)
% SDLFMJMEANCOMPUTE Jolt mean for the switching dynamical LFM model.
% Computes the terms $r_d$ and $q_d$ that appear in the mean function
% associated with the switching dynamical LFM model. If the mean function
% is mu(t), then
%
% mu(t) = r_d(t)y_d(t_0) + q_d(t)\dot{y}_d(t_0),
%
% where $y_d(t_0)$ is the initial condition associated to the position and
% $\dot{y}_d(t_0)$ is the initial condition associated to the velocity.
%
% FORMAT
% DESC computes the term $r_d$ that accompanies the initial condition of
% the position, using the default option 'Pos'.
% ARG sdlfmKern : switching dynamical LFM kernel structure with the
% parameters.
% ARG t : input times for which the mean is to be computed.
% RETURN term : the value of $r_d$.
%
% FORMAT
% DESC
% Computes the terms that appear in the mean function associated with the
% switching dynamical LFM model.
% ARG sdlfmKern : switching dynamical LFM kernel structure with the
% parameters.
% ARG t : input times for which the mean is to be computed.
% ARG option : indicates which term of the mean should be computed. Option
% 'Pos' computes the term $r_d$ and option 'Vel' computes $q_d$ that
% accompanies the initial condition of the velocity.
% RETURN term : the value of $r_d$ or $q_d$ depending on the option.
%
% COPYRIGHT : Mauricio A. Alvarez, 2010
% KERN
if nargin < 3
option = 'Pos';
end
alpha = sdlfmKern.damper/(2*sdlfmKern.mass);
omega = sqrt(sdlfmKern.spring/sdlfmKern.mass-alpha^2);
freq = omega*t;
switch option
case 'Pos'
term = (alpha^2/omega + omega)*exp(-alpha*t).*...
((omega^2 - alpha^2)*sin(freq) + 2*alpha*omega*cos(freq));
case 'Vel'
term = exp(-alpha*t).*((3*alpha*omega - alpha^3/omega)*sin(freq)...
+(3*alpha^2- omega^2)*cos(freq));
otherwise
error('No recognized option')
end
|
module PolyPairPots
import JuLIP: energy, forces, virial, alloc_temp, alloc_temp_d, cutoff
import JuLIP.Potentials: evaluate, evaluate_d, evaluate!, evaluate_d!
import Base: Dict, convert, ==
function alloc_B end
function alloc_dB end
include("jacobi.jl")
include("transforms.jl")
include("basis.jl")
include("calculator.jl")
include("repulsion.jl")
end # module
|
[STATEMENT]
lemma card_dcharacters [simp]: "card (dcharacters n) = totient n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card (dcharacters n) = totient n
[PROOF STEP]
using bij_betw_same_card [OF bij_betw_dcharacters_characters] card_characters
[PROOF STATE]
proof (prove)
using this:
card (dcharacters n) = card (characters G)
card (characters G) = order G
goal (1 subgoal):
1. card (dcharacters n) = totient n
[PROOF STEP]
by simp |
Jordan kept busy over the next few years by staying in shape, playing golf in celebrity charity tournaments, spending time with his family in Chicago, promoting his Jordan Brand clothing line, and riding motorcycles. Since 2004, Jordan has owned Michael Jordan Motorsports, a professional closed-course motorcycle road racing team that competed with two <unk> in the premier Superbike championship sanctioned by the American Motorcyclist Association (AMA) until the end of the 2013 season. Jordan and his then-wife Juanita pledged $5 million to Chicago's Hales Franciscan High School in 2006, and the Jordan Brand has made donations to Habitat for Humanity and a Louisiana branch of the Boys & Girls Clubs of America.
|
# Programming Assignment
## RealNVP for the LSUN bedroom dataset
### Instructions
In this notebook, you will develop the RealNVP normalising flow architecture from scratch, including the affine coupling layers and checkerboard and channel-wise masking, and combine them into a multiscale architecture. You will train the normalising flow on a subset of the LSUN bedroom dataset.
Some code cells are provided for you in the notebook. You should avoid editing provided code, and make sure to execute the cells in order to avoid unexpected errors. Some cells begin with the line:
`#### GRADED CELL ####`
Don't move or edit this first line - this is what the automatic grader looks for to recognise graded cells. These cells require you to write your own code to complete them, and are automatically graded when you submit the notebook. Don't edit the function name or signature provided in these cells, otherwise the automatic grader might not function properly.
### How to submit
Complete all the tasks you are asked for in the worksheet. When you have finished and are happy with your code, press the **Submit Assignment** button at the top of this notebook.
### Let's get started!
We'll start running some imports, and loading the dataset. Do not edit the existing imports in the following cell. If you would like to make further Tensorflow imports, you should add them here.
```python
#### PACKAGE IMPORTS ####
# Run this cell first to import all required packages. Do not make any imports elsewhere in the notebook
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, BatchNormalization
from tensorflow.keras.optimizers import Adam
tfd = tfp.distributions
tfb = tfp.bijectors
# If you would like to make further imports from tensorflow, add them here
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Add
```
#### The LSUN Bedroom Dataset
In this assignment, you will use a subset of the [LSUN dataset](https://www.yf.io/p/lsun). This is a large-scale image dataset with 10 scene and 20 object categories. A subset of the LSUN bedroom dataset has been provided, and has already been downsampled and preprocessed into smaller, fixed-size images.
* F. Yu, A. Seff, Y. Zhang, S. Song, T. Funkhouser and J. Xia. "LSUN: Construction of a Large-scale Image Dataset using Deep Learning with Humans in the Loop". [arXiv:1506.03365](https://arxiv.org/abs/1506.03365), 10 Jun 2015
Your goal is to develop the RealNVP normalising flow architecture using bijector subclassing, and use it to train a generative model of the LSUN bedroom data subset. For full details on the RealNVP model, refer to the original paper:
* L. Dinh, J. Sohl-Dickstein and S. Bengio. "Density estimation using Real NVP". [arXiv:1605.08803](https://arxiv.org/abs/1605.08803), 27 Feb 2017.
#### Load the dataset
The following functions will be useful for loading and preprocessing the dataset. The subset you will use for this assignment consists of 10,000 training images, 1000 validation images and 1000 test images.
The images have been downsampled to 32 x 32 x 3 in order to simplify the training process.
```python
# Functions for loading and preprocessing the images
def load_image(img):
img = tf.image.random_flip_left_right(img)
return img, img
def load_dataset(split):
train_list_ds = tf.data.Dataset.from_tensor_slices(np.load('./data/{}.npy'.format(split)))
train_ds = train_list_ds.map(load_image)
return train_ds
```
```python
# Load the training, validation and testing datasets splits
train_ds = load_dataset('train')
val_ds = load_dataset('val')
test_ds = load_dataset('test')
```
```python
# Shuffle the datasets
shuffle_buffer_size = 1000
train_ds = train_ds.shuffle(shuffle_buffer_size)
val_ds = val_ds.shuffle(shuffle_buffer_size)
test_ds = test_ds.shuffle(shuffle_buffer_size)
```
```python
# Display a few examples
n_img = 4
f, axs = plt.subplots(n_img, n_img, figsize=(14, 14))
for k, image in enumerate(train_ds.take(n_img**2)):
i = k // n_img
j = k % n_img
axs[i, j].imshow(image[0])
axs[i, j].axis('off')
f.subplots_adjust(wspace=0.01, hspace=0.03)
```
```python
# Batch the Dataset objects
batch_size = 64
train_ds = train_ds.batch(batch_size)
val_ds = val_ds.batch(batch_size)
test_ds = test_ds.batch(batch_size)
```
### Affine coupling layer
We will begin the development of the RealNVP architecture with the core bijector that is called the _affine coupling layer_. This bijector can be described as follows: suppose that $x$ is a $D$-dimensional input, and let $d<D$. Then the output $y$ of the affine coupling layer is given by the following equations:
$$
\begin{align}
y_{1:d} &= x_{1:d} \tag{1}\\
y_{d+1:D} &= x_{d+1:D}\odot \exp(s(x_{1:d})) + t(x_{1:d}), \tag{2}
\end{align}
$$
where $s$ and $t$ are functions from $\mathbb{R}^d\rightarrow\mathbb{R}^{D-d}$, and define the log-scale and shift operations on the vector $x_{d+1:D}$ respectively.
The log of the Jacobian determinant for this layer is given by $\sum_{j}s(x_{1:d})_j$: the Jacobian is lower triangular, with ones on the diagonal for the first $d$ components and $\exp(s(x_{1:d}))_j$ for the remaining ones, so its determinant is simply the product of the scale factors.
The inverse operation can be easily computed as
$$
\begin{align}
x_{1:d} &= y_{1:d}\tag{3}\\
x_{d+1:D} &= \left(y_{d+1:D} - t(y_{1:d})\right)\odot \exp(-s(y_{1:d})),\tag{4}
\end{align}
$$
In practice, we will implement equations $(1)$ and $(2)$ using a binary mask $b$:
$$
\begin{align}
\text{Forward pass:}\qquad y &= b\odot x + (1-b)\odot\left(x\odot\exp(s(b\odot x)) + t(b\odot x)\right),\tag{5}\\
\text{Inverse pass:}\qquad x &= b\odot y + (1-b)\odot\left(y - t(b\odot y)\right)\odot\exp\left(-s(b\odot y)\right).\tag{6}
\end{align}
$$
Our inputs $x$ will be a batch of 3-dimensional Tensors with `height`, `width` and `channels` dimensions. As in the original architecture, we will use both spatial 'checkerboard' masks and channel-wise masks:
<center>Figure 1. Spatial checkerboard mask (left) and channel-wise mask (right). From the original paper.</center>
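To make the masking concrete, the following is a small self-contained NumPy sketch of equations $(5)$ and $(6)$. The constant shift and log-scale functions are illustrative placeholders only (the real model uses the convolutional residual network built below); the point is that the inverse pass recovers the input exactly, because the masked entries fed to $s$ and $t$ are identical in $x$ and $y$:

```python
import numpy as np

# Placeholder shift and log-scale functions (stand-ins for the conv-resnet below)
def s(x): return 0.5 * np.ones_like(x)   # constant log-scale
def t(x): return 2.0 * np.ones_like(x)   # constant shift

x = np.random.randn(4)
b = np.array([1., 1., 0., 0.])           # binary mask: first half passes through

# Forward pass, equation (5)
y = b * x + (1 - b) * (x * np.exp(s(b * x)) + t(b * x))

# Inverse pass, equation (6): since b*y == b*x, s and t receive the same
# inputs in both directions, so the transformation inverts exactly
x_rec = b * y + (1 - b) * (y - t(b * y)) * np.exp(-s(b * y))

print(np.allclose(x, x_rec))  # True
```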
#### Custom model for log-scale and shift
You should now create a custom model for the shift and log-scale parameters that are used in the affine coupling layer bijector. We will use a convolutional residual network, with two residual blocks and a final convolutional layer. Using the functional API, build the model according to the following specifications:
* The function takes the `input_shape` and `filters` as arguments
* The model should use the `input_shape` in the function argument to set the shape in the Input layer (call this layer `h0`).
* The first hidden layer should be a Conv2D layer with number of filters set by the `filters` argument, and a ReLU activation
* The second hidden layer should be a BatchNormalization layer
* The third hidden layer should be a Conv2D layer with the same number of filters as the input `h0` to the model, and a ReLU activation
* The fourth hidden layer should be a BatchNormalization layer
* The fifth hidden layer should be the sum of the fourth hidden layer output and the inputs `h0`. Call this layer `h1`
* The sixth hidden layer should be a Conv2D layer with filters set by the `filters` argument, and a ReLU activation
* The seventh hidden layer should be a BatchNormalization layer
* The eighth hidden layer should be a Conv2D layer with the same number of filters as `h1` (and `h0`), and a ReLU activation
* The ninth hidden layer should be a BatchNormalization layer
* The tenth hidden layer should be the sum of the ninth hidden layer output and `h1`
* The eleventh hidden layer should be a Conv2D layer with the number of filters equal to twice the number of channels of the model input, and a linear activation. Call this layer `h2`
* The twelfth hidden layer should split `h2` into two equal-sized Tensors along the final channel axis. These two Tensors are the shift and log-scale Tensors, and should each have the same shape as the model input
* The final layer should then apply the `tanh` nonlinearity to the log_scale Tensor. The outputs to the model should then be the list of Tensors `[shift, log_scale]`
All Conv2D layers should use a 3x3 kernel size, `"SAME"` padding and an $l2$ kernel regularizer with regularisation coefficient of `5e-5`.
_Hint: use_ `tf.split` _with arguments_ `num_or_size_splits=2, axis=-1` _to create the output Tensors_.
In total, the network should have 14 layers (including the `Input` layer).
```python
#### GRADED CELL ####
# Complete the following function.
# Make sure to not change the function name or arguments.
def get_conv_resnet(input_shape, filters):
"""
This function should build a CNN ResNet model according to the above specification,
using the functional API. The function takes input_shape as an argument, which should be
used to specify the shape in the Input layer, as well as a filters argument, which
should be used to specify the number of filters in (some of) the convolutional layers.
Your function should return the model.
    """
    h0 = Input(shape=input_shape)

    # First residual block
    h = Conv2D(filters, (3, 3), activation='relu', padding='SAME',
               kernel_regularizer=regularizers.l2(5e-5))(h0)
    h = BatchNormalization()(h)
    h = Conv2D(input_shape[-1], (3, 3), activation='relu', padding='SAME',
               kernel_regularizer=regularizers.l2(5e-5))(h)
    h = BatchNormalization()(h)
    h1 = tf.add(h, h0)

    # Second residual block
    h = Conv2D(filters, (3, 3), activation='relu', padding='SAME',
               kernel_regularizer=regularizers.l2(5e-5))(h1)
    h = BatchNormalization()(h)
    h = Conv2D(input_shape[-1], (3, 3), activation='relu', padding='SAME',
               kernel_regularizer=regularizers.l2(5e-5))(h)
    h = BatchNormalization()(h)
    h = tf.add(h1, h)

    # Final convolution with twice the input channels, then split into
    # shift and log-scale, squashing the log-scale with tanh
    h2 = Conv2D(2 * input_shape[-1], (3, 3), activation='linear', padding='SAME',
                kernel_regularizer=regularizers.l2(5e-5))(h)
    shift, log_scale = tf.split(h2, num_or_size_splits=2, axis=-1)
    log_scale = tf.math.tanh(log_scale)

    model = tf.keras.Model(inputs=h0, outputs=[shift, log_scale])
    return model
```
```python
# Test your function and print the model summary
conv_resnet = get_conv_resnet((32, 32, 3), 32)
conv_resnet.summary()
```
Model: "model_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_2 (InputLayer) [(None, 32, 32, 3)] 0
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 32, 32, 32) 896 input_2[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 32, 32, 32) 128 conv2d_5[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 32, 32, 3) 867 batch_normalization_4[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 32, 32, 3) 12 conv2d_6[0][0]
__________________________________________________________________________________________________
tf_op_layer_Add_2 (TensorFlowOp [(None, 32, 32, 3)] 0 batch_normalization_5[0][0]
input_2[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 32, 32, 32) 896 tf_op_layer_Add_2[0][0]
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 32, 32, 32) 128 conv2d_7[0][0]
__________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 32, 32, 3) 867 batch_normalization_6[0][0]
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 32, 32, 3) 12 conv2d_8[0][0]
__________________________________________________________________________________________________
tf_op_layer_Add_3 (TensorFlowOp [(None, 32, 32, 3)] 0 tf_op_layer_Add_2[0][0]
batch_normalization_7[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 32, 32, 6) 168 tf_op_layer_Add_3[0][0]
__________________________________________________________________________________________________
tf_op_layer_split_1 (TensorFlow [(None, 32, 32, 3), 0 conv2d_9[0][0]
__________________________________________________________________________________________________
tf_op_layer_Tanh_1 (TensorFlowO [(None, 32, 32, 3)] 0 tf_op_layer_split_1[0][1]
==================================================================================================
Total params: 3,974
Trainable params: 3,834
Non-trainable params: 140
__________________________________________________________________________________________________
You can also inspect your model architecture graphically by running the following cell.
```python
# Plot the model graph
tf.keras.utils.plot_model(conv_resnet, show_layer_names=False, rankdir='LR')
```
```python
# Check the output shapes are as expected
print(conv_resnet(tf.random.normal((1, 32, 32, 3)))[0].shape)
print(conv_resnet(tf.random.normal((1, 32, 32, 3)))[1].shape)
```
(1, 32, 32, 3)
(1, 32, 32, 3)
#### Binary masks
Now that you have a shift and log-scale model built, we will now implement the affine coupling layer. We will first need functions to create the binary masks $b$ as described above. The following function creates the spatial 'checkerboard' mask.
It takes a rank-2 `shape` as input, which correspond to the `height` and `width` dimensions, as well as an `orientation` argument (an integer equal to `0` or `1`) that determines which way round the zeros and ones are entered into the Tensor.
```python
# Function to create the checkerboard mask
def checkerboard_binary_mask(shape, orientation=0):
height, width = shape[0], shape[1]
height_range = tf.range(height)
width_range = tf.range(width)
height_odd_inx = tf.cast(tf.math.mod(height_range, 2), dtype=tf.bool)
width_odd_inx = tf.cast(tf.math.mod(width_range, 2), dtype=tf.bool)
odd_rows = tf.tile(tf.expand_dims(height_odd_inx, -1), [1, width])
odd_cols = tf.tile(tf.expand_dims(width_odd_inx, 0), [height, 1])
checkerboard_mask = tf.math.logical_xor(odd_rows, odd_cols)
if orientation == 1:
checkerboard_mask = tf.math.logical_not(checkerboard_mask)
return tf.cast(tf.expand_dims(checkerboard_mask, -1), tf.float32)
```
This function creates a rank-3 Tensor to mask the `height`, `width` and `channels` dimensions of the input. We can take a look at this checkerboard mask for some example inputs below. In order to make the Tensors easier to inspect, we will squeeze out the single channel dimension (which is always 1 for this mask).
```python
# Run the checkerboard_binary_mask function to see an example
# NB: we squeeze the shape for easier viewing. The full shape is (4, 4, 1)
tf.squeeze(checkerboard_binary_mask((4, 4), orientation=0))
```
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[0., 1., 0., 1.],
[1., 0., 1., 0.],
[0., 1., 0., 1.],
[1., 0., 1., 0.]], dtype=float32)>
```python
# The `orientation` should be 0 or 1, and determines which way round the binary entries are
tf.squeeze(checkerboard_binary_mask((4, 4), orientation=1))
```
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 0., 1., 0.],
[0., 1., 0., 1.],
[1., 0., 1., 0.],
[0., 1., 0., 1.]], dtype=float32)>
```python
checkerboard_binary_mask((4, 4), orientation=1)
```
<tf.Tensor: shape=(4, 4, 1), dtype=float32, numpy=
array([[[1.],
[0.],
[1.],
[0.]],
[[0.],
[1.],
[0.],
[1.]],
[[1.],
[0.],
[1.],
[0.]],
[[0.],
[1.],
[0.],
[1.]]], dtype=float32)>
You should now complete the following function to create a channel-wise mask. This function takes a single integer `num_channels` as an input, as well as an `orientation` argument, similar to above. You can assume that the `num_channels` integer is even.
The function should return a rank-3 Tensor with singleton entries for `height` and `width`. In the channel axis, the first `num_channels // 2` entries should be zero (for `orientation=0`) and the final `num_channels // 2` entries should be one (for `orientation=0`). The zeros and ones should be reversed for `orientation=1`. The `dtype` of the returned Tensor should be `tf.float32`.
```python
#### GRADED CELL ####
# Complete the following function.
# Make sure to not change the function name or arguments.
def channel_binary_mask(num_channels, orientation=0):
"""
This function takes an integer num_channels and orientation (0 or 1) as
arguments. It should create a channel-wise binary mask with
dtype=tf.float32, according to the above specification.
The function should then return the binary mask.
"""
    # Using num_channels - (num_channels // 2) for the number of ones keeps
    # this correct even when num_channels is odd
    z = tf.zeros(num_channels // 2)
    o = tf.ones(num_channels - num_channels // 2)
    halves = [o, z] if orientation == 1 else [z, o]
    # Add singleton height and width dimensions: final shape (1, 1, num_channels)
    return tf.concat(halves, axis=-1)[None, None, ...]
```
```python
# Run your function to see an example channel-wise binary mask
# (in a notebook, only the value of the last expression is displayed)
channel_binary_mask(6, orientation=0)
channel_binary_mask(6, orientation=1)
channel_binary_mask(5, orientation=1)
```
<tf.Tensor: shape=(1, 1, 5), dtype=float32, numpy=array([[[1., 1., 1., 0., 0.]]], dtype=float32)>
$$
\begin{align}
\text{Forward pass:}\qquad y &= b\odot x + (1-b)\odot\left(x\odot\exp(s(b\odot x)) + t(b\odot x)\right),\tag{5}\\
\text{Inverse pass:}\qquad x &= b\odot y + (1-b)\odot\left(y - t(b\odot y)\right)\odot\exp\left(-s(b\odot y)\right).\tag{6}
\end{align}
$$
```python
#### GRADED CELL ####
# Complete the following functions.
# Make sure to not change the function names or arguments.
def forward(x, b, shift_and_log_scale_fn):
"""
This function takes the input Tensor x, binary mask b and callable
shift_and_log_scale_fn as arguments.
This function should implement the forward transformation in equation (5)
and return the output Tensor y, which will have the same shape as x
"""
shift, log_scale = shift_and_log_scale_fn(x * b)
return x * b + (1 - b) * (x * tf.exp(log_scale) + shift)
def inverse(y, b, shift_and_log_scale_fn):
"""
    This function takes the input Tensor y, binary mask b and callable
    shift_and_log_scale_fn as arguments.
    This function should implement the inverse transformation in equation (6)
    and return the output Tensor x, which will have the same shape as y
"""
shift, log_scale = shift_and_log_scale_fn(y * b)
return y * b + (1 - b) * (y - shift) * tf.exp(-log_scale)
```
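Although it is not graded, it is worth checking numerically that `inverse` undoes `forward`. The short sketch below reuses the `conv_resnet` model created earlier in the notebook:
```python
# Sanity check: inverse(forward(x)) should recover x up to numerical error
x = tf.random.normal((4, 32, 32, 3))
b = channel_binary_mask(3, orientation=0)
y = forward(x, b, conv_resnet)
print(tf.reduce_max(tf.abs(inverse(y, b, conv_resnet) - x)).numpy())  # should be ~0
```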
```python
x = tf.random.normal((16, 32, 32, 3))
print(x.shape[-1])
b = channel_binary_mask(x.shape[-1], 1)
print(b)
forward(x, b, conv_resnet).shape
```
3
tf.Tensor([[[1. 1. 0.]]], shape=(1, 1, 3), dtype=float32)
TensorShape([16, 32, 32, 3])
The new bijector class also requires the `log_det_jacobian` methods to be implemented. Recall that the log of the Jacobian determinant of the forward transformation is given by $\sum_{j}s(x_{1:d})_j$, where $s$ is the log-scale function of the affine coupling layer.
You should now complete the following functions to define the `forward_log_det_jacobian` and `inverse_log_det_jacobian` methods of the affine coupling layer bijector.
* Both functions `forward_log_det_jacobian` and `inverse_log_det_jacobian` take an input Tensor `x` (or `y`), a rank-3 binary mask `b`, and the `shift_and_log_scale_fn` callable
* These arguments are the same as the description for the `forward` and `inverse` functions
* The `forward_log_det_jacobian` function should implement the log of the Jacobian determinant for the transformation $(5)$
* The `inverse_log_det_jacobian` function should implement the log of the Jacobian determinant for the transformation $(6)$
* Both functions should reduce sum over the last three axes of the input Tensor (`height`, `width` and `channels`)
```python
#### GRADED CELL ####
# Complete the following functions.
# Make sure to not change the function names or arguments.
def forward_log_det_jacobian(x, b, shift_and_log_scale_fn):
"""
This function takes the input Tensor x, binary mask b and callable
shift_and_log_scale_fn as arguments.
This function should compute and return the log of the Jacobian determinant
of the forward transformation in equation (5)
"""
shift, log_scale = shift_and_log_scale_fn(x * b)
return tf.reduce_sum(log_scale * (1-b), axis=[-1,-2,-3])
def inverse_log_det_jacobian(y, b, shift_and_log_scale_fn):
"""
This function takes the input Tensor y, binary mask b and callable
shift_and_log_scale_fn as arguments.
This function should compute and return the log of the Jacobian determinant
    of the inverse transformation in equation (6)
"""
shift, log_scale = shift_and_log_scale_fn(y * b)
return -tf.reduce_sum(log_scale * (1-b), axis=[-1,-2,-3])
```
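Since the two transformations are inverses of each other, their log-Jacobians evaluated at corresponding points should be negatives of each other. Here is another ungraded check, again reusing `conv_resnet`:
```python
# forward_log_det_jacobian(x) + inverse_log_det_jacobian(forward(x)) should be ~0
x = tf.random.normal((4, 32, 32, 3))
b = channel_binary_mask(3, orientation=0)
y = forward(x, b, conv_resnet)
fldj = forward_log_det_jacobian(x, b, conv_resnet)
ildj = inverse_log_det_jacobian(y, b, conv_resnet)
print(tf.reduce_max(tf.abs(fldj + ildj)).numpy())
```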
You are now ready to create the coupling layer bijector, using bijector subclassing. You should complete the class below to define the `AffineCouplingLayer`.
* You should complete the initialiser `__init__`, and the internal class method `_get_mask`
* The `_forward`, `_inverse`, `_forward_log_det_jacobian` and `_inverse_log_det_jacobian` methods are completed for you using the functions you have written above. Do not modify these methods
* The initialiser takes the `shift_and_log_scale_fn` callable, `mask_type` string (either `"checkerboard"` or `"channel"`) and `orientation` (integer, either `0` or `1`) as required arguments, and allows for extra keyword arguments
* The required arguments should be set as class attributes in the initialiser (note that the `shift_and_log_scale_fn` attribute is being used in the `_forward`, `_inverse`, `_forward_log_det_jacobian` and `_inverse_log_det_jacobian` methods)
* The initialiser should call the base class initialiser, and pass in any extra keyword arguments
* The class should have a required number of event dimensions equal to 3
* The internal method `_get_mask` takes a `shape` as an argument, which is the shape of an input Tensor
* This method should use the `checkerboard_binary_mask` and `channel_binary_mask` functions above, as well as the `mask_type` and `orientation` arguments passed to the initialiser to compute and return the required binary mask
* This method is used in each of the `_forward`, `_inverse`, `_forward_log_det_jacobian` and `_inverse_log_det_jacobian` methods
```python
#### GRADED CELL ####
# Complete the following class.
# Make sure to not change the class or method names or arguments.
class AffineCouplingLayer(tfb.Bijector):
"""
Class to implement the affine coupling layer.
Complete the __init__ and _get_mask methods according to the instructions above.
"""
def __init__(self, shift_and_log_scale_fn, mask_type, orientation, **kwargs):
"""
The class initialiser takes the shift_and_log_scale_fn callable, mask_type,
orientation and possibly extra keywords arguments. It should call the
base class initialiser, passing any extra keyword arguments along.
It should also set the required arguments as class attributes.
"""
super().__init__(**kwargs, forward_min_event_ndims = 3)
self.shift_and_log_scale_fn = shift_and_log_scale_fn
self.mask_type = mask_type
self.orientation = orientation
def _get_mask(self, shape):
"""
This internal method should use the binary mask functions above to compute
and return the binary mask, according to the arguments passed in to the
initialiser.
"""
        if self.mask_type == 'checkerboard':
            # checkerboard_binary_mask expects only the (height, width) part of the shape
            return checkerboard_binary_mask(shape[1:3], self.orientation)
        else:
            return channel_binary_mask(shape[-1], self.orientation)
def _forward(self, x):
b = self._get_mask(x.shape)
return forward(x, b, self.shift_and_log_scale_fn)
def _inverse(self, y):
b = self._get_mask(y.shape)
return inverse(y, b, self.shift_and_log_scale_fn)
def _forward_log_det_jacobian(self, x):
b = self._get_mask(x.shape)
return forward_log_det_jacobian(x, b, self.shift_and_log_scale_fn)
def _inverse_log_det_jacobian(self, y):
b = self._get_mask(y.shape)
return inverse_log_det_jacobian(y, b, self.shift_and_log_scale_fn)
```
```python
# Test your function by creating an instance of the AffineCouplingLayer class
affine_coupling_layer = AffineCouplingLayer(conv_resnet, 'channel', orientation=1,
name='affine_coupling_layer')
```
```python
# The following should return a Tensor of the same shape as the input
affine_coupling_layer.forward(tf.random.normal((16, 32, 32, 3))).shape
```
TensorShape([16, 32, 32, 3])
```python
# The following should compute a log_det_jacobian for each event in the batch
affine_coupling_layer.forward_log_det_jacobian(tf.random.normal((16, 32, 32, 3)), event_ndims=3).shape
```
TensorShape([16])
#### Combining the affine coupling layers
In the affine coupling layer, part of the input remains unchanged in the transformation $(5)$. In order to allow transformation of all of the input, several coupling layers are composed, with the orientation of the mask being reversed in subsequent layers.
<center>Figure 2. RealNVP alternates the orientation of masks from one affine coupling layer to the next. From the original paper.</center>
Our model design will be similar to the original architecture; we will compose three affine coupling layers with checkerboard masking, followed by a batch normalization bijector (`tfb.BatchNormalization` is a built-in bijector), followed by a squeezing operation, followed by three more affine coupling layers with channel-wise masking and a final batch normalization bijector.
The squeezing operation divides the spatial dimensions into 2x2 squares, and reshapes a Tensor of shape `(H, W, C)` into a Tensor of shape `(H // 2, W // 2, 4 * C)` as shown in Figure 1.
The squeezing operation is also a bijective operation, and has been provided for you in the class below.
```python
# Bijector class for the squeezing operation
class Squeeze(tfb.Bijector):
def __init__(self, name='Squeeze', **kwargs):
super(Squeeze, self).__init__(forward_min_event_ndims=3, is_constant_jacobian=True,
name=name, **kwargs)
def _forward(self, x):
input_shape = x.shape
height, width, channels = input_shape[-3:]
y = tfb.Reshape((height // 2, 2, width // 2, 2, channels), event_shape_in=(height, width, channels))(x)
y = tfb.Transpose(perm=[0, 2, 1, 3, 4])(y)
y = tfb.Reshape((height // 2, width // 2, 4 * channels),
event_shape_in=(height // 2, width // 2, 2, 2, channels))(y)
return y
def _inverse(self, y):
input_shape = y.shape
height, width, channels = input_shape[-3:]
x = tfb.Reshape((height, width, 2, 2, channels // 4), event_shape_in=(height, width, channels))(y)
x = tfb.Transpose(perm=[0, 2, 1, 3, 4])(x)
x = tfb.Reshape((2 * height, 2 * width, channels // 4),
event_shape_in=(height, 2, width, 2, channels // 4))(x)
return x
def _forward_log_det_jacobian(self, x):
return tf.constant(0., x.dtype)
def _inverse_log_det_jacobian(self, y):
return tf.constant(0., y.dtype)
def _forward_event_shape_tensor(self, input_shape):
height, width, channels = input_shape[-3], input_shape[-2], input_shape[-1]
return height // 2, width // 2, 4 * channels
def _inverse_event_shape_tensor(self, output_shape):
height, width, channels = output_shape[-3], output_shape[-2], output_shape[-1]
return height * 2, width * 2, channels // 4
```
You can see the effect of the squeezing operation on some example inputs in the cells below. In the forward transformation, each spatial dimension is halved, whilst the channel dimension is multiplied by 4. The opposite happens in the inverse transformation.
```python
# Test the Squeeze bijector
squeeze = Squeeze()
squeeze(tf.ones((10, 32, 32, 3))).shape
```
TensorShape([10, 16, 16, 12])
```python
# Test the inverse operation
squeeze.inverse(tf.ones((10, 4, 4, 96))).shape
```
TensorShape([10, 8, 8, 24])
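To make the pixel rearrangement concrete, here is a small ungraded example on a 2x2 single-channel image. Based on the reshape and transpose operations in the class above, the four pixels should end up in the four output channels in row-major order:
```python
# Squeeze a single 2x2x1 image: expect shape (1, 1, 1, 4) with channels [1., 2., 3., 4.]
x = tf.constant([[1., 2.], [3., 4.]])[tf.newaxis, ..., tf.newaxis]
squeeze(x)
```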
We can now construct a block of coupling layers according to the architecture described above. You should complete the following function to chain together the bijectors that we have constructed, to form a bijector that performs the following operations in the forward transformation:
* Three `AffineCouplingLayer` bijectors with `"checkerboard"` masking with orientations `0, 1, 0` respectively
* A `BatchNormalization` bijector
* A `Squeeze` bijector
* Three more `AffineCouplingLayer` bijectors with `"channel"` masking with orientations `0, 1, 0` respectively
* Another `BatchNormalization` bijector
The function takes the following arguments:
* `shift_and_log_scale_fns`: a list or tuple of six conv_resnet models
* The first three models in this list are used in the three coupling layers with checkerboard masking
* The last three models in this list are used in the three coupling layers with channel masking
* `squeeze`: an instance of the `Squeeze` bijector
_NB: at this point, we would like to point out that we are following the exposition in the original paper, and think of the forward transformation as acting on the input image. Note that this is in contrast to the convention of using the forward transformation for sampling, and the inverse transformation for computing log probs._
```python
#### GRADED CELL ####
# Complete the following function.
# Make sure to not change the function name or arguments.
def realnvp_block(shift_and_log_scale_fns, squeeze):
"""
This function takes a list or tuple of six conv_resnet models, and an
instance of the Squeeze bijector.
The function should construct the chain of bijectors described above,
using the conv_resnet models in the coupling layers.
The function should then return the chained bijector.
"""
    # Build the list of bijectors in forward order, then reverse it for
    # tfb.Chain, which applies the last bijector in its list first
    bijectors = []
    for fn, orientation in zip(shift_and_log_scale_fns[:3], [0, 1, 0]):
        bijectors.append(AffineCouplingLayer(fn, 'checkerboard', orientation=orientation))
    bijectors.append(tfb.BatchNormalization())
    bijectors.append(squeeze)
    for fn, orientation in zip(shift_and_log_scale_fns[3:], [0, 1, 0]):
        bijectors.append(AffineCouplingLayer(fn, 'channel', orientation=orientation))
    bijectors.append(tfb.BatchNormalization())
    return tfb.Chain(bijectors[::-1])
```
```python
# Run your function to create an instance of the bijector
checkerboard_fns = []
for _ in range(3):
checkerboard_fns.append(get_conv_resnet((32, 32, 3), 512))
channel_fns = []
for _ in range(3):
channel_fns.append(get_conv_resnet((16, 16, 12), 512))
block = realnvp_block(checkerboard_fns + channel_fns, squeeze)
```
```python
# Test the bijector on a dummy input
block.forward(tf.random.normal((10, 32, 32, 3))).shape
```
TensorShape([10, 16, 16, 12])
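Since the block is a bijector, its inverse should map a Tensor of the output shape back to the input shape:
```python
# The inverse should map back to the original image shape: expect TensorShape([10, 32, 32, 3])
block.inverse(tf.random.normal((10, 16, 16, 12))).shape
```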
#### Multiscale architecture
The final component of the RealNVP is the multiscale architecture. The squeeze operation reduces the spatial dimensions but increases the channel dimensions. After one of the blocks of coupling-squeeze-coupling that you have implemented above, half of the dimensions are factored out as latent variables, while the other half is further processed through subsequent layers. This results in latent variables that represent different scales of features in the model.
<center>Figure 3. RealNVP creates latent variables at different scales by factoring out half of the dimensions at each scale. From the original paper.</center>
The final scale does not use the squeezing operation, and instead applies four affine coupling layers with alternating checkerboard masks.
The multiscale architecture for two latent variable scales is implemented for you in the following bijector.
```python
# Bijector to implement the multiscale architecture
class RealNVPMultiScale(tfb.Bijector):
def __init__(self, **kwargs):
super(RealNVPMultiScale, self).__init__(forward_min_event_ndims=3, **kwargs)
# First level
shape1 = (32, 32, 3) # Input shape
shape2 = (16, 16, 12) # Shape after the squeeze operation
shape3 = (16, 16, 6) # Shape after factoring out the latent variable
self.conv_resnet1 = get_conv_resnet(shape1, 64)
self.conv_resnet2 = get_conv_resnet(shape1, 64)
self.conv_resnet3 = get_conv_resnet(shape1, 64)
self.conv_resnet4 = get_conv_resnet(shape2, 128)
self.conv_resnet5 = get_conv_resnet(shape2, 128)
self.conv_resnet6 = get_conv_resnet(shape2, 128)
self.squeeze = Squeeze()
self.block1 = realnvp_block([self.conv_resnet1, self.conv_resnet2,
self.conv_resnet3, self.conv_resnet4,
self.conv_resnet5, self.conv_resnet6], self.squeeze)
# Second level
self.conv_resnet7 = get_conv_resnet(shape3, 128)
self.conv_resnet8 = get_conv_resnet(shape3, 128)
self.conv_resnet9 = get_conv_resnet(shape3, 128)
self.conv_resnet10 = get_conv_resnet(shape3, 128)
self.coupling_layer1 = AffineCouplingLayer(self.conv_resnet7, 'checkerboard', 0)
self.coupling_layer2 = AffineCouplingLayer(self.conv_resnet8, 'checkerboard', 1)
self.coupling_layer3 = AffineCouplingLayer(self.conv_resnet9, 'checkerboard', 0)
self.coupling_layer4 = AffineCouplingLayer(self.conv_resnet10, 'checkerboard', 1)
self.block2 = tfb.Chain([self.coupling_layer4, self.coupling_layer3,
self.coupling_layer2, self.coupling_layer1])
def _forward(self, x):
h1 = self.block1.forward(x)
z1, h2 = tf.split(h1, 2, axis=-1)
z2 = self.block2.forward(h2)
return tf.concat([z1, z2], axis=-1)
def _inverse(self, y):
z1, z2 = tf.split(y, 2, axis=-1)
h2 = self.block2.inverse(z2)
h1 = tf.concat([z1, h2], axis=-1)
return self.block1.inverse(h1)
def _forward_log_det_jacobian(self, x):
log_det1 = self.block1.forward_log_det_jacobian(x, event_ndims=3)
h1 = self.block1.forward(x)
_, h2 = tf.split(h1, 2, axis=-1)
log_det2 = self.block2.forward_log_det_jacobian(h2, event_ndims=3)
return log_det1 + log_det2
def _inverse_log_det_jacobian(self, y):
z1, z2 = tf.split(y, 2, axis=-1)
h2 = self.block2.inverse(z2)
log_det2 = self.block2.inverse_log_det_jacobian(z2, event_ndims=3)
h1 = tf.concat([z1, h2], axis=-1)
log_det1 = self.block1.inverse_log_det_jacobian(h1, event_ndims=3)
return log_det1 + log_det2
def _forward_event_shape_tensor(self, input_shape):
height, width, channels = input_shape[-3], input_shape[-2], input_shape[-1]
return height // 4, width // 4, 16 * channels
def _inverse_event_shape_tensor(self, output_shape):
height, width, channels = output_shape[-3], output_shape[-2], output_shape[-1]
return 4 * height, 4 * width, channels // 16
```
```python
# Create an instance of the multiscale architecture
multiscale_bijector = RealNVPMultiScale()
```
#### Data preprocessing bijector
We will also preprocess the image data before sending it through the RealNVP model. To do this, for a Tensor $x$ of pixel values in $[0, 1]^D$, we transform $x$ according to the following:
$$
T(x) = \text{logit}\left(\alpha + (1 - 2\alpha)x\right),\tag{7}
$$
where $\alpha$ is a parameter, and the logit function, which is the inverse of the sigmoid function, is given by
$$
\text{logit}(p) = \log (p) - \log (1 - p).
$$
You should now complete the following function to construct this bijector from in-built bijectors from the bijectors module.
* The function takes the parameter `alpha` as an input, which you can assume to take a small positive value ($\ll0.5$)
* The function should construct and return a bijector that computes $(7)$ in the forward pass
```python
#### GRADED CELL ####
# Complete the following function.
# Make sure to not change the function name or arguments.
def get_preprocess_bijector(alpha):
"""
This function should create a chained bijector that computes the
transformation T in equation (7) above.
This can be computed using in-built bijectors from the bijectors module.
Your function should then return the chained bijector.
"""
    # NB: tfb.Chain applies its list right-to-left, so the forward pass is
    # scale, then shift, then logit, matching equation (7)
    return tfb.Chain([tfb.Invert(tfb.Sigmoid()),
                      tfb.Shift(shift=alpha),
                      tfb.Scale(scale=(1 - 2 * alpha))])
```
```python
# Create an instance of the preprocess bijector
preprocess = get_preprocess_bijector(0.05)
```
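As a quick ungraded check, the bijector's forward pass should agree with a direct computation of equation (7):
```python
# Compare the bijector against a manual implementation of T(x)
x = tf.random.uniform((2, 3))
alpha = 0.05
u = alpha + (1 - 2 * alpha) * x
manual = tf.math.log(u) - tf.math.log(1. - u)
print(tf.reduce_max(tf.abs(preprocess.forward(x) - manual)).numpy())  # should be ~0
```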
#### Train the RealNVP model
Finally, we will train our RealNVP model on the dataset.
We will use the following model class to help with the training process.
```python
# Helper class for training
class RealNVPModel(Model):
def __init__(self, **kwargs):
super(RealNVPModel, self).__init__(**kwargs)
self.preprocess = get_preprocess_bijector(0.05)
self.realnvp_multiscale = RealNVPMultiScale()
self.bijector = tfb.Chain([self.realnvp_multiscale, self.preprocess])
def build(self, input_shape):
output_shape = self.bijector(tf.expand_dims(tf.zeros(input_shape[1:]), axis=0)).shape
self.base = tfd.Independent(tfd.Normal(loc=tf.zeros(output_shape[1:]), scale=1.),
reinterpreted_batch_ndims=3)
self._bijector_variables = (
list(self.bijector.variables))
self.flow = tfd.TransformedDistribution(
distribution=self.base,
bijector=tfb.Invert(self.bijector),
)
super(RealNVPModel, self).build(input_shape)
def call(self, inputs, training=None, **kwargs):
return self.flow
def sample(self, batch_size):
sample = self.base.sample(batch_size)
return self.bijector.inverse(sample)
```
```python
# Create an instance of the RealNVPModel class
realnvp_model = RealNVPModel()
realnvp_model.build((1, 32, 32, 3))
```
```python
# Compute the number of variables in the model
print("Total trainable variables:")
print(sum([np.prod(v.shape) for v in realnvp_model.trainable_variables]))
```
Total trainable variables:
315198
Note that the model's `call` method returns the `TransformedDistribution` object. Also, we have set up our datasets to return the input image twice as a 2-tuple. This is so we can train our model with negative log-likelihood as normal.
```python
# Define the negative log-likelihood loss function
def nll(y_true, y_pred):
return -y_pred.log_prob(y_true)
```
It is recommended to use the GPU accelerator hardware on Colab to train this model, as it can take some time to train. Note that it is not required to train the model in order to pass this assignment. For optimal results, a larger model should be trained for longer.
```python
# Compile and train the model
realnvp_model.compile(loss=nll, optimizer=Adam())
realnvp_model.fit(train_ds, validation_data=val_ds, epochs=20)
```
```python
# Evaluate the model
realnvp_model.evaluate(test_ds)
```
#### Generate some samples
```python
# Sample from the model
samples = realnvp_model.sample(8).numpy()
```
```python
# Display the samples
n_img = 8
f, axs = plt.subplots(2, n_img // 2, figsize=(14, 7))
for k, image in enumerate(samples):
i = k % 2
j = k // 2
axs[i, j].imshow(np.clip(image, 0., 1.))
axs[i, j].axis('off')
f.subplots_adjust(wspace=0.01, hspace=0.03)
```
Congratulations on completing this programming assignment! In the next week of the course we will look at the variational autoencoder.
|
lemma order_root: "poly p a = 0 \<longleftrightarrow> p = 0 \<or> order a p \<noteq> 0" (is "?lhs = ?rhs") |
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj17synthconj1 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (mult (plus (mult lv0 lv1) lv1) lv2) (mult (mult lv1 lv2) (Succ lv0))).
Admitted.
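(* Note: the equation holds algebraically, since
   (lv0*lv1 + lv1)*lv2 = lv1*(Succ lv0)*lv2 = (lv1*lv2)*(Succ lv0)
   by distributivity and commutativity, so QuickChick should find no
   counterexample. *)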
QuickChick conj17synthconj1.
|
Require Import List.
Require Import FSets.
Require FSetAVL.
Require Peano_dec.
Require Import Setoid.
Require Import OptionMonad.
Require Import OptionExt.
Require Import BoolExt.
Require Import ILLInterfaces.
Require Import ResourceAlgebra.
Require Import Arith.
Require Import Omega.
Require Import BasicMachineTypes.
Module MkILLSyntax
(B : BASICS)
(BASESYN : ILL_BASE_SYNTAX B)
: ILL_SYNTAX B BASESYN.
Import BASESYN.
Definition context := list formula.
Inductive prf_term : Set :=
| t_lvar : nat -> prf_term
| t_ivar : nat -> prf_term
| t_i_intro : prf_term
| t_tensor_intro : prf_term -> prf_term -> prf_term
| t_tensor_elim : prf_term -> prf_term -> prf_term
| t_and_intro : prf_term -> prf_term -> prf_term
| t_and_elim1 : prf_term -> prf_term
| t_and_elim2 : prf_term -> prf_term
| t_lolli_intro : formula -> prf_term -> prf_term
| t_lolli_elim : prf_term -> prf_term -> prf_term
| t_bang_intro : prf_term -> prf_term
| t_bang_elim : prf_term -> prf_term -> prf_term
| t_axiom : axiom_name -> prf_term -> prf_term
| t_let : prf_term -> prf_term -> prf_term.
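(* Proof terms use de Bruijn indices: [t_lvar n] refers to the n-th formula
   of the linear context and [t_ivar n] to the n-th formula of the
   intuitionistic context. [l_shift d c t] below adds [d] to every linear
   variable of [t] whose index is at least [c]. *)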
Fixpoint l_shift (d c:nat) (t:prf_term) {struct t} : prf_term :=
match t with
| t_lvar k => if le_lt_dec c k then t_lvar (d + k) else t_lvar k
| t_ivar k => t_ivar k
| t_i_intro => t_i_intro
| t_tensor_intro t1 t2 => t_tensor_intro (l_shift d c t1) (l_shift d c t2)
| t_tensor_elim t1 t2 => t_tensor_elim (l_shift d c t1) (l_shift d (S (S c)) t2)
| t_and_intro t1 t2 => t_and_intro (l_shift d c t1) (l_shift d c t2)
| t_and_elim1 t => t_and_elim1 (l_shift d c t)
| t_and_elim2 t => t_and_elim2 (l_shift d c t)
| t_lolli_intro A t => t_lolli_intro A (l_shift d (S c) t)
| t_lolli_elim t1 t2 => t_lolli_elim (l_shift d c t1) (l_shift d c t2)
| t_bang_intro t => t_bang_intro (l_shift d c t)
| t_bang_elim t1 t2 => t_bang_elim (l_shift d c t1) (l_shift d c t2)
| t_axiom nm t => t_axiom nm (l_shift d c t)
| t_let t1 t2 => t_let (l_shift d c t1) (l_shift d (S c) t2)
end.
Definition var_subst : nat -> nat -> prf_term -> prf_term.
intros k j t'.
destruct (lt_eq_lt_dec k j) as [[k_lt_j | k_eq_j] | j_lt_k].
exact (t_lvar k).
exact t'.
destruct (O_or_S k) as [[m k_eq_Sm] | k_eq_0].
exact (t_lvar m).
subst k. elimtype False. apply (lt_n_O j j_lt_k).
Defined.
Fixpoint l_subst (t:prf_term) (s:prf_term) (j:nat) {struct t} : prf_term :=
match t with
| t_lvar k => var_subst k j s
| t_ivar k => t_ivar k
| t_i_intro => t_i_intro
| t_tensor_intro t1 t2 => t_tensor_intro (l_subst t1 s j) (l_subst t2 s j)
| t_tensor_elim t1 t2 => t_tensor_elim (l_subst t1 s j) (l_subst t2 (l_shift 2 0 s) (S (S j)))
| t_and_intro t1 t2 => t_and_intro (l_subst t1 s j) (l_subst t2 s j)
| t_and_elim1 t => t_and_elim1 (l_subst t s j)
| t_and_elim2 t => t_and_elim2 (l_subst t s j)
| t_lolli_intro A t => t_lolli_intro A (l_subst t (l_shift 1 0 s) (S j))
| t_lolli_elim t1 t2 => t_lolli_elim (l_subst t1 s j) (l_subst t2 s j)
| t_bang_intro t => t_bang_intro (l_subst t s j)
| t_bang_elim t1 t2 => t_bang_elim (l_subst t1 s j) (l_subst t2 s j)
| t_axiom nm t => t_axiom nm (l_subst t s j)
| t_let t1 t2 => t_let (l_subst t1 s j) (l_subst t2 (l_shift 1 0 s) (S j))
end.
Module VarSet <: FSetInterface.Sfun Nat_as_OT := FSetAVL.Make Nat_as_OT.
(*Module VarSet : FSetInterface.S with Module E := Nat_as_OT := FSetAVL.Make Nat_as_OT.*)
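(* [shift_down s] deletes index 0 from [s] and decrements every remaining
   index by one; [shift_varset d c s] adds [d] to every index of [s] that is
   at least [c], leaving smaller indices unchanged. These operations track
   usage sets of linear variables under de Bruijn shifting. *)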
Definition shift_step n s := match n with O => s | S n => VarSet.add n s end.
Definition shift_down s := VarSet.fold shift_step s VarSet.empty.
Definition shift2_step d c n s := if le_lt_dec c n then VarSet.add (d+n) s else VarSet.add n s.
Definition shift_varset d c s := VarSet.fold (shift2_step d c) s VarSet.empty.
Inductive prf : context -> context -> prf_term -> formula -> VarSet.t -> Prop :=
| prf_ivar : forall Gi G n A U,
nth_error Gi n = Some A ->
VarSet.Equal U VarSet.empty ->
prf Gi G (t_ivar n) A U
| prf_lvar : forall Gi G n A U,
nth_error G n = Some A ->
VarSet.Equal U (VarSet.singleton n) ->
prf Gi G (t_lvar n) A U
| prf_i_intro : forall Gi G U,
VarSet.Equal U VarSet.empty ->
prf Gi G t_i_intro f_i U
| prf_tensor_intro : forall Gi G t1 t2 A B U1 U2 U,
prf Gi G t1 A U1 ->
prf Gi G t2 B U2 ->
VarSet.Empty (VarSet.inter U1 U2) ->
VarSet.Equal U (VarSet.union U1 U2) ->
prf Gi G (t_tensor_intro t1 t2) (f_tensor A B) U
| prf_tensor_elim : forall Gi G t1 t2 A B C U1 U2 U,
prf Gi G t1 (f_tensor A B) U1 ->
prf Gi (A::B::G) t2 C U2 ->
VarSet.Empty (VarSet.inter U1 (shift_down (shift_down U2))) ->
VarSet.Equal U (VarSet.union U1 (shift_down (shift_down U2))) ->
prf Gi G (t_tensor_elim t1 t2) C U
| prf_and_intro : forall Gi G t1 t2 A B U1 U2 U,
prf Gi G t1 A U1->
prf Gi G t2 B U2 ->
VarSet.Equal U (VarSet.union U1 U2) ->
prf Gi G (t_and_intro t1 t2) (f_and A B) U
| prf_and_elim1 : forall Gi G t A B U,
prf Gi G t (f_and A B) U ->
prf Gi G (t_and_elim1 t) A U
| prf_and_elim2 : forall Gi G t A B U,
prf Gi G t (f_and A B) U ->
prf Gi G (t_and_elim2 t) B U
| prf_lolli_intro : forall Gi G t A B Ut U,
prf Gi (A::G) t B Ut ->
VarSet.Equal U (shift_down Ut) ->
prf Gi G (t_lolli_intro A t) (f_lolli A B) U
| prf_lolli_elim : forall Gi G t1 t2 A B U1 U2 U,
prf Gi G t1 (f_lolli A B) U1 ->
prf Gi G t2 A U2 ->
VarSet.Empty (VarSet.inter U1 U2) ->
VarSet.Equal U (VarSet.union U1 U2) ->
prf Gi G (t_lolli_elim t1 t2) B U
| prf_bang_intro : forall Gi G t A U,
prf Gi G t A U ->
VarSet.Empty U ->
prf Gi G (t_bang_intro t) (f_bang A) U
| prf_bang_elim : forall Gi G t1 t2 A B U1 U2 U,
prf Gi G t1 (f_bang A) U1 ->
prf (A::Gi) G t2 B U2 ->
VarSet.Empty (VarSet.inter U1 U2) ->
VarSet.Equal U (VarSet.union U1 U2) ->
prf Gi G (t_bang_elim t1 t2) B U
| prf_axiom : forall Gi G t ax U,
prf Gi G t (axiom_domain ax) U ->
prf Gi G (t_axiom ax t) (axiom_codomain ax) U
| prf_let : forall Gi G t1 t2 A B U1 Ut2 U2 U,
prf Gi G t1 A U1 ->
prf Gi (A::G) t2 B Ut2 ->
VarSet.Equal U2 (shift_down Ut2) ->
VarSet.Empty (VarSet.inter U1 U2) ->
VarSet.Equal U (VarSet.union U1 U2) ->
prf Gi G (t_let t1 t2) B U.
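(* [prf Gi G t A U] reads: under intuitionistic context [Gi] and linear
   context [G], the proof term [t] proves formula [A] using exactly the set
   [U] of linear variables. The [VarSet.Empty (VarSet.inter ...)] side
   conditions enforce that subproofs do not share linear variables. *)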
Module VarSetProps := Properties VarSet.
Lemma varset_setoid : Setoid_Theory VarSet.t VarSet.Equal.
Proof.
constructor. unfold Reflexive. reflexivity. unfold Symmetric. symmetry. assumption. intros. unfold Transitive. apply VarSet.eq_trans.
Qed.
Lemma prf_mor_aux : forall Gi G t A U1 U2, VarSet.Equal U1 U2 -> prf Gi G t A U1 -> prf Gi G t A U2.
intros.
generalize U2 H. clear U2 H.
induction H0; intros U2' U2'_eq_U; econstructor; eauto; rewrite <- U2'_eq_U; assumption.
Save.
Add Morphism prf with signature (@eq context) ==> (@eq context) ==> (@eq prf_term) ==> (@eq formula) ==> VarSet.Equal ==> iff as prf_mor.
intuition eauto using prf_mor_aux, VarSetProps.equal_sym.
Save.
Add Morphism shift_step with signature (@eq nat) ==> VarSet.Equal ==> VarSet.Equal as shift_step_mor.
intros. unfold shift_step. destruct y.
assumption.
rewrite H. reflexivity.
Save.
Add Morphism shift2_step with signature (@eq nat) ==> (@eq nat) ==> (@eq nat) ==> VarSet.Equal ==> VarSet.Equal as shift2_step_mor.
unfold shift2_step. intros. destruct (le_lt_dec y0 y1); rewrite H; reflexivity.
Save.
Add Morphism shift_varset with signature (@eq nat) ==> (@eq nat) ==> VarSet.Equal ==> VarSet.Equal as shift_varset_mor.
unfold shift_varset. intros. refine (VarSetProps.fold_equal varset_setoid _ _ _ H).
unfold compat_op. apply shift2_step_mor_Proper; auto.
unfold transpose. intros. unfold shift2_step.
destruct (le_lt_dec y0 x0); destruct (le_lt_dec y0 y2); rewrite VarSetProps.add_add; reflexivity.
Save.
Add Morphism shift_down with signature VarSet.Equal ==> VarSet.Equal as shift_down_mor.
unfold shift_down. intros. refine (VarSetProps.fold_equal varset_setoid _ _ _ H).
unfold compat_op. apply shift_step_mor_Proper.
unfold transpose. intros. unfold shift_step.
destruct x0; destruct y0; try reflexivity; eauto using VarSetProps.add_add.
Save.
Lemma list_empty : forall l,
(forall x, ~InA (@eq nat) x l) -> l = nil.
destruct l; intros.
reflexivity.
elimtype False. apply (H n). constructor. reflexivity.
Save.
Lemma elements_empty : VarSet.elements VarSet.empty = nil.
apply list_empty. intros. unfold not. intro. apply (VarSet.empty_1 (VarSet.elements_2 H)).
Save.
Lemma shift_varset_empty : forall d c,
VarSet.Equal (shift_varset d c VarSet.empty) VarSet.empty.
intros. unfold shift_varset. rewrite VarSet.fold_1. rewrite elements_empty. reflexivity.
Save.
Lemma list_single : forall l y,
(forall x, InA (@eq nat) x l <-> x=y) ->
sort Nat_as_OT.lt l ->
l = y::nil.
destruct l; intros.
assert (InA (@eq nat) y nil). rewrite H. reflexivity. inversion H1.
assert (n = y). rewrite <- H. constructor. reflexivity.
subst. destruct l.
reflexivity.
assert (n = y). rewrite <- H. right. constructor. reflexivity.
subst. inversion H0. inversion H4. elimtype False. refine (Nat_as_OT.lt_not_eq H6 _). unfold Nat_as_OT.eq. reflexivity.
Save.
Lemma elements_singleton : forall n, VarSet.elements (VarSet.singleton n) = n::nil.
intros. apply list_single.
intros. split; intro.
symmetry. eapply VarSet.singleton_1. apply VarSet.elements_2. assumption.
apply VarSet.elements_1. apply VarSet.singleton_2. unfold VarSet.E.eq. symmetry. assumption.
apply VarSet.elements_3.
Save.
Lemma shift_varset_singleton : forall d c n,
c <= n ->
VarSet.Equal (shift_varset d c (VarSet.singleton n)) (VarSet.singleton (d + n)).
intros. unfold shift_varset. rewrite VarSet.fold_1. rewrite elements_singleton. simpl. unfold shift2_step.
destruct (le_lt_dec c n).
auto with set.
elimtype False. omega.
Save.
Lemma shift_varset_singleton2 : forall d c n,
n < c ->
VarSet.Equal (shift_varset d c (VarSet.singleton n)) (VarSet.singleton n).
intros. unfold shift_varset. rewrite VarSet.fold_1. rewrite elements_singleton. simpl. unfold shift2_step.
destruct (le_lt_dec c n).
elimtype False. omega.
auto with set.
Save.
Lemma lookup_length : forall (A:Set) (G G'':list A) a, nth_error (G'' ++ a :: G) (length G'') = Some a.
induction G''; intros; simpl; intuition eauto.
Save.
Lemma lookup_length2 : forall (A:Set) (G G'':list A) a n, nth_error (G'' ++ a :: G) (length G'' + S n) = nth_error G n.
induction G''; intros; simpl; auto.
Save.
Lemma lookup_length3 : forall (A:Set) (G:list A) n a, nth_error G n = Some a -> n < length G.
induction G; intros. destruct n; simpl; discriminate.
destruct n; simpl in *; auto with arith.
apply lt_n_S. eapply IHG; eauto.
Qed.
Lemma lookup_suffix : forall (A:Set) (G G' G'':list A) n a,
length G' <= n ->
nth_error (G' ++ G) n = Some a ->
nth_error (G' ++ G'' ++ G) (length G'' + n) = Some a.
intros A G G' G'' n a. generalize G'. clear G'. induction n; intros; destruct G'.
destruct G.
discriminate.
inversion H0. simpl. replace (length G''+0) with (length G'') by omega. apply lookup_length.
simpl in H. inversion H.
destruct G.
discriminate.
simpl in *. rewrite lookup_length2. assumption.
replace (length G'' + S n) with (S (length G'' + n)) by omega. simpl in *. apply IHn.
omega. assumption.
Save.
Lemma lookup_prefix : forall (A:Set) (G G'' G':list A) n a,
n < length G' ->
nth_error (G' ++ G) n = Some a ->
nth_error (G' ++ G'' ++ G) n = Some a.
intros A G G'' G' n a. generalize G'. clear G'. induction n; intros; (destruct G'; [inversion H|]).
trivial.
simpl in *. apply IHn. omega. assumption.
Save.
Lemma add_not_empty : forall x s, ~VarSet.Empty (VarSet.add x s).
unfold VarSet.Empty. unfold not. intros.
eapply H. apply VarSet.add_1. reflexivity.
Save.
(* Hmm... the hintsdb for set seems to be screwed up with 8.3. *)
Hint Resolve VarSet.add_1 VarSet.add_2 VarSet.add_3 : set.
Lemma add_equal : forall x s s', VarSetProps.Add x s s' -> VarSet.Equal s' (VarSet.add x s).
intros. unfold VarSetProps.Add in H. unfold VarSet.Equal. intro. split; intro.
rewrite H in H0. intuition eauto with set.
rewrite H. destruct (eq_nat_dec x a); [left|right]; eauto with set.
Save.
Lemma shift2_step_twice : forall d c x U, VarSet.Equal (shift2_step d c x (shift2_step d c x U)) (shift2_step d c x U).
unfold shift2_step. intros.
destruct (le_lt_dec c x); apply VarSetProps.add_equal; apply VarSet.add_1; reflexivity.
Save.
Lemma shift2_step_transpose : forall d c x y U, VarSet.Equal (shift2_step d c x (shift2_step d c y U)) (shift2_step d c y (shift2_step d c x U)).
unfold shift2_step. intros.
destruct (le_lt_dec c x); destruct (le_lt_dec c y); rewrite VarSetProps.add_add; reflexivity.
Save.
(* FIXME: need to redo SetoidList.fold_right_add for when f is idempotent, like shift2_step is *)
(*
Lemma fold_right_add_2 : forall d c i s' s x,
NoDup s ->
NoDup s' ->
(forall y, InA (@eq nat) y s' <-> x = y \/ InA (@eq nat) y s) ->
VarSet.Equal (fold_right (shift2_step d c) i s') (shift2_step d c x (fold_right (shift2_step d c) i s)).
induction s'.
(* base case *)
intros. assert (InA (@eq nat) x nil). rewrite H1. auto. inversion H2.
(* step case *)
simpl. intros.
destruct (eq_nat_dec x a).
subst. rewrite (IHs' s a).
apply H.
inversion H0. apply H5.
intro. split; intro.
rewrite <- H1. right. apply H2.
destruct H2. subst.
apply H1.
simpl.
apply IH. reflexivity.
Lemma shift_varset_add : forall d c s x,
VarSet.Equal (shift_varset d c (VarSet.add x s)) (shift2_step d c x (shift_varset d c s)).
intros.
elim s using VarSetProps.set_induction; intros.
(* base *)
rewrite (VarSetProps.empty_is_empty_1 H). rewrite shift_varset_empty.
rewrite <- VarSetProps.singleton_equal_add. unfold shift2_step. destruct (le_lt_dec c x).
rewrite shift_varset_singleton; auto using VarSetProps.singleton_equal_add.
rewrite shift_varset_singleton2; auto using VarSetProps.singleton_equal_add.
(* step *)
assert (VarSet.Equal s' (VarSet.add x0 s0)). auto using add_equal.
rewrite H2. destruct (eq_nat_dec x x0).
subst. setoid_replace (VarSet.add x0 (VarSet.add x0 s0)) with (VarSet.add x0 s0).
rewrite H. rewrite shift2_step_twice. reflexivity.
apply VarSetProps.add_equal. apply VarSet.add_1. reflexivity.
rewrite <- H2.
destruct (VarSetProps.fold_0 (VarSet.add x s) VarSet.empty (shift2_step d c)) as [l [l_nodup [l_eq_xs l_fold_eq]]].
destruct (VarSetProps.fold_0 s VarSet.empty (shift2_step d c)) as [l2 [l2_nodup [l2_eq_xs l2_fold_eq]]].
rewrite l_fold_eq. rewrite l2_fold_eq. clear l_fold_eq l2_fold_eq.
setoid_replace (shift2_step d c x (fold_right (shift2_step d c) VarSet.empty l2))
with (fold_right (shift2_step d c) VarSet.empty (x::l2)).
generalize
simpl. reflexivity.
*)
Lemma In_shift_varset : forall n n' s c d,
VarSet.In n s -> n'=(if le_lt_dec c n then d+n else n) -> VarSet.In n' (shift_varset d c s).
Proof.
intros. subst n'. unfold shift_varset. apply VarSet.elements_1 in H. rewrite VarSet.fold_1. set (l:=VarSet.elements s) in *. generalize VarSet.empty. clearbody l. induction l.
inversion H.
inversion H; subst.
intros. simpl. set (r:=shift2_step d c a t) in *. assert (VarSet.In (if le_lt_dec c a then d + a else a) r).
unfold r. unfold shift2_step. destruct (le_lt_dec c a); apply VarSet.add_1; reflexivity.
clearbody r. generalize r H0. clear. induction l; intros.
simpl. assumption.
simpl. apply IHl. unfold shift2_step. destruct (le_lt_dec c a0); apply VarSet.add_2; assumption.
intros. simpl. apply IHl. assumption.
Qed.
Lemma shift_varset_In : forall n s c d,
VarSet.In n (shift_varset d c s) -> VarSet.In (if le_lt_dec c n then n-d else n) s.
Proof.
intros. unfold shift_varset in H. apply VarSet.elements_2. rewrite VarSet.fold_1 in H. set (l:=VarSet.elements s) in *. clearbody l. set (r:=VarSet.empty) in *. assert (~VarSet.In n r). apply VarSet.empty_1. clearbody r. generalize r H H0. clear. induction l; intros.
simpl in H. contradiction.
compare (if le_lt_dec c n then n - d else n) a; intros.
subst a. left. reflexivity.
right. eapply IHl. simpl in H. eassumption.
unfold shift2_step. destruct (le_lt_dec c n); destruct (le_lt_dec c a);
intro bad; destruct H0; (eapply VarSet.add_3; [|eassumption]); unfold VarSet.E.eq; omega.
Qed.
Lemma shift_varset_image : forall n s c d,
VarSet.In n (shift_varset d c s) -> n < c \/ n >= c+d.
Proof.
intros. unfold shift_varset in H. rewrite VarSet.fold_1 in H. set (l:=VarSet.elements s) in *. clearbody l. set (r:=VarSet.empty) in *. assert (forall n', VarSet.In n' r -> n'<c \/ n'>=c+d).
intros. destruct (VarSet.empty_1 H0).
clearbody r. generalize r H H0. clear. induction l; intros.
simpl in H. auto.
eapply IHl.
simpl in H. apply H.
unfold shift2_step. destruct (le_lt_dec c a); intros.
compare n' (d+a); intros. omega.
apply VarSet.add_3 in H1. auto. unfold VarSet.E.eq. auto.
compare n' a; intros. omega.
apply VarSet.add_3 in H1. auto. unfold VarSet.E.eq. auto.
Qed.
Lemma shift_varset_add : forall d c x s,
~VarSet.In x s ->
VarSet.Equal (shift_varset d c (VarSet.add x s)) (shift2_step d c x (shift_varset d c s)).
intros. unfold shift_varset. apply VarSetProps.fold_add.
apply varset_setoid.
unfold compat_op. apply shift2_step_mor_Proper; auto.
unfold transpose. intros. unfold shift2_step.
destruct (le_lt_dec c x0); destruct (le_lt_dec c y); rewrite VarSetProps.add_add; reflexivity.
assumption.
Save.
Lemma shift_down_add : forall x s,
~VarSet.In x s ->
VarSet.Equal (shift_down (VarSet.add x s)) (shift_step x (shift_down s)).
intros. unfold shift_down. apply VarSetProps.fold_add.
apply varset_setoid.
unfold compat_op. apply shift_step_mor_Proper.
unfold transpose. intros. unfold shift_step.
destruct x0; destruct y; try reflexivity; eauto using VarSetProps.add_add.
assumption.
Save.
Lemma not_in_shift_varset_1 : forall U2 d c x, ~VarSet.In x U2 -> c<=x -> ~VarSet.In (d+x) (shift_varset d c U2).
intros U2. elim U2 using VarSetProps.set_induction; intros.
rewrite (VarSetProps.empty_is_empty_1 H). rewrite shift_varset_empty. apply (VarSet.empty_1).
assert (VarSet.Equal s' (VarSet.add x s)). auto using add_equal.
rewrite H4 in H2.
rewrite H4. rewrite shift_varset_add. 2:assumption.
destruct (eq_nat_dec x x0).
elimtype False. auto using VarSet.add_1.
assert (~VarSet.In x0 s). auto using VarSet.add_2.
unfold shift2_step. destruct (le_lt_dec c x); unfold not; intros.
refine (H d _ _ H5 H3 (VarSet.add_3 (x:=d+x) _ H6)). unfold VarSet.E.eq. omega.
refine (H d _ _ H5 H3 (VarSet.add_3 (x:=x) _ H6)). unfold VarSet.E.eq. omega.
Save.
Lemma not_in_shift_varset_2 : forall d c U2 x, ~VarSet.In x U2 -> x<c -> ~VarSet.In x (shift_varset d c U2).
intros d c U2. elim U2 using VarSetProps.set_induction; intros.
rewrite (VarSetProps.empty_is_empty_1 H). rewrite shift_varset_empty. apply (VarSet.empty_1).
assert (VarSet.Equal s' (VarSet.add x s)). auto using add_equal.
rewrite H4 in H2.
rewrite H4. rewrite shift_varset_add. 2:assumption.
destruct (eq_nat_dec x x0).
elimtype False. auto using VarSet.add_1.
assert (~VarSet.In x0 s). auto using VarSet.add_2.
unfold shift2_step. destruct (le_lt_dec c x); unfold not; intros.
refine (H _ H5 H3 (VarSet.add_3 (x:=d+x) _ H6)). unfold VarSet.E.eq. omega.
refine (H _ H5 H3 (VarSet.add_3 (x:=x) _ H6)). unfold VarSet.E.eq. omega.
Save.
Lemma shift_varset_empty_inter : forall d c U1 U2,
VarSet.Empty (VarSet.inter U1 U2) ->
VarSet.Empty (VarSet.inter (shift_varset d c U1) (shift_varset d c U2)).
intros d c U1. elim U1 using VarSetProps.set_induction; intros.
rewrite (VarSetProps.empty_is_empty_1 H). rewrite shift_varset_empty. auto with set.
assert (VarSet.Equal s' (VarSet.add x s)). auto using add_equal.
rewrite H3. rewrite shift_varset_add. 2:assumption.
rewrite H3 in H2.
assert (~VarSet.In x U2). destruct (VarSetProps.In_dec x U2).
rewrite (VarSetProps.inter_add_1 s i) in H2. elimtype False. apply (add_not_empty _ _ H2).
assumption.
rewrite (VarSetProps.inter_add_2 s H4) in H2.
unfold shift2_step. destruct (le_lt_dec c x).
rewrite (VarSetProps.inter_add_2); auto using not_in_shift_varset_1.
rewrite (VarSetProps.inter_add_2); auto using not_in_shift_varset_2.
Save.
Lemma shift2_step_union : forall d c U1 U2 x,
VarSet.Equal (shift2_step d c x (VarSet.union U1 U2)) (VarSet.union (shift2_step d c x U1) U2).
intros. unfold shift2_step.
destruct (le_lt_dec c x); auto with set.
Save.
Lemma shift_varset_union : forall d c U1 U2,
VarSet.Equal (shift_varset d c (VarSet.union U1 U2)) (VarSet.union (shift_varset d c U1) (shift_varset d c U2)).
Proof.
intros. intro y. split; intro H.
set (H1 := shift_varset_image _ _ _ _ H). clearbody H1. apply shift_varset_In in H.
destruct (VarSet.union_1 H); [apply VarSet.union_2|apply VarSet.union_3]; (eapply In_shift_varset; [apply H0| destruct H1; destruct (le_lt_dec c y); destruct (le_lt_dec c (y-d)); destruct (le_lt_dec c y); omega]).
destruct (VarSet.union_1 H); set (H1 := shift_varset_image _ _ _ _ H0); clearbody H1;
apply shift_varset_In in H0 ; (apply In_shift_varset with (n:=(if le_lt_dec c y then y - d else y)); [| destruct H1; destruct (le_lt_dec c y); destruct (le_lt_dec c (y-d)); destruct (le_lt_dec c y); omega]).
apply VarSet.union_2. assumption.
apply VarSet.union_3. assumption.
Qed.
Lemma shift_down_empty : VarSet.Equal (shift_down VarSet.empty) VarSet.empty.
unfold shift_down. rewrite VarSet.fold_1. rewrite elements_empty. reflexivity.
Save.
Lemma In_shift_down : forall n U,
VarSet.In (S n) U -> VarSet.In n (shift_down U).
Proof.
intros n U H. apply VarSet.elements_1 in H. unfold shift_down. rewrite VarSet.fold_1. set (l:=VarSet.elements U) in *. clearbody l. clear U. generalize VarSet.empty. induction l.
inversion H.
intro s0. simpl. inversion H; subst.
simpl. set (s1:=VarSet.add n s0). assert (VarSet.In n s1) by (unfold s1; apply VarSet.add_1; reflexivity). generalize s1 H0. clear . induction l; intros.
simpl. assumption.
simpl. assert (VarSet.In n (shift_step a s1)) by (unfold shift_step; destruct a; auto using VarSet.add_2). apply IHl. assumption.
apply IHl. assumption.
Qed.
Lemma shift_down_In : forall n U,
VarSet.In n (shift_down U) -> VarSet.In (S n) U.
Proof.
intros n U H. apply VarSet.elements_2. unfold shift_down in H. rewrite VarSet.fold_1 in H. set (l:=VarSet.elements U) in *. clearbody l. assert (~VarSet.In n VarSet.empty). apply VarSet.empty_1. generalize H H0. generalize VarSet.empty. clear. induction l; intros.
simpl in H. contradiction.
simpl in H. compare a (S n); intro an.
subst a. left. reflexivity.
right. eapply IHl. apply H. unfold shift_step. destruct a. assumption.
assert (~VarSet.E.eq a n). change VarSet.E.eq with (eq (A:=nat)). intro an'. subst a. destruct an. reflexivity.
intro inadded. destruct H0. eapply VarSet.add_3; eauto.
Qed.
Lemma down_shift : forall d c U x, VarSet.Equal (shift_down (shift2_step d (S c) (S x) U)) (shift2_step d c x (shift_down U)).
intros. unfold shift2_step. destruct (le_lt_dec (S c) (S x)); destruct (le_lt_dec c x); try (elimtype False; clear - l l0; omega).
rewrite <- plus_Snm_nSm.
destruct (VarSetProps.In_dec (S d + x) U).
rewrite VarSetProps.add_equal; auto. rewrite VarSetProps.add_equal; [reflexivity|apply In_shift_down;assumption].
rewrite shift_down_add; auto. simpl. reflexivity.
destruct (VarSetProps.In_dec (S x) U).
rewrite VarSetProps.add_equal; auto. rewrite VarSetProps.add_equal; [reflexivity|apply In_shift_down;assumption].
rewrite shift_down_add; auto. simpl. reflexivity.
Qed.
Lemma shift_down_varset : forall d c U, VarSet.Equal (shift_down (shift_varset d (S c) U)) (shift_varset d c (shift_down U)).
intros d c U. elim U using VarSetProps.set_induction; intros.
(* base *)
rewrite (VarSetProps.empty_is_empty_1 H).
rewrite shift_down_empty. rewrite shift_varset_empty.
rewrite shift_down_empty. rewrite shift_varset_empty. reflexivity.
(* step *)
assert (VarSet.Equal s' (VarSet.add x s)). auto using add_equal.
rewrite H2.
rewrite shift_varset_add. 2:assumption.
rewrite shift_down_add. 2:assumption.
case_eq x; intros.
subst x. unfold shift2_step. simpl. rewrite shift_down_add. 2:(apply not_in_shift_varset_2; auto with arith). simpl. assumption.
simpl. rewrite shift_varset_add.
rewrite down_shift. rewrite H. reflexivity.
intro H'. destruct H0. rewrite H3. apply shift_down_In. assumption.
Qed.
(*
Lemma shift_down_varset_2 : forall s,
VarSet.Equal s (shift_down (shift_varset 1 1 s)).
Proof.
intros. elim s using VarSetProps.set_induction; intros.
rewrite (VarSetProps.empty_is_empty_1 H). rewrite shift_varset_empty. rewrite shift_down_empty. reflexivity.
assert (VarSet.Equal s' (VarSet.add x s0)). auto using add_equal.
rewrite H2.
rewrite shift_varset_add. assumption.
unfold shift2_step.
rewrite shift_down_add. assumption.
*)
Lemma shift_preserves_typing : forall Gi G G' G'' t A U,
prf Gi (G'++G) t A U ->
prf Gi (G'++G''++G) (l_shift (length G'') (length G') t) A (shift_varset (length G'') (length G') U).
intros Gi G G' G'' t A U prf_t.
set (Gx:=G'++G) in *. set (Gx_eq:=refl_equal Gx:Gx=G'++G).
generalize G' Gx_eq. clear Gx_eq.
induction prf_t; intros; simpl; subst G0.
(* ivar *)
rewrite H0. econstructor. assumption. apply shift_varset_empty.
(* lvar *)
rewrite H0.
destruct (le_lt_dec (length G'0) n) as [lenG_le_n | n_lt_lenG].
econstructor; eauto using lookup_suffix, shift_varset_singleton.
constructor; eauto using lookup_prefix, shift_varset_singleton2.
(* i_intro *)
rewrite H. constructor; eauto using shift_varset_empty.
(* tensor_intro *)
rewrite H0. econstructor; eauto using shift_varset_empty_inter, shift_varset_union.
(* tensor_elim *)
rewrite H0. econstructor.
apply IHprf_t1. reflexivity.
replace (A::B::G'0++G''++G) with ((A::B::G'0)++G''++G).
change (S (S (length G'0))) with (length (A::B::G'0)). apply IHprf_t2.
rewrite app_comm_cons. rewrite app_comm_cons. reflexivity.
rewrite app_comm_cons. rewrite app_comm_cons. reflexivity.
simpl. rewrite shift_down_varset. rewrite shift_down_varset. apply shift_varset_empty_inter. assumption.
simpl. rewrite shift_down_varset. rewrite shift_down_varset. apply shift_varset_union; assumption.
(* t_and_intro *)
rewrite H. econstructor.
apply IHprf_t1. reflexivity.
apply IHprf_t2. reflexivity.
apply shift_varset_union.
(* t_and_elim1 *)
econstructor; auto.
(* t_and_elim2 *)
econstructor; auto.
(* t_lolli_intro *)
rewrite H. econstructor.
replace (A::G'0++G''++G) with ((A::G'0)++G''++G).
change (S (length G'0)) with (length (A::G'0)). apply IHprf_t.
rewrite app_comm_cons. reflexivity.
rewrite app_comm_cons. reflexivity.
simpl. rewrite shift_down_varset. reflexivity.
(* t_lolli_elim *)
rewrite H0. econstructor; eauto using shift_varset_empty_inter, shift_varset_union.
(* t_bang_intro *)
econstructor; auto.
rewrite (VarSetProps.empty_is_empty_1 H). rewrite shift_varset_empty. apply VarSet.empty_1.
(* t_bang_elim *)
rewrite H0. econstructor; auto using shift_varset_empty_inter, shift_varset_union.
(* t_axiom *)
econstructor. auto.
(* t_let *)
rewrite H1. rewrite H in *. econstructor.
apply IHprf_t1. reflexivity.
replace (A::G'0++G''++G) with ((A::G'0)++G''++G).
change (S (length G'0)) with (length (A::G'0)). apply IHprf_t2. reflexivity.
reflexivity.
reflexivity.
change (length (A::G'0)) with (S (length G'0)). rewrite shift_down_varset. apply shift_varset_empty_inter. assumption.
change (length (A::G'0)) with (S (length G'0)). rewrite shift_varset_union. rewrite shift_down_varset. reflexivity.
Save.
(*
Lemma subst_preserve_typing :
prf Gi G t A U1 ->
prf Gi (G'++A::G) t' B U2 ->
prf Gi (
*)
Lemma prf_uses_ctx : forall Gi G t A U,
prf Gi G t A U -> forall n, VarSet.In n U -> n < length G.
Proof.
induction 1; intros; auto.
rewrite H0 in H1. destruct (VarSet.empty_1 H1).
rewrite H0 in H1. apply VarSet.singleton_1 in H1. unfold VarSet.E.eq in H1. subst n0. generalize G H. clear G H H0. induction n; intros.
simpl in H. destruct G. discriminate. simpl. auto with arith.
destruct G. discriminate. simpl in *. apply lt_n_S. auto.
rewrite H in H0. destruct (VarSet.empty_1 H0).
rewrite H2 in H3. destruct (VarSet.union_1 H3); auto.
rewrite H2 in H3. destruct (VarSet.union_1 H3); auto. repeat apply shift_down_In in H4. apply IHprf2 in H4. simpl in H4. auto using lt_S_n.
rewrite H1 in H2. destruct (VarSet.union_1 H2); auto.
rewrite H0 in H1. apply shift_down_In in H1. apply IHprf in H1. simpl in H1. auto using lt_S_n.
rewrite H2 in H3. destruct (VarSet.union_1 H3); auto.
rewrite H2 in H3. destruct (VarSet.union_1 H3); auto.
rewrite H3 in H4. rewrite H1 in H4. destruct (VarSet.union_1 H4). auto. apply shift_down_In in H5. apply IHprf2 in H5. auto using lt_S_n.
Qed.
Lemma proof_weakening : forall Gi G G' t A U,
prf Gi G t A U -> prf Gi (G++G') t A U.
Proof.
intros.
replace (G++G') with (G++G'++nil).
replace t with (l_shift (length G') (length G) t).
setoid_replace U with (shift_varset (length G') (length G) U).
apply shift_preserves_typing.
rewrite <- List.app_nil_end. assumption.
intros x.
split.
intros xinu.
apply prf_uses_ctx with (n:=x) in H. 2:assumption.
eapply In_shift_varset.
apply xinu.
destruct (le_lt_dec (length G)); omega.
intro xinsu.
destruct (shift_varset_image _ _ _ _ xinsu).
apply shift_varset_In in xinsu.
destruct (le_lt_dec (length G)). elimtype False. omega.
assumption.
apply shift_varset_In in xinsu.
apply prf_uses_ctx with (n:=(if le_lt_dec (length G) x then x - length G' else x)) in H. 2:assumption.
destruct (le_lt_dec (length G)). elimtype False. omega.
assumption.
induction H; simpl in *; auto; try (rewrite IHprf1; rewrite IHprf2; auto); try (rewrite IHprf; auto).
apply lookup_length3 in H. destruct (le_lt_dec (length G) n). elimtype False. omega. reflexivity.
rewrite <- app_nil_end. reflexivity.
Qed.
Definition sumbool_to_bool : forall (A B:Prop), {A}+{B} -> bool :=
fun A B x => if x then true else false.
Implicit Arguments sumbool_to_bool [A B].
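(* The proof checker infers, for a term [t], the formula it proves together
   with the set of linear variables it uses, mirroring the [prf] relation
   above. The notation [x <- e;: k] is bind in the option monad (failing
   when [e] is [None]), and [lift_bool b;; k] fails when [b] is [false]. *)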
Fixpoint proof_checker (Gi:context)
(G:context)
(t:prf_term)
{struct t}
: option (formula * VarSet.t)
:=
match t with
| t_lvar n =>
A <- nth_error G n;:
ret (A, VarSet.singleton n)
| t_ivar n =>
A <- nth_error Gi n;:
ret (A, VarSet.empty)
| t_i_intro =>
ret (f_i, VarSet.empty)
| t_tensor_intro t1 t2 =>
x1 <- proof_checker Gi G t1;:
x2 <- proof_checker Gi G t2;:
let (A, U1) := x1 in
let (B, U2) := x2 in
lift_bool (VarSet.is_empty (VarSet.inter U1 U2));;
ret (f_tensor A B, VarSet.union U1 U2)
| t_tensor_elim t1 t2 =>
x1 <- proof_checker Gi G t1;:
match x1 with
| (f_tensor A B, U1) =>
x2 <- proof_checker Gi (A::B::G) t2;:
let (C, U2) := x2 in
let U2' := shift_down (shift_down U2) in
lift_bool (VarSet.is_empty (VarSet.inter U1 U2'));;
ret (C, VarSet.union U1 U2')
| _ =>
fail
end
| t_and_intro t1 t2 =>
x1 <- proof_checker Gi G t1;:
x2 <- proof_checker Gi G t2;:
let (A, U1) := x1 in
let (B, U2) := x2 in
ret (f_and A B, VarSet.union U1 U2)
| t_and_elim1 t =>
x <- proof_checker Gi G t;:
match x with
| (f_and A B, U) =>
ret (A, U)
| _ => fail
end
| t_and_elim2 t =>
x <- proof_checker Gi G t;:
match x with
| (f_and A B, U) =>
ret (B, U)
| _ => fail
end
| t_lolli_intro A t =>
x <- proof_checker Gi (A::G) t;:
let (B,U) := x in
ret (f_lolli A B, shift_down U)
| t_lolli_elim t1 t2 =>
x1 <- proof_checker Gi G t1;:
match x1 with
| (f_lolli A B, U1) =>
x2 <- proof_checker Gi G t2;:
let (A', U2) := x2 in
lift_bool (sumbool_to_bool (formula_eq_dec A A'));;
lift_bool (VarSet.is_empty (VarSet.inter U1 U2));;
ret (B, VarSet.union U1 U2)
| _ =>
fail
end
| t_bang_intro t =>
x <- proof_checker Gi G t;:
let (A,U) := x in
lift_bool (VarSet.is_empty U);;
ret (f_bang A, U)
| t_bang_elim t1 t2 =>
x1 <- proof_checker Gi G t1;:
match x1 with
| (f_bang A, U1) =>
x2 <- proof_checker (A::Gi) G t2;:
let (C, U2) := x2 in
lift_bool (VarSet.is_empty (VarSet.inter U1 U2));;
ret (C, VarSet.union U1 U2)
| _ =>
fail
end
| t_axiom ax t =>
x <- proof_checker Gi G t;:
let (A,U) := x in
lift_bool (sumbool_to_bool (formula_eq_dec A (axiom_domain ax)));;
ret (axiom_codomain ax, U)
| t_let t1 t2 =>
x1 <- proof_checker Gi G t1;:
let (A, U1) := x1 in
x2 <- proof_checker Gi (A::G) t2;:
let (B, Ut2) := x2 in
let U2 := shift_down Ut2 in
lift_bool (VarSet.is_empty (VarSet.inter U1 U2));;
ret (B, VarSet.union U1 U2)
end.
Hint Resolve VarSet.is_empty_2.
Lemma proof_checker_sound : forall Gi G t A U,
proof_checker Gi G t = Some (A, U) ->
prf Gi G t A U.
intros Gi G t A U. generalize Gi G A U. clear Gi G A U.
induction t; intros.
(* t_lvar *)
simpl in H.
destruct (option_dec (nth_error G n)) as [[A' A'_res] | A'_res]; rewrite A'_res in H; try discriminate.
inversion H. subst. apply prf_lvar. assumption. reflexivity.
(* t_ivar *)
simpl in H.
destruct (option_dec (nth_error Gi n)) as [[A' A'_res] | A'_res]; rewrite A'_res in H; try discriminate.
inversion H. subst. apply prf_ivar. assumption. reflexivity.
(* t_i_intro *)
inversion H. subst. apply prf_i_intro. reflexivity.
(* t_tensor_intro *)
simpl in H.
destruct (option_dec (proof_checker Gi G t1)) as [[[A' U1] t1_check] | t1_check]; rewrite t1_check in H; try discriminate.
destruct (option_dec (proof_checker Gi G t2)) as [[[B U2] t2_check] | t2_check]; rewrite t2_check in H; try discriminate.
simpl in H.
destruct (bool_dec (VarSet.is_empty (VarSet.inter U1 U2))) as [inter_check | inter_check]; rewrite inter_check in H; try discriminate.
inversion H. subst. econstructor; intuition eauto. reflexivity.
(* t_tensor_elim *)
simpl in H.
destruct (option_dec (proof_checker Gi G t1)) as [[[A' U1] t1_check] | t1_check]; rewrite t1_check in H; try discriminate.
simpl in H.
destruct A'; try discriminate.
destruct (option_dec (proof_checker Gi (A'1::A'2::G) t2)) as [[[B U2] t2_check] | t2_check]; rewrite t2_check in H; try discriminate.
simpl in H.
destruct (bool_dec (VarSet.is_empty (VarSet.inter U1 (shift_down (shift_down U2))))) as [inter_check | inter_check]; rewrite inter_check in H; try discriminate.
inversion H. subst. econstructor; intuition eauto. reflexivity.
(* t_and_intro *)
simpl in H.
destruct (option_dec (proof_checker Gi G t1)) as [[[A' U1] t1_check] | t1_check]; rewrite t1_check in H; try discriminate.
destruct (option_dec (proof_checker Gi G t2)) as [[[B U2] t2_check] | t2_check]; rewrite t2_check in H; try discriminate.
inversion H. subst. econstructor; intuition eauto. reflexivity.
(* t_and_elim1 *)
simpl in H.
destruct (option_dec (proof_checker Gi G t)) as [[[A' U1] t_check] | t_check]; rewrite t_check in H; try discriminate.
simpl in H. destruct A'; try discriminate.
inversion H. subst. econstructor; eauto.
(* t_and_elim2 *)
simpl in H.
destruct (option_dec (proof_checker Gi G t)) as [[[A' U1] t_check] | t_check]; rewrite t_check in H; try discriminate.
simpl in H. destruct A'; try discriminate.
inversion H. subst. econstructor; eauto.
(* t_lolli_intro *)
simpl in H.
destruct (option_dec (proof_checker Gi (f::G) t)) as [[[B U1] t_check] | t_check]; rewrite t_check in H; try discriminate.
inversion H. subst. econstructor; eauto. reflexivity.
(* t_lolli_elim *)
simpl in H.
destruct (option_dec (proof_checker Gi G t1)) as [[[A' U1] t1_check] | t1_check]; rewrite t1_check in H; try discriminate.
simpl in H. destruct A'; try discriminate.
destruct (option_dec (proof_checker Gi G t2)) as [[[A' U2] t2_check] | t2_check]; rewrite t2_check in H; try discriminate.
simpl in H.
destruct (formula_eq_dec A'1 A') as [fs_eq | fs_neq]; try discriminate. simpl in H.
destruct (bool_dec (VarSet.is_empty (VarSet.inter U1 U2))) as [inter_check | inter_check]; rewrite inter_check in H; try discriminate.
inversion H. subst. econstructor; eauto. reflexivity.
(* t_bang_intro *)
simpl in H.
destruct (option_dec (proof_checker Gi G t)) as [[[A' U1] t_check] | t_check]; rewrite t_check in H; try discriminate.
simpl in H.
destruct (bool_dec (VarSet.is_empty U1)) as [empty_check | empty_check]; rewrite empty_check in H; try discriminate.
inversion H. subst. econstructor; eauto.
(* t_bang_elim *)
simpl in H.
destruct (option_dec (proof_checker Gi G t1)) as [[[A' U1] t1_check] | t1_check]; rewrite t1_check in H; try discriminate.
simpl in H; destruct A'; try discriminate.
destruct (option_dec (proof_checker (A'::Gi) G t2)) as [[[B U2] t2_check] | t2_check]; rewrite t2_check in H; try discriminate.
simpl in H.
destruct (bool_dec (VarSet.is_empty (VarSet.inter U1 U2))) as [inter_check | inter_check]; rewrite inter_check in H; try discriminate.
inversion H. subst. econstructor; eauto. reflexivity.
(* t_axiom *)
simpl in H.
destruct (option_dec (proof_checker Gi G t)) as [[[A' U1] t_check] | t_check]; rewrite t_check in H; try discriminate.
simpl in H.
destruct (formula_eq_dec A' (axiom_domain a)) as [is_equal | not_equal]; try discriminate.
inversion H. clear H. subst. econstructor; eauto.
(* t_let *)
simpl in H.
destruct (option_dec (proof_checker Gi G t1)) as [[[A' U1] t1_check] | t1_check]; rewrite t1_check in H; try discriminate.
simpl in H.
destruct (option_dec (proof_checker Gi (A'::G) t2)) as [[[B Ut2] t2_check] | t2_check]; rewrite t2_check in H; try discriminate.
simpl in H.
destruct (bool_dec (VarSet.is_empty (VarSet.inter U1 (shift_down Ut2)))) as [inter_check | inter_check]; rewrite inter_check in H; try discriminate.
inversion H. subst. econstructor; eauto. reflexivity. reflexivity.
Save.
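(* proof_check packages proof_checker together with its soundness proof:
   on success it returns the inferred formula B along with evidence that
   prf Gi G t B U holds for some usage set U; on failure it returns no
   information. *)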
Definition proof_check (Gi G:context) (t:prf_term)
: { B : formula | exists U, prf Gi G t B U }+{True}
:= match proof_checker Gi G t as l return proof_checker Gi G t = l -> { B : formula | exists U, prf Gi G t B U }+{True} with
| None => fun e : _ =>
inright _ I
| Some (B, U) => fun e : proof_checker Gi G t = Some (B, U) =>
(* Coq suddenly seems to require more details here, not sure why...
inleft _ (exist _ B (ex_intro _ U (proof_checker_sound Gi G t B U e)))
*)
inleft True (exist (fun B => exists U, prf Gi G t B U) B (ex_intro (fun U => prf Gi G t B U) U (proof_checker_sound Gi G t B U e)))
end (refl_equal (proof_checker Gi G t)).
Definition implies : formula -> formula -> Prop :=
fun A B => exists t, exists U, prf nil (A::nil) t B U.
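(* Reading: implies A B holds when some closed proof term t derives B from
   the single linear hypothesis A (with an empty intuitionistic context),
   for some usage set U. The next two lemmas show that this relation is
   reflexive and transitive. *)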
Lemma implies_refl : forall A, implies A A.
intro A.
exists (t_lvar 0).
exists (VarSet.singleton 0%nat).
apply proof_checker_sound. reflexivity.
Save.
Lemma implies_trans : forall A B C, implies A B -> implies B C -> implies A C.
intros A B C [tAB [UAB pAB]] [tBC [UBC pBC]].
exists (t_lolli_elim (t_lolli_intro B (l_shift (length (A::nil)) (length (B::nil)) tBC)) tAB).
eexists.
eapply prf_lolli_elim.
eapply prf_lolli_intro.
change (B::A::nil) with ((B::nil)++(A::nil)++nil).
apply shift_preserves_typing.
eassumption.
reflexivity.
eassumption.
intros x inboth.
pose (inbc := VarSet.inter_1 inboth).
pose (inab := VarSet.inter_2 inboth).
apply prf_uses_ctx with (n:=x) in pAB; auto.
apply shift_down_In in inbc. pose (shift := shift_varset_image _ _ _ _ inbc). clearbody shift. apply shift_varset_In in inbc. simpl in inbc. rewrite <- minus_n_O in inbc.
apply prf_uses_ctx with (n:=x) in pBC; auto.
simpl in *. omega.
reflexivity.
Qed.
Lemma lookup_neq : forall (A:Set) (G G'':list A) a b n, n<>(length G'') -> nth_error (G'' ++ a :: G) n = nth_error (G'' ++ b :: G) n.
induction G''; intros; simpl in *.
destruct n. destruct H; reflexivity. simpl. reflexivity.
destruct n; simpl. reflexivity. auto.
Save.
Set Implicit Arguments.
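(* The two substitution lemmas below weaken a linear hypothesis A to a
   conjunction: wherever the proof refers to the weakened position, the
   matching and-elimination (prf_and_elim1 / prf_and_elim2) is inserted,
   so the usage set U is unchanged. *)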
Lemma subst_and_1 : forall Gi G G' A C t B U,
prf Gi (G++A::G') t B U ->
exists t', prf Gi (G++(f_and A C)::G') t' B U.
Proof.
intros. set (G0:=(G++A::G')) in *. assert (G0 = (G++A::G')) by reflexivity. generalize G H0. clear H0. induction H; intros.
eexists; eauto using prf_ivar.
compare n (length G1); intro; eexists.
eapply prf_and_elim1. rewrite e in H. rewrite H1 in H. rewrite lookup_length in H. injection H as H'. subst A0. apply prf_lvar.
apply lookup_length.
rewrite <- e. assumption.
apply prf_lvar. rewrite <- H. rewrite H1. apply lookup_neq. assumption. assumption.
eexists. eauto using prf_i_intro.
destruct (IHprf1 G1 H3); destruct (IHprf2 G1 H3); eexists; eapply prf_tensor_intro; eauto.
destruct (IHprf1 G1 H3); destruct (IHprf2 (A0::B::G1)). rewrite H3. simpl. reflexivity. eauto using prf_tensor_elim.
destruct (IHprf1 G1 H2); destruct (IHprf2 G1 H2); eexists; eapply prf_and_intro; eauto.
destruct (IHprf G1 H0); eauto using prf_and_elim1.
destruct (IHprf G1 H0); eauto using prf_and_elim2.
destruct (IHprf (A0::G1)). rewrite H1. simpl. reflexivity. eauto using prf_lolli_intro.
destruct (IHprf1 G1 H3); destruct (IHprf2 G1 H3); eexists; eauto using prf_lolli_elim.
destruct (IHprf G1 H1); eauto using prf_bang_intro.
destruct (IHprf1 G1 H3); destruct (IHprf2 G1 H3); eexists; eauto using prf_bang_elim.
destruct (IHprf G1 H0); eauto using prf_axiom.
destruct (IHprf1 G1 H4); destruct (IHprf2 (A0::G1)). rewrite H4. simpl. reflexivity. eauto using prf_let.
Qed.
Lemma subst_and_2 : forall Gi G G' A C t B U,
prf Gi (G++A::G') t B U ->
exists t', prf Gi (G++(f_and C A)::G') t' B U.
Proof.
intros. set (G0:=(G++A::G')) in *. assert (G0 = (G++A::G')) by reflexivity. generalize G H0. clear H0. induction H; intros.
eexists; eauto using prf_ivar.
compare n (length G1); intro; eexists.
eapply prf_and_elim2. rewrite e in H. rewrite H1 in H. rewrite lookup_length in H. injection H as H'. subst A0. apply prf_lvar.
apply lookup_length.
rewrite <- e. assumption.
apply prf_lvar. rewrite <- H. rewrite H1. apply lookup_neq. assumption. assumption.
eexists. eauto using prf_i_intro.
destruct (IHprf1 G1 H3); destruct (IHprf2 G1 H3); eexists; eapply prf_tensor_intro; eauto.
destruct (IHprf1 G1 H3); destruct (IHprf2 (A0::B::G1)). rewrite H3. simpl. reflexivity. eauto using prf_tensor_elim.
destruct (IHprf1 G1 H2); destruct (IHprf2 G1 H2); eexists; eapply prf_and_intro; eauto.
destruct (IHprf G1 H0); eauto using prf_and_elim1.
destruct (IHprf G1 H0); eauto using prf_and_elim2.
destruct (IHprf (A0::G1)). rewrite H1. simpl. reflexivity. eauto using prf_lolli_intro.
destruct (IHprf1 G1 H3); destruct (IHprf2 G1 H3); eexists; eauto using prf_lolli_elim.
destruct (IHprf G1 H1); eauto using prf_bang_intro.
destruct (IHprf1 G1 H3); destruct (IHprf2 G1 H3); eexists; eauto using prf_bang_elim.
destruct (IHprf G1 H0); eauto using prf_axiom.
destruct (IHprf1 G1 H4); destruct (IHprf2 (A0::G1)). rewrite H4. simpl. reflexivity. eauto using prf_let.
Qed.
Unset Implicit Arguments.
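(* implies is a congruence for f_tensor and f_and; parameterising over fo
   with the disjunctive hypothesis lets one lemma cover both connectives. *)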
Lemma implies_subformulae : forall f1 f2 f1' f2' fo,
implies f1 f1' -> implies f2 f2' -> fo = f_tensor \/ fo = f_and ->
implies (fo f1 f2) (fo f1' f2').
Proof.
intros until fo. intros [t1 [U1 p1]] [t2 [U2 p2]] [fo_eq|fo_eq]; subst fo.
unfold implies. do 2 eexists.
eapply prf_tensor_elim.
apply prf_lvar with (n:=0).
simpl. reflexivity.
reflexivity.
eapply prf_tensor_intro.
change (f1 :: f2 :: f_tensor f1 f2 :: nil) with ((f1::nil)++(f2::f_tensor f1 f2::nil)++nil). apply shift_preserves_typing. apply p1.
change (f1 :: f2 :: f_tensor f1 f2 :: nil) with (nil++(f1::nil)++(f2::f_tensor f1 f2::nil)). apply shift_preserves_typing. change (nil ++ f2 :: f_tensor f1 f2 :: nil) with ((f2::nil)++(f_tensor f1 f2::nil)++nil). apply shift_preserves_typing. apply p2.
simpl. intros x in_inter.
pose (inu1 := VarSet.inter_1 in_inter). pose (inu2 := VarSet.inter_2 in_inter).
pose (x1 := shift_varset_image _ _ _ _ inu1). clearbody x1. pose (x2 := shift_varset_image _ _ _ _ inu2). clearbody x2. apply shift_varset_In in inu2. pose (x2' := shift_varset_image _ _ _ _ inu2). clearbody x2'. apply shift_varset_In in inu2.
match type of inu2 with VarSet.In ?x2 U2 => set (n2:=x2) end. apply prf_uses_ctx with (n:=n2) in p2; auto.
destruct (le_lt_dec 0 x); destruct (le_lt_dec 1 (x-1)); unfold n2 in *; simpl in *; omega.
reflexivity.
simpl. intros x in_inter.
pose (inu1 := VarSet.inter_1 in_inter). pose (inu2 := VarSet.inter_2 in_inter).
rewrite <- (VarSet.singleton_1 inu1) in *. do 2 apply shift_down_In in inu2.
destruct (VarSet.union_1 inu2). apply shift_varset_image in H. omega.
apply shift_varset_In in H. simpl in H. apply shift_varset_image in H. omega.
reflexivity.
unfold implies.
change (f1::nil) with (nil++(f1::nil)++nil) in p1. destruct (subst_and_1 _ _ _ f2 p1). simpl in H.
change (f2::nil) with (nil++(f2::nil)++nil) in p2. destruct (subst_and_2 _ _ _ f1 p2). simpl in H0.
do 2 eexists. eapply prf_and_intro.
apply H.
apply H0.
reflexivity.
Qed.
(*
Lemma implies_lolli : forall f1 f2 f1' f2',
implies f1 f1' -> implies f2 f2' ->
implies (f_lolli f1' f2) (f_lolli f1 f2').
Proof.
intros f1 f2 f1' f2' [t1 [U1 p1]] [t2 [U2 p2]].
unfold implies. do 2 eexists. eapply prf_lolli_intro.
eapply prf_lolli_elim.
*)
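(* proof_check_single wraps proof_check with an equality test against the
   expected target B, certifying implies A B whenever the checker succeeds
   with exactly that formula. *)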
Definition proof_check_single (A B:formula) (t:prf_term)
: { implies A B }+{True}
:= match proof_check nil (A::nil) t with
| inleft (exist _ B' H) =>
match formula_eq_dec B' B with
| left B'_eq_B =>
left _ (ex_intro _ t (eq_ind _ _ H _ B'_eq_B))
| right _ =>
right _ I
end
| inright _ => right _ I
end.
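(* r_may c f applies f to the resource that R_new associates with the
   class c, and degenerates to f_i when R_new yields None. *)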
Definition r_may := fun c f =>
match R_new c with
| None => f_i
| Some r => f r
end.
Definition resexpr_to_formula : res_expr B.Classname.t -> formula :=
let r_to_f_step (f:formula) (r:res_expr B.Classname.t) :=
List.fold_left (fun f expr =>
f_tensor f (match expr with (true,c) => r_may c (fun r => f_bang (f_atom r)) | (false,c) => r_may c (fun r => f_atom r) end))
r f
in r_to_f_step f_i.
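(* Worked example (a sketch, assuming R_new ci = Some ri for both classes):
   the expression (true,c1)::(false,c2)::nil folds to
     f_tensor (f_tensor f_i (f_bang (f_atom r1))) (f_atom r2),
   i.e. a banged atom for the (true,_) entry and a plain atom for the
   (false,_) entry. *)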
End MkILLSyntax.
Module MkILLSemantics
(B : BASICS)
(BASESEM : ILL_BASE_SEMANTICS B)
(SYN : ILL_SYNTAX B BASESEM.SYN)
: ILL_SEMANTICS B BASESEM SYN.
Import SYN.
Import BASESEM.SYN.
(*Lemma VarSet_Equal_refl : forall s, VarSet.Equal s s.
unfold VarSet.Equal. tauto.
Save.
Lemma VarSet_Equal_symm : forall s1 s2, VarSet.Equal s1 s2 -> VarSet.Equal s2 s1.
unfold VarSet.Equal. firstorder.
Save.
Lemma VarSet_Equal_trans : forall s1 s2 s3, VarSet.Equal s1 s2 -> VarSet.Equal s2 s3 -> VarSet.Equal s1 s3.
unfold VarSet.Equal. firstorder.
Save.
Add Relation VarSet.t VarSet.Equal
reflexivity proved by VarSet_Equal_refl
symmetry proved by VarSet_Equal_symm
transitivity proved by VarSet_Equal_trans
as VarSet_Equal.
Add Morphism VarSet.Empty with signature VarSet.Equal ==> iff as VarSet_Empty_mor.
unfold VarSet.Equal. unfold VarSet.Empty. firstorder.
Save.
Add Morphism VarSet.In with signature eq ==> VarSet.Equal ==> iff as VarSet_In_mor.
unfold VarSet.Equal. firstorder.
Save.*)
Fixpoint context_to_formula (G:context) (n:nat) (U:VarSet.t)
{struct G}
: formula :=
match G with
| nil => f_i
| cons f rest => if VarSet.mem n U then f_tensor f (context_to_formula rest (S n) U)
else context_to_formula rest (S n) U
end.
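(* context_to_formula tensors together exactly those hypotheses whose
   position (offset by n) occurs in U. For instance,
     context_to_formula (A::B::nil) 0 (VarSet.singleton 0)
   unfolds to f_tensor A f_i: position 0 is in the set, position 1 is not. *)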
Add Morphism context_to_formula with signature (@eq context) ==> (@eq nat) ==> VarSet.Equal ==> (@eq formula) as ctf_mor.
induction y; intros x0 x1 x2 H.
reflexivity.
simpl. unfold VarSet.Equal in H. destruct (bool_dec (VarSet.mem x0 x1)).
rewrite H0. rewrite (@VarSet.mem_1 x2 x0).
rewrite (IHy (S x0) x1 x2 H); reflexivity.
rewrite <- H. apply VarSet.mem_2. assumption.
rewrite H0. replace (VarSet.mem x0 x2) with false. eauto.
destruct (bool_dec (VarSet.mem x0 x2)).
rewrite (@VarSet.mem_1 x1 x0) in H0. discriminate. rewrite H. apply VarSet.mem_2. assumption.
congruence.
Save.
Import BASESEM.
Import RA.
Fixpoint icontext_to_formula (G:context) : formula :=
match G with
| nil => f_i
| cons f rest => f_tensor f (icontext_to_formula rest)
end.
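(* The intuitionistic context is tensored in full: every hypothesis is
   included, and duplicability comes from wrapping the whole context in
   f_bang at its use sites (see get_from_icontext and the statement of
   soundness below). *)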
Lemma right_weaken : forall r1 r2 r,
r1 :*: r2 <: r ->
r1 <: r.
intros.
rewrite <- (r_combine_e r1).
eapply leq_trans.
apply combine_order. apply leq_refl. apply eq_refl. apply (e_bottom r2).
assumption.
Save.
Lemma left_weaken : forall r1 r2 r,
r1 :*: r2 <: r ->
r2 <: r.
intros.
rewrite <- (e_combine_r r2).
eapply leq_trans.
apply combine_order. apply e_bottom. apply leq_refl. reflexivity.
apply H.
Save.
(*
FIXME: this doesn't work because the setoid machinery doesn't know that
ILL.RA.res is a setoid, but it turns out we didn't need it anyway.
Add Morphism sat : sat_morphism.
intros r1 r2 r1_eq_r2 A.
generalize r1 r2 r1_eq_r2. clear r1 r2 r1_eq_r2.
induction A; intros.
(* f_i *)
intuition.
(* f_atom *)
simpl. rewrite r1_eq_r2. intuition.
(* f_tensor *)
simpl. split; intros.
destruct H as [r3 [r4 [r3r4_r1 [sat_r3 sat_r4]]]].
exists r3. exists r4. rewrite <- r1_eq_r2. intuition.
destruct H as [r3 [r4 [r3r4_r1 [sat_r3 sat_r4]]]].
exists r3. exists r4. rewrite r1_eq_r2. intuition.
(* f_and *)
simpl. rewrite (IHA1 r1 r2 r1_eq_r2). rewrite (IHA2 r1 r2 r1_eq_r2). intuition.
(* f_lolli *)
simpl. split; intros.
rewrite (IHA2 (r2 :*: r') (r1 :*: r')).
rewrite <- r1_eq_r2. reflexivity.
auto.
rewrite (IHA2 (r1 :*: r') (r2 :*: r')).
rewrite <- r1_eq_r2. reflexivity.
auto.
(* f_bang *)
simpl. split; intros.
destruct H as [r3 [r3_r2 sat_r2]].
exists r3. rewrite <- r1_eq_r2. intuition.
destruct H as [r3 [r3_r2 sat_r2]].
exists r3. rewrite r1_eq_r2. intuition.
Save.
*)
Lemma sat_leq : forall r1 r2 A,
sat r1 A -> r1 <: r2 -> sat r2 A.
intros r1 r2 A. generalize r1 r2. clear r1 r2.
induction A; intros; simpl in *.
(* f_i *)
trivial.
(* f_atom *)
eapply leq_trans; eauto.
(* f_tensor *)
destruct H as [r3 [r4 [r3r4_r1 [r2_A1 r3_A2]]]].
exists r3. exists r4. intuition. eapply leq_trans; eauto.
(* f_and *)
intuition eauto.
(* f_lolli *)
intros. apply (IHA2 (r1 :*: r') (r2 :*: r')).
apply H. apply H1.
apply combine_order. apply H0. apply leq_refl. reflexivity.
(* f_bang *)
destruct H as [r' [r'_r1 sat_r']].
exists r'. intuition. eapply leq_trans; eauto.
Save.
Lemma nat_dec : forall (n:nat), n=0%nat\/(exists m, n = S m).
intros. destruct n.
left. reflexivity.
right. exists n. reflexivity.
Save.
Lemma not_in_mem_false : forall n s,
~ (VarSet.In n s) -> VarSet.mem n s = false.
intros.
compare (VarSet.mem n s) true; intros.
elimtype False. apply H. apply VarSet.mem_2. assumption.
rewrite (not_true_is_false _ n0). reflexivity.
Save.
Implicit Arguments not_in_mem_false [n s].
Lemma not_mem : forall n m,
n <> m ->
VarSet.mem m (VarSet.singleton n) = false.
intros.
apply not_in_mem_false.
unfold not. intros. apply H. apply VarSet.singleton_1. assumption.
Save.
Lemma sat_singleton : forall r G m n A,
sat r (context_to_formula G m (VarSet.singleton (n+m))) ->
nth_error G n = Some A ->
sat r A.
intros. generalize m n H H0. clear m n H H0.
induction G; intros.
destruct n; discriminate.
destruct (nat_dec n) as [n_is_0 | [n' n_is_Sn']].
(* n = 0: the looked-up formula is at the current head of the context *)
subst. inversion H0. subst.
simpl in H. rewrite VarSet.mem_1 in H.
simpl in H. destruct H as [r1 [r2 [r1r2_r [sat_r1 _]]]]. eapply sat_leq. apply sat_r1. eapply right_weaken. apply r1r2_r.
apply VarSet.singleton_2. change (VarSet.E.eq m m) with (m = m). apply refl_equal. (* BUG *)
(* step on *)
subst. simpl in H0. simpl in H. rewrite not_mem in H.
eapply IHG.
replace (S (n' + m)) with (n' + (S m)) in H.
apply H.
omega.
assumption.
omega.
Save.
Lemma sep_mem : forall m U1 U2,
VarSet.In m U1 ->
VarSet.Empty (VarSet.inter U1 U2) ->
VarSet.mem m U2 = false.
intros. destruct (SYN.VarSetProps.In_dec m U2) as [in_2 | not_in_2].
(* contradiction *)
assert (VarSet.In m (VarSet.inter U1 U2)). apply VarSet.inter_3; assumption.
elimtype False. apply (H0 _ H1).
(* not_there *)
apply not_in_mem_false. assumption.
Save.
Implicit Arguments sep_mem [m U1 U2].
Lemma sep_mem_s : forall m U1 U2,
VarSet.In m U2 ->
VarSet.Empty (VarSet.inter U1 U2) ->
VarSet.mem m U1 = false.
intros.
apply (sep_mem (U1:=U2) (U2:=U1) H).
rewrite SYN.VarSetProps.inter_sym.
assumption.
Save.
Implicit Arguments sep_mem_s [m U1 U2].
Lemma not_in_union : forall x U1 U2,
~(VarSet.In x (VarSet.union U1 U2)) ->
~(VarSet.In x U1) /\ ~(VarSet.In x U2).
intros.
destruct (SYN.VarSetProps.In_dec x U1) as [in_1 | not_in_1].
elimtype False. apply H. apply VarSet.union_2. assumption.
destruct (SYN.VarSetProps.In_dec x U2) as [in_2 | not_in_2].
elimtype False. apply H. apply VarSet.union_3. assumption.
auto.
Save.
Implicit Arguments not_in_union [x U1 U2].
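(* Key splitting lemma: when U1 and U2 are disjoint, a resource satisfying
   the context restricted to (VarSet.union U1 U2) splits into two
   sub-resources satisfying the U1- and U2-restricted contexts. *)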
Lemma sat_split : forall r G m U1 U2,
VarSet.Empty (VarSet.inter U1 U2) ->
sat r (context_to_formula G m (VarSet.union U1 U2)) ->
exists r1, exists r2,
r1 :*: r2 <: r /\ sat r1 (context_to_formula G m U1) /\ sat r2 (context_to_formula G m U2).
intros r G m U1 U2 sep. generalize r m. clear r m. induction G; intros.
(* base case *)
simpl. exists e. exists e. intuition. rewrite e_combine_r. apply e_bottom.
(* step case *)
simpl in *.
destruct (SYN.VarSetProps.In_dec m (VarSet.union U1 U2)) as [is_in | isnt_in].
(* we included this position originally *)
rewrite (VarSet.mem_1 is_in) in H. simpl in H. destruct H as [r1 [r2 [r1r2_r [sat_r1_a sat_r2_rest]]]].
destruct (VarSet.union_1 is_in) as [in_1 | in_2].
(* it was in U1 *)
rewrite (VarSet.mem_1 in_1).
rewrite (sep_mem in_1 sep).
destruct (IHG _ _ sat_r2_rest) as [r1' [r2' [r1'r2'_r2 [r1'_U1 r2'_U2]]]].
exists (r1 :*: r1'). exists r2'. intuition.
rewrite <- combine_assoc. eapply leq_trans.
apply combine_order.
apply leq_refl. reflexivity.
apply r1'r2'_r2.
apply r1r2_r.
exists r1. exists r1'. intuition.
(* it was in U2 *)
rewrite (VarSet.mem_1 in_2).
rewrite (sep_mem_s in_2 sep).
destruct (IHG _ _ sat_r2_rest) as [r1' [r2' [r1'r2'_r2 [r1'_U1 r2'_U2]]]].
exists r1'. exists (r1 :*: r2'). intuition.
rewrite combine_assoc. rewrite (combine_symm r1' r1). rewrite <- combine_assoc.
eapply leq_trans.
apply combine_order.
apply leq_refl. reflexivity.
apply r1'r2'_r2.
apply r1r2_r.
exists r1. exists r2'. intuition.
(* we didn't originally include this position *)
destruct (not_in_union isnt_in) as [not_in_1 not_in_2].
rewrite (not_in_mem_false not_in_1).
rewrite (not_in_mem_false not_in_2).
rewrite (not_in_mem_false isnt_in) in H.
auto.
Save.
Implicit Arguments sat_split [r G m U1 U2].
Lemma shift_step_union : forall S S2 a,
VarSet.Equal (shift_step a (VarSet.union S2 S)) (VarSet.union (shift_step a S2) S).
intros. destruct a; simpl; auto with set.
Save.
Lemma shift_step_morphism : forall a S1 S2,
VarSet.Equal S1 S2 ->
VarSet.Equal (shift_step a S1) (shift_step a S2).
intros. destruct a.
assumption.
simpl. setoid_replace S1 with S2.
reflexivity.
assumption.
Save.
Definition fold_left_shift := fold_left (fun a b => shift_step b a).
Add Morphism fold_left_shift with signature Logic.eq ==> VarSet.Equal ==> VarSet.Equal as fold_left_morphism.
intro l. induction l; intros; simpl.
assumption.
apply IHl. apply shift_step_morphism. assumption.
Save.
Lemma fold_shift_step_union : forall l S S2,
VarSet.Equal (fold_left (fun a b => shift_step b a) l (VarSet.union S2 S))
(VarSet.union (fold_left (fun a b => shift_step b a) l S2) S).
intros l. induction l; intros; simpl.
unfold VarSet.Equal. split; intro; assumption.
setoid_rewrite <- IHl. apply fold_left_morphism. auto. apply shift_step_union.
Save.
Lemma fold_shift_step_only_add : forall m l S,
VarSet.In m (fold_left (fun a b => shift_step b a) l S) ->
~(VarSet.In m S) ->
VarSet.In m (fold_left (fun a b => shift_step b a) l VarSet.empty).
intros m l S is_in m_nin_S.
fold fold_left_shift in is_in.
setoid_replace S with (VarSet.union VarSet.empty S) in is_in; [|auto with set].
rewrite fold_shift_step_union in is_in.
destruct (VarSet.union_1 is_in).
assumption.
contradiction.
Save.
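(* The next two lemmas characterise shift_down:
   m is in (shift_down U) exactly when (S m) is in U. *)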
Lemma shift_prop : forall m U,
VarSet.In m (shift_down U) ->
VarSet.In (S m) U.
intros. unfold shift_down in H.
rewrite VarSet.fold_1 in H.
apply VarSet.elements_2.
induction (VarSet.elements U).
simpl in H. elimtype False. apply (VarSet.empty_1 H).
simpl in H. destruct (nat_dec a) as [is_0 | [x a_is_Sx]].
subst. auto.
subst. destruct (Peano_dec.eq_nat_dec x m).
subst. apply InA_cons_hd. reflexivity.
apply InA_cons_tl. apply IHl. eapply fold_shift_step_only_add.
apply H.
simpl. rewrite <- SYN.VarSetProps.singleton_equal_add. unfold not. intros. apply n. apply VarSet.singleton_1. assumption.
Save.
Lemma shift_prop_2 : forall m U,
VarSet.In (S m) U ->
VarSet.In m (shift_down U).
intros. unfold shift_down.
rewrite VarSet.fold_1.
set (H':=VarSet.elements_1 H). generalize H'. clear H H'. intro H.
induction (VarSet.elements U).
inversion H.
inversion H; subst.
simpl. destruct (nat_dec a) as [is_0 | [x a_is_Sx]]; subst.
discriminate.
simpl. setoid_replace (fold_left (fun a b => shift_step b a) l (VarSet.add x VarSet.empty))
with (fold_left (fun a b => shift_step b a) l (VarSet.union VarSet.empty (VarSet.singleton x))).
fold fold_left_shift.
rewrite fold_shift_step_union.
apply VarSet.union_3. apply VarSet.singleton_2. apply eq_add_S. symmetry. apply H1.
apply fold_left_morphism. reflexivity. rewrite <- SYN.VarSetProps.singleton_equal_add. rewrite SYN.VarSetProps.empty_union_1; auto with set.
simpl. destruct (nat_dec a) as [is_0 | [x a_is_Sx]]; subst.
simpl. apply IHl. assumption.
simpl. setoid_replace (fold_left (fun a b => shift_step b a) l (VarSet.add x VarSet.empty))
with (fold_left (fun a b => shift_step b a) l (VarSet.union VarSet.empty (VarSet.singleton x))).
fold fold_left_shift.
rewrite fold_shift_step_union.
apply VarSet.union_2. apply IHl. apply H1.
apply fold_left_morphism. reflexivity. rewrite <- SYN.VarSetProps.singleton_equal_add. rewrite SYN.VarSetProps.empty_union_1; auto with set.
Save.
Lemma sat_shift_aux : forall r m G U,
sat r (context_to_formula G m (shift_down U)) ->
sat r (context_to_formula G (S m) U).
intros. generalize r m H. clear r m H. induction G; intros.
simpl in *. trivial.
simpl in *. destruct (SYN.VarSetProps.In_dec m (shift_down U)).
(* it was in the shifted down one *)
rewrite (VarSet.mem_1 i) in H. rewrite (VarSet.mem_1 (shift_prop _ _ i)).
simpl in H. destruct H as [r1 [r2 [r1r2_r [sat_r1 sat_r2]]]].
exists r1. exists r2. intuition.
(* it wasn't in the shifted down one *)
rewrite (not_in_mem_false n) in H.
destruct (SYN.VarSetProps.In_dec (S m) U) as [is_in | isnt_in].
elimtype False. apply n. apply shift_prop_2. apply is_in.
rewrite (not_in_mem_false isnt_in). auto.
Save.
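(* sat_shift: pushing a fresh hypothesis A onto the linear context is paid
   for by tensoring a resource for A with one for the shifted remainder. *)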
Lemma sat_shift : forall r1 r2 G U A,
sat r2 (context_to_formula G 0 (shift_down U)) ->
sat r1 A ->
sat (r1 :*: r2) (context_to_formula (A::G) 0 U).
intros. simpl. destruct (SYN.VarSetProps.In_dec 0%nat U) as [has_0 | no_0].
(* 0 was in U *)
rewrite (VarSet.mem_1 has_0). exists r1. exists r2. intuition.
apply sat_shift_aux. assumption.
(* 0 wasn't in U *)
rewrite (not_in_mem_false no_0). eapply sat_leq.
apply sat_shift_aux. apply H.
rewrite <- (e_combine_r r2). apply combine_order.
apply e_bottom.
rewrite e_combine_r. apply leq_refl. reflexivity.
Save.
Lemma notin_subset : forall m S1 S2,
~(VarSet.In m S2) ->
VarSet.Subset S1 S2 ->
~(VarSet.In m S1).
intros. destruct (SYN.VarSetProps.In_dec m S1).
elimtype False. auto.
apply n.
Save.
Implicit Arguments notin_subset [m S1 S2].
Lemma sat_subset : forall r m G U1 U2,
VarSet.Subset U2 U1 ->
sat r (context_to_formula G m U1) ->
sat r (context_to_formula G m U2).
intros r m G U1 U2 U2_ss_U1.
generalize r m. clear r m. induction G; intros.
(* base case *)
trivial.
(* step case *)
simpl in *. destruct (SYN.VarSetProps.In_dec m U1) as [in_U1 | notin_U1].
(* is in U1 *)
rewrite (VarSet.mem_1 in_U1) in H. simpl in H.
destruct H as [r1 [r2 [r1r2_r [sat_r1 sat_r2]]]].
destruct (SYN.VarSetProps.In_dec m U2) as [in_U2 | notin_U2].
(* is in U2 *)
rewrite (VarSet.mem_1 in_U2). exists r1. exists r2. intuition.
(* not in U2 *)
rewrite (not_in_mem_false notin_U2). eapply sat_leq.
apply IHG. apply sat_r2.
rewrite <- (e_combine_r r2).
eapply leq_trans.
apply combine_order. apply e_bottom. apply leq_refl. reflexivity.
apply r1r2_r.
(* not in U1 *)
rewrite (not_in_mem_false notin_U1) in H.
rewrite (not_in_mem_false (notin_subset notin_U1 U2_ss_U1)).
auto.
Save.
Lemma get_from_icontext : forall r G n A,
nth_error G n = Some A ->
sat r (f_bang (icontext_to_formula G)) ->
sat r A.
intros r G n A. generalize r n. clear r n.
induction G; intros.
(* empty list *)
destruct n; discriminate.
(* cons *)
simpl in *.
destruct H0 as [r' [r'_r1 [r1 [r2 [r1r2_r [sat_r1 sat_r2]]]]]].
destruct (nat_dec n) as [n_is_0 | [x n_is_Sx]]; subst; simpl in *.
(* found here *)
inversion H. subst.
eapply sat_leq.
apply sat_r1.
eapply leq_trans.
eapply right_weaken. apply r1r2_r.
eapply leq_trans. apply bang_unit. assumption.
(* not found here *)
eapply sat_leq.
eapply IHG.
apply H.
exists r2. intuition.
apply bang_order_Proper. eapply left_weaken. apply r1r2_r.
apply r'_r1.
Save.
Lemma context_empty : forall U G m,
VarSet.Empty U ->
context_to_formula G m U = f_i.
intros U G m U_empty. generalize m. clear m. induction G; intros.
reflexivity.
simpl. rewrite (not_in_mem_false (U_empty m)). auto.
Save.
Lemma combine_resources : forall r1 r2 rI rL r' r,
rI :*: rL <: r ->
!r' <: rI ->
r1 :*: r2 <: rL ->
!r' :*: r1 :*: (!r' :*: r2) <: r.
intros r1 r2 rI rL r' r rIrL_r r'_rI r1r2_r.
rewrite (combine_symm (!r') r1).
rewrite <- combine_assoc.
rewrite (combine_assoc (!r') (!r')).
rewrite combine_symm. rewrite <- combine_assoc.
eapply leq_trans.
apply combine_order.
apply bang_codup.
rewrite combine_symm at 1. apply r1r2_r.
eapply leq_trans.
apply combine_order. apply r'_rI. apply leq_refl. reflexivity.
apply rIrL_r.
Save.
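(* Main soundness theorem: a derivation prf Gi G t A U turns any resource
   satisfying the banged intuitionistic context tensored with the
   U-restricted linear context into one satisfying A. The proof is by
   induction on the derivation, using sat_split to divide resources at the
   multiplicative rules. *)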
Lemma soundness : forall r Gi G t A U,
prf Gi G t A U ->
sat r (f_tensor (f_bang (icontext_to_formula Gi)) (context_to_formula G 0 U)) ->
sat r A.
intros r Gi G t A U prf_deriv. generalize r. clear r.
induction prf_deriv; intros.
(* prf_ivar *)
destruct H1 as [rI [rL [rIrL_r [sat_I sat_L]]]].
eapply sat_leq.
eapply get_from_icontext.
apply H.
simpl. apply sat_I.
eapply right_weaken. apply rIrL_r.
(* prf_lvar *)
destruct H1 as [rI [rL [rIrL_r [sat_I sat_L]]]].
eapply sat_leq.
eapply sat_singleton.
replace n with (n+0) in H0. rewrite H0 in sat_L. apply sat_L. omega.
apply H.
eapply left_weaken. apply rIrL_r.
(* prf_i_intro *)
simpl. trivial.
(* prf_tensor_intro *)
destruct H1 as [rI [rL [rIrL_r [sat_I sat_L]]]]. fold sat in *.
destruct sat_I as [r' [r'_rI sat_r']].
rewrite H0 in sat_L.
destruct (sat_split H sat_L) as [r1 [r2 [r1r2_r [sat_r1 sat_r2]]]].
exists (!r' :*: r1). exists (!r' :*: r2). intuition.
eapply combine_resources; eauto.
apply IHprf_deriv1. exists (!r'). exists r1. intuition.
exists r'. intuition.
apply IHprf_deriv2. exists (!r'). exists r2. intuition.
exists r'. intuition.
(* prf_tensor_elim *)
apply IHprf_deriv2.
destruct H1 as [rI [rL [rIrL_r [sat_rI sat_rL]]]]. fold sat in *.
rewrite H0 in sat_rL.
destruct sat_rI as [r' [r'_rI sat_r']].
destruct (sat_split H sat_rL) as [r1 [r2 [r1r2_r [sat_r1 sat_r2]]]].
simpl in IHprf_deriv1. destruct (IHprf_deriv1 (!r' :*: r1)) as [rA [rB [rArB_r1 [sat_rA sat_rB]]]].
exists (!r'). exists r1. intuition.
exists r'. intuition.
exists (!r'). exists (rA :*: (rB :*: r2)). intuition.
rewrite (combine_assoc rA rB r2). rewrite combine_assoc. eapply leq_trans.
apply combine_order. eapply leq_trans.
apply combine_order.
apply leq_refl. reflexivity.
apply rArB_r1.
rewrite combine_assoc at 1. apply combine_order.
apply bang_codup.
apply leq_refl. reflexivity.
apply leq_refl. reflexivity.
rewrite <- combine_assoc. eapply leq_trans.
apply combine_order. apply r'_rI. apply r1r2_r.
apply rIrL_r.
exists r'. intuition.
apply sat_shift.
apply sat_shift; assumption.
assumption.
(* prf_and_intro *)
destruct H0 as [rI [rL [rIrL_r [sat_rI sat_rL]]]].
rewrite H in sat_rL.
simpl. split.
apply IHprf_deriv1. exists rI. exists rL. intuition. eapply sat_subset. eapply SYN.VarSetProps.union_subset_1. apply sat_rL.
apply IHprf_deriv2. exists rI. exists rL. intuition. eapply sat_subset. eapply SYN.VarSetProps.union_subset_2. apply sat_rL.
(* prf_and_elim_1 *)
simpl in *. destruct (IHprf_deriv r H). assumption.
(* prf_and_elim_2 *)
simpl in *. destruct (IHprf_deriv r H). assumption.
(* prf_lolli_intro *)
destruct H0 as [rI [rL [rIrL_r [sat_rI sat_rL]]]].
rewrite H in sat_rL.
simpl. intros.
apply IHprf_deriv. exists rI. exists (r' :*: rL). intuition.
rewrite (combine_symm r' rL). rewrite combine_assoc. apply combine_order.
assumption.
apply leq_refl. reflexivity.
apply sat_shift; assumption.
(* prf_lolli_elim *)
rewrite H0 in H1.
destruct H1 as [rI [rL [rIrL_r [sat_rI sat_rL]]]].
destruct sat_rI as [r' [r'_rI sat_r']].
destruct (sat_split H sat_rL) as [r1 [r2 [r1r2_r [sat_r1 sat_r2]]]].
eapply sat_leq.
simpl in IHprf_deriv1. eapply IHprf_deriv1.
exists (!r'). exists r1. intuition.
apply leq_refl. reflexivity.
exists r'. intuition.
apply IHprf_deriv2. exists (!r'). exists r2. intuition.
apply leq_refl. reflexivity.
exists r'. intuition.
eapply combine_resources; eauto.
(* prf_bang_intro *)
rewrite context_empty in IHprf_deriv; [|assumption].
destruct H0 as [rI [rL [rIrL_r [sat_rI sat_rL]]]].
destruct sat_rI as [r' [r'_rI sat_r']].
exists (!r'). split.
eapply leq_trans.
apply bang_mult.
rewrite <- (r_combine_e (!r')). eapply leq_trans.
apply combine_order. apply r'_rI. apply e_bottom.
apply rIrL_r.
apply IHprf_deriv. exists (!r'). exists e. intuition.
rewrite r_combine_e. apply leq_refl. reflexivity.
exists r'. intuition.
simpl. trivial.
(* prf_bang_elim *)
apply IHprf_deriv2.
rewrite H0 in H1.
destruct H1 as [rI [rL [rIrL_r [sat_rI sat_rL]]]].
destruct sat_rI as [r' [r'_rI sat_r']].
destruct (sat_split H sat_rL) as [r1 [r2 [r1r2_r [sat_r1 sat_r2]]]].
simpl in IHprf_deriv1. destruct (IHprf_deriv1 (!r' :*: r1)) as [rA [rA_r sat_rA]].
exists (!r'). exists r1. intuition.
exists r'. intuition.
exists (!(rA :*: r')). exists r2. intuition.
rewrite bang_combine. rewrite <- combine_assoc. eapply leq_trans.
apply combine_order.
apply rA_r. apply leq_refl. reflexivity.
eapply combine_resources; eauto.
exists (rA :*: r'). intuition.
exists rA. exists r'. intuition.
(* prf_axiom *)
apply axioms_sound. apply IHprf_deriv. apply H.
(* prf_let *)
rewrite H in *. rewrite H1 in H2.
destruct H2 as [rI [rL [rIrL_r [sat_rI sat_rL]]]].
destruct sat_rI as [r' [r'_rI sat_r']].
destruct (sat_split H0 sat_rL) as [r1 [r2 [r1r2_r [sat_r1 sat_r2]]]].
eapply sat_leq.
apply IHprf_deriv2.
exists (!r'). exists (!r':*:rL). intuition.
apply leq_refl. reflexivity.
exists r'. intuition.
apply sat_leq with (r1:=(!r' :*: r1) :*: r2).
apply sat_shift. assumption.
apply IHprf_deriv1.
exists (!r'). exists r1. intuition.
exists r'. intuition.
rewrite <- combine_assoc. apply combine_order.
apply leq_refl. reflexivity.
assumption.
rewrite combine_assoc.
eapply leq_trans. apply combine_order. eapply leq_trans. apply bang_codup. apply r'_rI.
apply leq_refl. reflexivity.
assumption.
Save.
Lemma single_soundness : forall r A B t U,
sat r A ->
prf nil (A::nil) t B U ->
sat r B.
intros r A B t U sat_r_A prf_A_B. eapply soundness.
apply prf_A_B.
simpl.
exists e. exists r. intuition.
rewrite e_combine_r. apply leq_refl. reflexivity.
exists e. intuition. apply leq_refl. apply bang_e.
destruct (SYN.VarSetProps.In_dec 0%nat U) as [in_U | notin_U].
(* the evidence was used by the proof *)
rewrite (VarSet.mem_1 in_U).
exists r. exists e. simpl. intuition.
rewrite r_combine_e. apply leq_refl. reflexivity.
(* the evidence was not used by the proof *)
rewrite (not_in_mem_false notin_U). simpl. trivial.
Save.
Lemma implies_soundness : forall r A B,
sat r A ->
implies A B ->
sat r B.
intros r A B r_A A_implies_B.
destruct A_implies_B as [t [U prf_ok]].
eapply single_soundness; eassumption.
Save.
Lemma res_formula : forall re,
sat (res_parse re) (resexpr_to_formula re).
Proof.
unfold resexpr_to_formula.
intro.
assert (sat e f_i) by (simpl;trivial).
apply sat_leq with (r1:=e:*:res_parse re).
generalize e f_i H. clear H.
induction re.
intros. unfold res_parse. simpl. eapply sat_leq. apply H. rewrite r_combine_e. apply leq_refl. reflexivity.
intros. unfold r_may. destruct a as [[|] c].
simpl. destruct (r_new c) as [rc|] _eqn: req. apply sat_leq with (r1:=(r :*: !rc) :*: res_parse re).
apply IHre. simpl. exists r. exists (!rc). repeat split.
apply leq_refl. reflexivity.
assumption.
destruct (r_new_match _ _ req) as [a [req' aeq]]. rewrite req'. simpl. exists rc. split.
apply leq_refl. reflexivity.
apply RA.leq_refl. apply RA.eq_symm. rewrite aeq. reflexivity.
apply leq_refl. apply eq_symm. apply combine_assoc.
rewrite (r_new_empty _ req). apply IHre. simpl. exists r. exists e. intuition.
rewrite r_combine_e. apply leq_refl. reflexivity.
simpl. destruct (r_new c) as [rc|] _eqn: req.
apply sat_leq with (r1:=(r :*: rc) :*: res_parse re).
apply IHre. simpl. exists r. exists rc. intuition.
destruct (r_new_match _ _ req) as [a [req' aeq]]. rewrite req'. simpl. rewrite aeq. apply leq_refl. reflexivity.
apply leq_refl. apply eq_symm. apply combine_assoc.
apply IHre. simpl. exists r. exists e. intuition.
apply leq_refl. rewrite r_combine_e. reflexivity.
rewrite (r_new_empty _ req). simpl. trivial.
apply leq_refl. rewrite e_combine_r. reflexivity.
Qed.
End MkILLSemantics.
(*
Local Variables:
coq-prog-args: ("-emacs-U" "-I" ".." "-R" "." "ILL")
End:
*)
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj16eqsynthconj4 : forall (lv0 : natural), (@eq natural (Succ lv0) (plus (Succ Zero) lv0)).
Admitted.
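(* A sketch of why this holds, assuming plus recurses on its first
   argument as in the usual definition: plus (Succ Zero) lv0 reduces to
   Succ (plus Zero lv0) and then to Succ lv0, so intros; reflexivity
   would close the goal. It is left Admitted so QuickChick can test it. *)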
QuickChick conj16eqsynthconj4.
|
State Before: R : Type u
S : Type v
a b : R
n✝ m : ℕ
inst✝ : Semiring R
p✝ q r p : R[X]
n : ℕ
hp : p ≠ 0
⊢ trailingDegree p = ↑n ↔ natTrailingDegree p = n
State After: R : Type u
S : Type v
a b : R
n✝ m : ℕ
inst✝ : Semiring R
p✝ q r p : R[X]
n : ℕ
hp : p ≠ 0
⊢ ↑(natTrailingDegree p) = ↑n ↔ natTrailingDegree p = n
Tactic: rw [trailingDegree_eq_natTrailingDegree hp]
State Before: R : Type u
S : Type v
a b : R
n✝ m : ℕ
inst✝ : Semiring R
p✝ q r p : R[X]
n : ℕ
hp : p ≠ 0
⊢ ↑(natTrailingDegree p) = ↑n ↔ natTrailingDegree p = n
State After: no goals
Tactic: exact WithTop.coe_eq_coe
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
! This file was ported from Lean 3 source module algebra.big_operators.multiset.basic
! leanprover-community/mathlib commit 6c5f73fd6f6cc83122788a80a27cdd54663609f4
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.List.BigOperators.Basic
import Mathbin.Data.Multiset.Basic
/-!
# Sums and products over multisets
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we define products and sums indexed by multisets. This is later used to define products
and sums indexed by finite sets.
## Main declarations
* `multiset.prod`: `s.prod` is the product of all elements of `s`. Not to be confused with
  the cartesian product `multiset.product`.
* `multiset.sum`: `s.sum` is the sum of all elements of `s`.
## Implementation notes
Nov 2022: To speed the Lean 4 port, lemmas requiring extra algebra imports
(`data.list.big_operators.lemmas` rather than `.basic`) have been moved to a separate file,
`algebra.big_operators.multiset.lemmas`. This split does not need to be permanent.
-/
variable {ι α β γ : Type _}
namespace Multiset
section CommMonoid
variable [CommMonoid α] {s t : Multiset α} {a : α} {m : Multiset ι} {f g : ι → α}
#print Multiset.prod /-
/-- Product of a multiset given a commutative monoid structure on `α`.
`prod {a, b, c} = a * b * c` -/
@[to_additive
"Sum of a multiset given a commutative additive monoid structure on `α`.\n `sum {a, b, c} = a + b + c`"]
def prod : Multiset α → α :=
foldr (· * ·) (fun x y z => by simp [mul_left_comm]) 1
#align multiset.prod Multiset.prod
#align multiset.sum Multiset.sum
-/
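-- A quick sanity check (a sketch, not part of the ported source): with
-- `prod` defined as `foldr (· * ·) … 1`, `prod (2 ::ₘ 3 ::ₘ 0)` computes to
-- `2 * (3 * 1)`, matching the `prod {a, b, c} = a * b * c` reading above.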
/- warning: multiset.prod_eq_foldr -> Multiset.prod_eq_foldr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (s : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 s) (Multiset.foldr.{u1, u1} α α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))) (fun (x : α) (y : α) (z : α) => Eq.mpr.{0} (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x z))) True (id_tag Tactic.IdTag.simp (Eq.{1} Prop (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x z))) True) (Eq.trans.{1} Prop (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x z))) (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z))) True ((fun (a : α) (a_1 : α) (e_1 : Eq.{succ u1} α a a_1) (ᾰ : α) (ᾰ_1 : α) (e_2 : Eq.{succ u1} α ᾰ ᾰ_1) => congr.{succ u1, 1} α Prop (Eq.{succ u1} α a) (Eq.{succ u1} α a_1) ᾰ ᾰ_1 (congr_arg.{succ u1, succ u1} α (α -> Prop) a a_1 (Eq.{succ u1} α) e_1) e_2) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α 
_inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (rfl.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (mul_left_comm.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1) y x z)) (propext (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z))) True (eq_self_iff_true.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)))))) trivial) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))))) s)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (s : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 s) (Multiset.foldr.{u1, u1} α α (fun ([email protected]._hyg.129 : α) ([email protected]._hyg.131 : α) => HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) [email protected]._hyg.129 [email protected]._hyg.131) (fun (x : α) (y : α) (z : α) => of_eq_true (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) y (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x z))) (Eq.trans.{1} Prop (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) y (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x z))) (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) y z))) True (congrArg.{succ u1, 1} α Prop (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) y (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) y z)) (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z))) (mul_left_comm.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1) y x z)) (eq_self.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x 
(HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) y z))))) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) s)
Case conversion may be inaccurate. Consider using '#align multiset.prod_eq_foldr Multiset.prod_eq_foldrₓ'. -/
@[to_additive]
theorem prod_eq_foldr (s : Multiset α) :
prod s = foldr (· * ·) (fun x y z => by simp [mul_left_comm]) 1 s :=
rfl
#align multiset.prod_eq_foldr Multiset.prod_eq_foldr
#align multiset.sum_eq_foldr Multiset.sum_eq_foldr
/- warning: multiset.prod_eq_foldl -> Multiset.prod_eq_foldl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (s : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 s) (Multiset.foldl.{u1, u1} α α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))) (fun (x : α) (y : α) (z : α) => Eq.mpr.{0} (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x z) y)) True (id_tag Tactic.IdTag.simp (Eq.{1} Prop (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x z) y)) True) (Eq.trans.{1} Prop (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x z) y)) (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z)) True ((fun (a : α) (a_1 : α) (e_1 : Eq.{succ u1} α a a_1) (ᾰ : α) (ᾰ_1 : α) (e_2 : Eq.{succ u1} α ᾰ ᾰ_1) => congr.{succ u1, 1} α Prop (Eq.{succ u1} α a) (Eq.{succ u1} α a_1) ᾰ ᾰ_1 (congr_arg.{succ u1, succ u1} α (α -> Prop) a a_1 (Eq.{succ u1} α) e_1) e_2) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α 
_inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (rfl.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x z) y) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (mul_right_comm.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1) x z y)) (propext (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z)) True (eq_self_iff_true.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z))))) trivial) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))))) s)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (s : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 s) (Multiset.foldl.{u1, u1} α α (fun ([email protected]._hyg.189 : α) ([email protected]._hyg.191 : α) => HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) [email protected]._hyg.189 [email protected]._hyg.191) (fun (x : α) (y : α) (z : α) => of_eq_true (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x z) y)) (Eq.trans.{1} Prop (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x z) y)) (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x y) z)) True (congrArg.{succ u1, 1} α Prop (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x z) y) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (Semigroup.toMul.{u1} α (CommSemigroup.toSemigroup.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1)))) x y) z) (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z)) (mul_right_comm.{u1} α (CommMonoid.toCommSemigroup.{u1} α _inst_1) x z y)) (eq_self.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) 
(HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) x y) z)))) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) s)
Case conversion may be inaccurate. Consider using '#align multiset.prod_eq_foldl Multiset.prod_eq_foldlₓ'. -/
@[to_additive]
theorem prod_eq_foldl (s : Multiset α) :
prod s = foldl (· * ·) (fun x y z => by simp [mul_right_comm]) 1 s :=
(foldr_swap _ _ _ _).trans (by simp [mul_comm])
#align multiset.prod_eq_foldl Multiset.prod_eq_foldl
#align multiset.sum_eq_foldl Multiset.sum_eq_foldl
/- warning: multiset.coe_prod -> Multiset.coe_prod is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (l : List.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (List.{u1} α) (Multiset.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (List.{u1} α) (Multiset.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (List.{u1} α) (Multiset.{u1} α) (coeBase.{succ u1, succ u1} (List.{u1} α) (Multiset.{u1} α) (Multiset.hasCoe.{u1} α)))) l)) (List.prod.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) l)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (l : List.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Multiset.ofList.{u1} α l)) (List.prod.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) l)
Case conversion may be inaccurate. Consider using '#align multiset.coe_prod Multiset.coe_prodₓ'. -/
@[simp, norm_cast, to_additive]
theorem coe_prod (l : List α) : prod ↑l = l.Prod :=
prod_eq_foldl _
#align multiset.coe_prod Multiset.coe_prod
#align multiset.coe_sum Multiset.coe_sum
/- warning: multiset.prod_to_list -> Multiset.prod_toList is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (s : Multiset.{u1} α), Eq.{succ u1} α (List.prod.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (Multiset.toList.{u1} α s)) (Multiset.prod.{u1} α _inst_1 s)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (s : Multiset.{u1} α), Eq.{succ u1} α (List.prod.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Multiset.toList.{u1} α s)) (Multiset.prod.{u1} α _inst_1 s)
Case conversion may be inaccurate. Consider using '#align multiset.prod_to_list Multiset.prod_toListₓ'. -/
@[simp, to_additive]
theorem prod_toList (s : Multiset α) : s.toList.Prod = s.Prod :=
by
conv_rhs => rw [← coe_to_list s]
rw [coe_prod]
#align multiset.prod_to_list Multiset.prod_toList
#align multiset.sum_to_list Multiset.sum_toList
/- warning: multiset.prod_zero -> Multiset.prod_zero is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α], Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (OfNat.ofNat.{u1} (Multiset.{u1} α) 0 (OfNat.mk.{u1} (Multiset.{u1} α) 0 (Zero.zero.{u1} (Multiset.{u1} α) (Multiset.hasZero.{u1} α))))) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α], Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (OfNat.ofNat.{u1} (Multiset.{u1} α) 0 (Zero.toOfNat0.{u1} (Multiset.{u1} α) (Multiset.instZeroMultiset.{u1} α)))) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))
Case conversion may be inaccurate. Consider using '#align multiset.prod_zero Multiset.prod_zeroₓ'. -/
@[simp, to_additive]
theorem prod_zero : @prod α _ 0 = 1 :=
rfl
#align multiset.prod_zero Multiset.prod_zero
#align multiset.sum_zero Multiset.sum_zero
/- warning: multiset.prod_cons -> Multiset.prod_cons is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (a : α) (s : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Multiset.cons.{u1} α a s)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a (Multiset.prod.{u1} α _inst_1 s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (a : α) (s : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Multiset.cons.{u1} α a s)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a (Multiset.prod.{u1} α _inst_1 s))
Case conversion may be inaccurate. Consider using '#align multiset.prod_cons Multiset.prod_consₓ'. -/
@[simp, to_additive]
theorem prod_cons (a : α) (s) : prod (a ::ₘ s) = a * prod s :=
foldr_cons _ _ _ _ _
#align multiset.prod_cons Multiset.prod_cons
#align multiset.sum_cons Multiset.sum_cons
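/- Illustrative example (not part of the original port): `prod_cons` together
with `prod_zero` evaluates the product of an explicit multiset one element at
a time. -/
example (a b : α) : prod (a ::ₘ b ::ₘ 0) = a * (b * 1) := by
  rw [prod_cons, prod_cons, prod_zero]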
/- warning: multiset.prod_erase -> Multiset.prod_erase is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {s : Multiset.{u1} α} {a : α} [_inst_2 : DecidableEq.{succ u1} α], (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) a s) -> (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a (Multiset.prod.{u1} α _inst_1 (Multiset.erase.{u1} α (fun (a : α) (b : α) => _inst_2 a b) s a))) (Multiset.prod.{u1} α _inst_1 s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {s : Multiset.{u1} α} {a : α} [_inst_2 : DecidableEq.{succ u1} α], (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) a s) -> (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a (Multiset.prod.{u1} α _inst_1 (Multiset.erase.{u1} α (fun (a : α) (b : α) => _inst_2 a b) s a))) (Multiset.prod.{u1} α _inst_1 s))
Case conversion may be inaccurate. Consider using '#align multiset.prod_erase Multiset.prod_eraseₓ'. -/
@[simp, to_additive]
theorem prod_erase [DecidableEq α] (h : a ∈ s) : a * (s.eraseₓ a).Prod = s.Prod := by
rw [← s.coe_to_list, coe_erase, coe_prod, coe_prod, List.prod_erase (mem_to_list.2 h)]
#align multiset.prod_erase Multiset.prod_erase
#align multiset.sum_erase Multiset.sum_erase
/- warning: multiset.prod_map_erase -> Multiset.prod_map_erase is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : CommMonoid.{u2} α] {m : Multiset.{u1} ι} {f : ι -> α} [_inst_2 : DecidableEq.{succ u1} ι] {a : ι}, (Membership.Mem.{u1, u1} ι (Multiset.{u1} ι) (Multiset.hasMem.{u1} ι) a m) -> (Eq.{succ u2} α (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toHasMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) (f a) (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α f (Multiset.erase.{u1} ι (fun (a : ι) (b : ι) => _inst_2 a b) m a)))) (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α f m)))
but is expected to have type
forall {ι : Type.{u2}} {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {m : Multiset.{u2} ι} {f : ι -> α} [_inst_2 : DecidableEq.{succ u2} ι] {a : ι}, (Membership.mem.{u2, u2} ι (Multiset.{u2} ι) (Multiset.instMembershipMultiset.{u2} ι) a m) -> (Eq.{succ u1} α (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (f a) (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u2, u1} ι α f (Multiset.erase.{u2} ι (fun (a : ι) (b : ι) => _inst_2 a b) m a)))) (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u2, u1} ι α f m)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_erase Multiset.prod_map_eraseₓ'. -/
@[simp, to_additive]
theorem prod_map_erase [DecidableEq ι] {a : ι} (h : a ∈ m) :
f a * ((m.eraseₓ a).map f).Prod = (m.map f).Prod := by
rw [← m.coe_to_list, coe_erase, coe_map, coe_map, coe_prod, coe_prod,
List.prod_map_erase f (mem_to_list.2 h)]
#align multiset.prod_map_erase Multiset.prod_map_erase
#align multiset.sum_map_erase Multiset.sum_map_erase
#print Multiset.prod_singleton /-
@[simp, to_additive]
theorem prod_singleton (a : α) : prod {a} = a := by
simp only [mul_one, prod_cons, ← cons_zero, eq_self_iff_true, prod_zero]
#align multiset.prod_singleton Multiset.prod_singleton
#align multiset.sum_singleton Multiset.sum_singleton
-/
/- warning: multiset.prod_pair -> Multiset.prod_pair is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (a : α) (b : α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Insert.insert.{u1, u1} α (Multiset.{u1} α) (Multiset.hasInsert.{u1} α) a (Singleton.singleton.{u1, u1} α (Multiset.{u1} α) (Multiset.hasSingleton.{u1} α) b))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (a : α) (b : α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Insert.insert.{u1, u1} α (Multiset.{u1} α) (Multiset.instInsertMultiset.{u1} α) a (Singleton.singleton.{u1, u1} α (Multiset.{u1} α) (Multiset.instSingletonMultiset.{u1} α) b))) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b)
Case conversion may be inaccurate. Consider using '#align multiset.prod_pair Multiset.prod_pairₓ'. -/
@[to_additive]
theorem prod_pair (a b : α) : ({a, b} : Multiset α).Prod = a * b := by
rw [insert_eq_cons, prod_cons, prod_singleton]
#align multiset.prod_pair Multiset.prod_pair
#align multiset.sum_pair Multiset.sum_pair
/- warning: multiset.prod_add -> Multiset.prod_add is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (s : Multiset.{u1} α) (t : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (HAdd.hAdd.{u1, u1, u1} (Multiset.{u1} α) (Multiset.{u1} α) (Multiset.{u1} α) (instHAdd.{u1} (Multiset.{u1} α) (Multiset.hasAdd.{u1} α)) s t)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (Multiset.prod.{u1} α _inst_1 s) (Multiset.prod.{u1} α _inst_1 t))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (s : Multiset.{u1} α) (t : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (HAdd.hAdd.{u1, u1, u1} (Multiset.{u1} α) (Multiset.{u1} α) (Multiset.{u1} α) (instHAdd.{u1} (Multiset.{u1} α) (Multiset.instAddMultiset.{u1} α)) s t)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (Multiset.prod.{u1} α _inst_1 s) (Multiset.prod.{u1} α _inst_1 t))
Case conversion may be inaccurate. Consider using '#align multiset.prod_add Multiset.prod_addₓ'. -/
@[simp, to_additive]
theorem prod_add (s t : Multiset α) : prod (s + t) = prod s * prod t :=
Quotient.inductionOn₂ s t fun l₁ l₂ => by simp
#align multiset.prod_add Multiset.prod_add
#align multiset.sum_add Multiset.sum_add
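/- Illustrative example (not part of the original port): applying `prod_add`
twice factors the product over a repeated summand. -/
example (s t : Multiset α) : prod (s + t + t) = prod s * prod t * prod t := by
  rw [prod_add, prod_add]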
/- warning: multiset.prod_nsmul -> Multiset.prod_nsmul is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (m : Multiset.{u1} α) (n : Nat), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (SMul.smul.{0, u1} Nat (Multiset.{u1} α) (AddMonoid.SMul.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) n m)) (HPow.hPow.{u1, 0, u1} α Nat α (instHPow.{u1, 0} α Nat (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (Multiset.prod.{u1} α _inst_1 m) n)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (m : Multiset.{u1} α) (n : Nat), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (HSMul.hSMul.{0, u1, u1} Nat (Multiset.{u1} α) (Multiset.{u1} α) (instHSMul.{0, u1} Nat (Multiset.{u1} α) (AddMonoid.SMul.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α))))))) n m)) (HPow.hPow.{u1, 0, u1} α Nat α (instHPow.{u1, 0} α Nat (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (Multiset.prod.{u1} α _inst_1 m) n)
Case conversion may be inaccurate. Consider using '#align multiset.prod_nsmul Multiset.prod_nsmulₓ'. -/
theorem prod_nsmul (m : Multiset α) : ∀ n : ℕ, (n • m).Prod = m.Prod ^ n
| 0 => by
rw [zero_nsmul, pow_zero]
rfl
| n + 1 => by rw [add_nsmul, one_nsmul, pow_add, pow_one, prod_add, prod_nsmul m n]
#align multiset.prod_nsmul Multiset.prod_nsmul
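/- Illustrative instance (not part of the original port) of `prod_nsmul` at
`n = 2`: since `2 • m = m + m`, its product is the square. -/
example (m : Multiset α) : (2 • m).Prod = m.Prod ^ 2 :=
  prod_nsmul m 2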
#print Multiset.prod_replicate /-
@[simp, to_additive]
theorem prod_replicate (n : ℕ) (a : α) : (replicate n a).Prod = a ^ n := by
simp [replicate, List.prod_replicate]
#align multiset.prod_replicate Multiset.prod_replicate
#align multiset.sum_replicate Multiset.sum_replicate
-/
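/- Illustrative instance (not part of the original port) of `prod_replicate`:
three copies of `a` multiply to `a ^ 3`. -/
example (a : α) : (replicate 3 a).Prod = a ^ 3 :=
  prod_replicate 3 a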
/- warning: multiset.prod_map_eq_pow_single -> Multiset.prod_map_eq_pow_single is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : CommMonoid.{u2} α] {m : Multiset.{u1} ι} {f : ι -> α} [_inst_2 : DecidableEq.{succ u1} ι] (i : ι), (forall (i' : ι), (Ne.{succ u1} ι i' i) -> (Membership.Mem.{u1, u1} ι (Multiset.{u1} ι) (Multiset.hasMem.{u1} ι) i' m) -> (Eq.{succ u2} α (f i') (OfNat.ofNat.{u2} α 1 (OfNat.mk.{u2} α 1 (One.one.{u2} α (MulOneClass.toHasOne.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))))))) -> (Eq.{succ u2} α (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α f m)) (HPow.hPow.{u2, 0, u2} α Nat α (instHPow.{u2, 0} α Nat (Monoid.Pow.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))) (f i) (Multiset.count.{u1} ι (fun (a : ι) (b : ι) => _inst_2 a b) i m)))
but is expected to have type
forall {ι : Type.{u2}} {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {m : Multiset.{u2} ι} {f : ι -> α} [_inst_2 : DecidableEq.{succ u2} ι] (i : ι), (forall (i' : ι), (Ne.{succ u2} ι i' i) -> (Membership.mem.{u2, u2} ι (Multiset.{u2} ι) (Multiset.instMembershipMultiset.{u2} ι) i' m) -> (Eq.{succ u1} α (f i') (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))))) -> (Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u2, u1} ι α f m)) (HPow.hPow.{u1, 0, u1} α Nat α (instHPow.{u1, 0} α Nat (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (f i) (Multiset.count.{u2} ι (fun (a : ι) (b : ι) => _inst_2 a b) i m)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_eq_pow_single Multiset.prod_map_eq_pow_singleₓ'. -/
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (i' «expr ≠ » i) -/
@[to_additive]
theorem prod_map_eq_pow_single [DecidableEq ι] (i : ι)
(hf : ∀ (i') (_ : i' ≠ i), i' ∈ m → f i' = 1) : (m.map f).Prod = f i ^ m.count i :=
by
induction' m using Quotient.inductionOn with l
simp [List.prod_map_eq_pow_single i f hf]
#align multiset.prod_map_eq_pow_single Multiset.prod_map_eq_pow_single
#align multiset.sum_map_eq_nsmul_single Multiset.sum_map_eq_nsmul_single
/- warning: multiset.prod_eq_pow_single -> Multiset.prod_eq_pow_single is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {s : Multiset.{u1} α} [_inst_2 : DecidableEq.{succ u1} α] (a : α), (forall (a' : α), (Ne.{succ u1} α a' a) -> (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) a' s) -> (Eq.{succ u1} α a' (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))))))) -> (Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 s) (HPow.hPow.{u1, 0, u1} α Nat α (instHPow.{u1, 0} α Nat (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) a (Multiset.count.{u1} α (fun (a : α) (b : α) => _inst_2 a b) a s)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {s : Multiset.{u1} α} [_inst_2 : DecidableEq.{succ u1} α] (a : α), (forall (a' : α), (Ne.{succ u1} α a' a) -> (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) a' s) -> (Eq.{succ u1} α a' (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))))) -> (Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 s) (HPow.hPow.{u1, 0, u1} α Nat α (instHPow.{u1, 0} α Nat (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) a (Multiset.count.{u1} α (fun (a : α) (b : α) => _inst_2 a b) a s)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_eq_pow_single Multiset.prod_eq_pow_singleₓ'. -/
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (a' «expr ≠ » a) -/
@[to_additive]
theorem prod_eq_pow_single [DecidableEq α] (a : α) (h : ∀ (a') (_ : a' ≠ a), a' ∈ s → a' = 1) :
s.Prod = a ^ s.count a :=
by
induction' s using Quotient.inductionOn with l
simp [List.prod_eq_pow_single a h]
#align multiset.prod_eq_pow_single Multiset.prod_eq_pow_single
#align multiset.sum_eq_nsmul_single Multiset.sum_eq_nsmul_single
#print Multiset.pow_count /-
@[to_additive]
theorem pow_count [DecidableEq α] (a : α) : a ^ s.count a = (s.filterₓ (Eq a)).Prod := by
rw [filter_eq, prod_replicate]
#align multiset.pow_count Multiset.pow_count
#align multiset.nsmul_count Multiset.nsmul_count
-/
/- warning: multiset.prod_hom -> Multiset.prod_hom is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : CommMonoid.{u2} β] (s : Multiset.{u1} α) {F : Type.{u3}} [_inst_3 : MonoidHomClass.{u3, u1, u2} F α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))] (f : F), Eq.{succ u2} β (Multiset.prod.{u2} β _inst_2 (Multiset.map.{u1, u2} α β (coeFn.{succ u3, max (succ u1) (succ u2)} F (fun (_x : F) => α -> β) (FunLike.hasCoeToFun.{succ u3, succ u1, succ u2} F α (fun (_x : α) => β) (MulHomClass.toFunLike.{u3, u1, u2} F α β (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) (MonoidHomClass.toMulHomClass.{u3, u1, u2} F α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2)) _inst_3))) f) s)) (coeFn.{succ u3, max (succ u1) (succ u2)} F (fun (_x : F) => α -> β) (FunLike.hasCoeToFun.{succ u3, succ u1, succ u2} F α (fun (_x : α) => β) (MulHomClass.toFunLike.{u3, u1, u2} F α β (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) (MonoidHomClass.toMulHomClass.{u3, u1, u2} F α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2)) _inst_3))) f (Multiset.prod.{u1} α _inst_1 s))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} [_inst_1 : CommMonoid.{u2} α] [_inst_2 : CommMonoid.{u3} β] (s : Multiset.{u2} α) {F : Type.{u1}} [_inst_3 : MonoidHomClass.{u1, u2, u3} F α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2))] (f : F), Eq.{succ u3} β (Multiset.prod.{u3} β _inst_2 (Multiset.map.{u2, u3} α β (FunLike.coe.{succ u1, succ u2, succ u3} F α (fun (_x : α) => (fun ([email protected]._hyg.2391 : α) => β) _x) (MulHomClass.toFunLike.{u1, u2, u3} F α β (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))) (MulOneClass.toMul.{u3} β (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2))) (MonoidHomClass.toMulHomClass.{u1, u2, u3} F α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2)) _inst_3)) f) s)) (FunLike.coe.{succ u1, succ u2, succ u3} F α (fun (_x : α) => (fun ([email protected]._hyg.2391 : α) => β) _x) (MulHomClass.toFunLike.{u1, u2, u3} F α β (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))) (MulOneClass.toMul.{u3} β (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2))) (MonoidHomClass.toMulHomClass.{u1, u2, u3} F α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2)) _inst_3)) f (Multiset.prod.{u2} α _inst_1 s))
Case conversion may be inaccurate. Consider using '#align multiset.prod_hom Multiset.prod_homₓ'. -/
@[to_additive]
theorem prod_hom [CommMonoid β] (s : Multiset α) {F : Type _} [MonoidHomClass F α β] (f : F) :
(s.map f).Prod = f s.Prod :=
Quotient.inductionOn s fun l => by simp only [l.prod_hom f, quot_mk_to_coe, coe_map, coe_prod]
#align multiset.prod_hom Multiset.prod_hom
#align multiset.sum_hom Multiset.sum_hom
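/- Usage sketch (not part of the original port): via the `MonoidHomClass`
instance for bundled homs, `prod_hom` applies in particular to any
`f : α →* β`. -/
example [CommMonoid β] (s : Multiset α) (f : α →* β) :
    (s.map f).Prod = f s.Prod :=
  s.prod_hom f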
/- warning: multiset.prod_hom' -> Multiset.prod_hom' is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} {β : Type.{u3}} [_inst_1 : CommMonoid.{u2} α] [_inst_2 : CommMonoid.{u3} β] (s : Multiset.{u1} ι) {F : Type.{u4}} [_inst_3 : MonoidHomClass.{u4, u2, u3} F α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2))] (f : F) (g : ι -> α), Eq.{succ u3} β (Multiset.prod.{u3} β _inst_2 (Multiset.map.{u1, u3} ι β (fun (i : ι) => coeFn.{succ u4, max (succ u2) (succ u3)} F (fun (_x : F) => α -> β) (FunLike.hasCoeToFun.{succ u4, succ u2, succ u3} F α (fun (_x : α) => β) (MulHomClass.toFunLike.{u4, u2, u3} F α β (MulOneClass.toHasMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))) (MulOneClass.toHasMul.{u3} β (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2))) (MonoidHomClass.toMulHomClass.{u4, u2, u3} F α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2)) _inst_3))) f (g i)) s)) (coeFn.{succ u4, max (succ u2) (succ u3)} F (fun (_x : F) => α -> β) (FunLike.hasCoeToFun.{succ u4, succ u2, succ u3} F α (fun (_x : α) => β) (MulHomClass.toFunLike.{u4, u2, u3} F α β (MulOneClass.toHasMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))) (MulOneClass.toHasMul.{u3} β (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2))) (MonoidHomClass.toMulHomClass.{u4, u2, u3} F α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2)) _inst_3))) f (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α g s)))
but is expected to have type
forall {ι : Type.{u3}} {α : Type.{u1}} {β : Type.{u4}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : CommMonoid.{u4} β] (s : Multiset.{u3} ι) {F : Type.{u2}} [_inst_3 : MonoidHomClass.{u2, u1, u4} F α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u4} β (CommMonoid.toMonoid.{u4} β _inst_2))] (f : F) (g : ι -> α), Eq.{succ u4} β (Multiset.prod.{u4} β _inst_2 (Multiset.map.{u3, u4} ι β (fun (i : ι) => FunLike.coe.{succ u2, succ u1, succ u4} F α (fun (_x : α) => (fun ([email protected]._hyg.2391 : α) => β) _x) (MulHomClass.toFunLike.{u2, u1, u4} F α β (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (MulOneClass.toMul.{u4} β (Monoid.toMulOneClass.{u4} β (CommMonoid.toMonoid.{u4} β _inst_2))) (MonoidHomClass.toMulHomClass.{u2, u1, u4} F α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u4} β (CommMonoid.toMonoid.{u4} β _inst_2)) _inst_3)) f (g i)) s)) (FunLike.coe.{succ u2, succ u1, succ u4} F α (fun (_x : α) => (fun ([email protected]._hyg.2391 : α) => β) _x) (MulHomClass.toFunLike.{u2, u1, u4} F α β (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (MulOneClass.toMul.{u4} β (Monoid.toMulOneClass.{u4} β (CommMonoid.toMonoid.{u4} β _inst_2))) (MonoidHomClass.toMulHomClass.{u2, u1, u4} F α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u4} β (CommMonoid.toMonoid.{u4} β _inst_2)) _inst_3)) f (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u3, u1} ι α g s)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_hom' Multiset.prod_hom'ₓ'. -/
@[to_additive]
theorem prod_hom' [CommMonoid β] (s : Multiset ι) {F : Type _} [MonoidHomClass F α β] (f : F)
(g : ι → α) : (s.map fun i => f <| g i).Prod = f (s.map g).Prod :=
by
convert (s.map g).prod_hom f
exact (map_map _ _ _).symm
#align multiset.prod_hom' Multiset.prod_hom'
#align multiset.sum_hom' Multiset.sum_hom'
/- warning: multiset.prod_hom₂ -> Multiset.prod_hom₂ is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u4}} [_inst_1 : CommMonoid.{u2} α] [_inst_2 : CommMonoid.{u3} β] [_inst_3 : CommMonoid.{u4} γ] (s : Multiset.{u1} ι) (f : α -> β -> γ), (forall (a : α) (b : α) (c : β) (d : β), Eq.{succ u4} γ (f (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toHasMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) a b) (HMul.hMul.{u3, u3, u3} β β β (instHMul.{u3} β (MulOneClass.toHasMul.{u3} β (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2)))) c d)) (HMul.hMul.{u4, u4, u4} γ γ γ (instHMul.{u4} γ (MulOneClass.toHasMul.{u4} γ (Monoid.toMulOneClass.{u4} γ (CommMonoid.toMonoid.{u4} γ _inst_3)))) (f a c) (f b d))) -> (Eq.{succ u4} γ (f (OfNat.ofNat.{u2} α 1 (OfNat.mk.{u2} α 1 (One.one.{u2} α (MulOneClass.toHasOne.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))))) (OfNat.ofNat.{u3} β 1 (OfNat.mk.{u3} β 1 (One.one.{u3} β (MulOneClass.toHasOne.{u3} β (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2))))))) (OfNat.ofNat.{u4} γ 1 (OfNat.mk.{u4} γ 1 (One.one.{u4} γ (MulOneClass.toHasOne.{u4} γ (Monoid.toMulOneClass.{u4} γ (CommMonoid.toMonoid.{u4} γ _inst_3))))))) -> (forall (f₁ : ι -> α) (f₂ : ι -> β), Eq.{succ u4} γ (Multiset.prod.{u4} γ _inst_3 (Multiset.map.{u1, u4} ι γ (fun (i : ι) => f (f₁ i) (f₂ i)) s)) (f (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α f₁ s)) (Multiset.prod.{u3} β _inst_2 (Multiset.map.{u1, u3} ι β f₂ s))))
but is expected to have type
forall {ι : Type.{u2}} {α : Type.{u1}} {β : Type.{u4}} {γ : Type.{u3}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : CommMonoid.{u4} β] [_inst_3 : CommMonoid.{u3} γ] (s : Multiset.{u2} ι) (f : α -> β -> γ), (forall (a : α) (b : α) (c : β) (d : β), Eq.{succ u3} γ (f (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b) (HMul.hMul.{u4, u4, u4} β β β (instHMul.{u4} β (MulOneClass.toMul.{u4} β (Monoid.toMulOneClass.{u4} β (CommMonoid.toMonoid.{u4} β _inst_2)))) c d)) (HMul.hMul.{u3, u3, u3} γ γ γ (instHMul.{u3} γ (MulOneClass.toMul.{u3} γ (Monoid.toMulOneClass.{u3} γ (CommMonoid.toMonoid.{u3} γ _inst_3)))) (f a c) (f b d))) -> (Eq.{succ u3} γ (f (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (OfNat.ofNat.{u4} β 1 (One.toOfNat1.{u4} β (Monoid.toOne.{u4} β (CommMonoid.toMonoid.{u4} β _inst_2))))) (OfNat.ofNat.{u3} γ 1 (One.toOfNat1.{u3} γ (Monoid.toOne.{u3} γ (CommMonoid.toMonoid.{u3} γ _inst_3))))) -> (forall (f₁ : ι -> α) (f₂ : ι -> β), Eq.{succ u3} γ (Multiset.prod.{u3} γ _inst_3 (Multiset.map.{u2, u3} ι γ (fun (i : ι) => f (f₁ i) (f₂ i)) s)) (f (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u2, u1} ι α f₁ s)) (Multiset.prod.{u4} β _inst_2 (Multiset.map.{u2, u4} ι β f₂ s))))
Case conversion may be inaccurate. Consider using '#align multiset.prod_hom₂ Multiset.prod_hom₂ₓ'. -/
@[to_additive]
theorem prod_hom₂ [CommMonoid β] [CommMonoid γ] (s : Multiset ι) (f : α → β → γ)
(hf : ∀ a b c d, f (a * b) (c * d) = f a c * f b d) (hf' : f 1 1 = 1) (f₁ : ι → α)
(f₂ : ι → β) : (s.map fun i => f (f₁ i) (f₂ i)).Prod = f (s.map f₁).Prod (s.map f₂).Prod :=
Quotient.inductionOn s fun l => by
simp only [l.prod_hom₂ f hf hf', quot_mk_to_coe, coe_map, coe_prod]
#align multiset.prod_hom₂ Multiset.prod_hom₂
#align multiset.sum_hom₂ Multiset.sum_hom₂
/- warning: multiset.prod_hom_rel -> Multiset.prod_hom_rel is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} {β : Type.{u3}} [_inst_1 : CommMonoid.{u2} α] [_inst_2 : CommMonoid.{u3} β] (s : Multiset.{u1} ι) {r : α -> β -> Prop} {f : ι -> α} {g : ι -> β}, (r (OfNat.ofNat.{u2} α 1 (OfNat.mk.{u2} α 1 (One.one.{u2} α (MulOneClass.toHasOne.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))))) (OfNat.ofNat.{u3} β 1 (OfNat.mk.{u3} β 1 (One.one.{u3} β (MulOneClass.toHasOne.{u3} β (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2))))))) -> (forall {{a : ι}} {{b : α}} {{c : β}}, (r b c) -> (r (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toHasMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) (f a) b) (HMul.hMul.{u3, u3, u3} β β β (instHMul.{u3} β (MulOneClass.toHasMul.{u3} β (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2)))) (g a) c))) -> (r (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α f s)) (Multiset.prod.{u3} β _inst_2 (Multiset.map.{u1, u3} ι β g s)))
but is expected to have type
forall {ι : Type.{u2}} {α : Type.{u1}} {β : Type.{u3}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : CommMonoid.{u3} β] (s : Multiset.{u2} ι) {r : α -> β -> Prop} {f : ι -> α} {g : ι -> β}, (r (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (OfNat.ofNat.{u3} β 1 (One.toOfNat1.{u3} β (Monoid.toOne.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2))))) -> (forall {{a : ι}} {{b : α}} {{c : β}}, (r b c) -> (r (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (f a) b) (HMul.hMul.{u3, u3, u3} β β β (instHMul.{u3} β (MulOneClass.toMul.{u3} β (Monoid.toMulOneClass.{u3} β (CommMonoid.toMonoid.{u3} β _inst_2)))) (g a) c))) -> (r (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u2, u1} ι α f s)) (Multiset.prod.{u3} β _inst_2 (Multiset.map.{u2, u3} ι β g s)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_hom_rel Multiset.prod_hom_relₓ'. -/
@[to_additive]
theorem prod_hom_rel [CommMonoid β] (s : Multiset ι) {r : α → β → Prop} {f : ι → α} {g : ι → β}
(h₁ : r 1 1) (h₂ : ∀ ⦃a b c⦄, r b c → r (f a * b) (g a * c)) :
r (s.map f).Prod (s.map g).Prod :=
Quotient.inductionOn s fun l => by
simp only [l.prod_hom_rel h₁ h₂, quot_mk_to_coe, coe_map, coe_prod]
#align multiset.prod_hom_rel Multiset.prod_hom_rel
#align multiset.sum_hom_rel Multiset.sum_hom_rel
/- warning: multiset.prod_map_one -> Multiset.prod_map_one is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : CommMonoid.{u2} α] {m : Multiset.{u1} ι}, Eq.{succ u2} α (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α (fun (i : ι) => OfNat.ofNat.{u2} α 1 (OfNat.mk.{u2} α 1 (One.one.{u2} α (MulOneClass.toHasOne.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))))) m)) (OfNat.ofNat.{u2} α 1 (OfNat.mk.{u2} α 1 (One.one.{u2} α (MulOneClass.toHasOne.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))))))
but is expected to have type
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : CommMonoid.{u2} α] {m : Multiset.{u1} ι}, Eq.{succ u2} α (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α (fun (i : ι) => OfNat.ofNat.{u2} α 1 (One.toOfNat1.{u2} α (Monoid.toOne.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) m)) (OfNat.ofNat.{u2} α 1 (One.toOfNat1.{u2} α (Monoid.toOne.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_one Multiset.prod_map_oneₓ'. -/
@[to_additive]
theorem prod_map_one : prod (m.map fun i => (1 : α)) = 1 := by
rw [map_const, prod_replicate, one_pow]
#align multiset.prod_map_one Multiset.prod_map_one
#align multiset.sum_map_zero Multiset.sum_map_zero
/- warning: multiset.prod_map_mul -> Multiset.prod_map_mul is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : CommMonoid.{u2} α] {m : Multiset.{u1} ι} {f : ι -> α} {g : ι -> α}, Eq.{succ u2} α (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α (fun (i : ι) => HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toHasMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) (f i) (g i)) m)) (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toHasMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α f m)) (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α g m)))
but is expected to have type
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : CommMonoid.{u2} α] {m : Multiset.{u1} ι} {f : ι -> α} {g : ι -> α}, Eq.{succ u2} α (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α (fun (i : ι) => HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) (f i) (g i)) m)) (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α f m)) (Multiset.prod.{u2} α _inst_1 (Multiset.map.{u1, u2} ι α g m)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_mul Multiset.prod_map_mulₓ'. -/
@[simp, to_additive]
theorem prod_map_mul : (m.map fun i => f i * g i).Prod = (m.map f).Prod * (m.map g).Prod :=
m.prod_hom₂ (· * ·) mul_mul_mul_comm (mul_one _) _ _
#align multiset.prod_map_mul Multiset.prod_map_mul
#align multiset.sum_map_add Multiset.sum_map_add
/- warning: multiset.prod_map_neg -> Multiset.prod_map_neg is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : HasDistribNeg.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))] (s : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u1, u1} α α (Neg.neg.{u1} α (InvolutiveNeg.toHasNeg.{u1} α (HasDistribNeg.toHasInvolutiveNeg.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) _inst_2))) s)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HPow.hPow.{u1, 0, u1} α Nat α (instHPow.{u1, 0} α Nat (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (Neg.neg.{u1} α (InvolutiveNeg.toHasNeg.{u1} α (HasDistribNeg.toHasInvolutiveNeg.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) _inst_2)) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))))) (coeFn.{succ u1, succ u1} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (fun (_x : AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) => (Multiset.{u1} α) -> Nat) (AddMonoidHom.hasCoeToFun.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.card.{u1} α) s)) (Multiset.prod.{u1} α _inst_1 s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : HasDistribNeg.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))] (s : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u1, u1} α α (Neg.neg.{u1} α (InvolutiveNeg.toNeg.{u1} α (HasDistribNeg.toInvolutiveNeg.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) _inst_2))) s)) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) (HPow.hPow.{u1, 0, u1} α ((fun ([email protected]._hyg.403 : Multiset.{u1} α) => Nat) s) α (instHPow.{u1, 0} α ((fun ([email protected]._hyg.403 : Multiset.{u1} α) => Nat) s) (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (Neg.neg.{u1} α (InvolutiveNeg.toNeg.{u1} α (HasDistribNeg.toInvolutiveNeg.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) _inst_2)) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))) (FunLike.coe.{succ u1, succ u1, 1} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.{u1} α) (fun (_x : Multiset.{u1} α) => (fun ([email protected]._hyg.403 : Multiset.{u1} α) => Nat) _x) (AddHomClass.toFunLike.{u1, u1, 0} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.{u1} α) Nat (AddZeroClass.toAdd.{u1} (Multiset.{u1} α) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α))))))) (AddZeroClass.toAdd.{0} Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoidHomClass.toAddHomClass.{u1, u1, 0} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) 
(OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid) (AddMonoidHom.addMonoidHomClass.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)))) (Multiset.card.{u1} α) s)) (Multiset.prod.{u1} α _inst_1 s))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_neg Multiset.prod_map_negₓ'. -/
@[simp]
theorem prod_map_neg [HasDistribNeg α] (s : Multiset α) :
(s.map Neg.neg).Prod = (-1) ^ s.card * s.Prod :=
by
refine' Quotient.ind _ s
simp
#align multiset.prod_map_neg Multiset.prod_map_neg
#print Multiset.prod_map_pow /-
@[to_additive]
theorem prod_map_pow {n : ℕ} : (m.map fun i => f i ^ n).Prod = (m.map f).Prod ^ n :=
m.prod_hom' (powMonoidHom n : α →* α) f
#align multiset.prod_map_pow Multiset.prod_map_pow
#align multiset.sum_map_nsmul Multiset.sum_map_nsmul
-/
/- warning: multiset.prod_map_prod_map -> Multiset.prod_map_prod_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : CommMonoid.{u1} α] (m : Multiset.{u2} β) (n : Multiset.{u3} γ) {f : β -> γ -> α}, Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u2, u1} β α (fun (a : β) => Multiset.prod.{u1} α _inst_1 (Multiset.map.{u3, u1} γ α (fun (b : γ) => f a b) n)) m)) (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u3, u1} γ α (fun (b : γ) => Multiset.prod.{u1} α _inst_1 (Multiset.map.{u2, u1} β α (fun (a : β) => f a b) m)) n))
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u3}} {γ : Type.{u2}} [_inst_1 : CommMonoid.{u1} α] (m : Multiset.{u3} β) (n : Multiset.{u2} γ) {f : β -> γ -> α}, Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u3, u1} β α (fun (a : β) => Multiset.prod.{u1} α _inst_1 (Multiset.map.{u2, u1} γ α (fun (b : γ) => f a b) n)) m)) (Multiset.prod.{u1} α _inst_1 (Multiset.map.{u2, u1} γ α (fun (b : γ) => Multiset.prod.{u1} α _inst_1 (Multiset.map.{u3, u1} β α (fun (a : β) => f a b) m)) n))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_prod_map Multiset.prod_map_prod_mapₓ'. -/
@[to_additive]
theorem prod_map_prod_map (m : Multiset β) (n : Multiset γ) {f : β → γ → α} :
prod (m.map fun a => prod <| n.map fun b => f a b) =
prod (n.map fun b => prod <| m.map fun a => f a b) :=
Multiset.induction_on m (by simp) fun a m ih => by simp [ih]
#align multiset.prod_map_prod_map Multiset.prod_map_prod_map
#align multiset.sum_map_sum_map Multiset.sum_map_sum_map
/- warning: multiset.prod_induction -> Multiset.prod_induction is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (p : α -> Prop) (s : Multiset.{u1} α), (forall (a : α) (b : α), (p a) -> (p b) -> (p (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b))) -> (p (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))))) -> (forall (a : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) a s) -> (p a)) -> (p (Multiset.prod.{u1} α _inst_1 s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] (p : α -> Prop) (s : Multiset.{u1} α), (forall (a : α) (b : α), (p a) -> (p b) -> (p (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b))) -> (p (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))) -> (forall (a : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) a s) -> (p a)) -> (p (Multiset.prod.{u1} α _inst_1 s))
Case conversion may be inaccurate. Consider using '#align multiset.prod_induction Multiset.prod_inductionₓ'. -/
@[to_additive]
theorem prod_induction (p : α → Prop) (s : Multiset α) (p_mul : ∀ a b, p a → p b → p (a * b))
(p_one : p 1) (p_s : ∀ a ∈ s, p a) : p s.Prod :=
by
rw [prod_eq_foldr]
exact foldr_induction (· * ·) (fun x y z => by simp [mul_left_comm]) 1 p s p_mul p_one p_s
#align multiset.prod_induction Multiset.prod_induction
#align multiset.sum_induction Multiset.sum_induction
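/- Usage sketch (not part of the original port): `prod_induction` with the
predicate `(· = 1)` shows that a multiset of ones has product one. -/
example (s : Multiset α) (h : ∀ a ∈ s, a = 1) : s.Prod = 1 :=
  prod_induction (· = 1) s (fun a b ha hb => by rw [ha, hb, one_mul]) rfl h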
/- warning: multiset.prod_induction_nonempty -> Multiset.prod_induction_nonempty is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {s : Multiset.{u1} α} (p : α -> Prop), (forall (a : α) (b : α), (p a) -> (p b) -> (p (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b))) -> (Ne.{succ u1} (Multiset.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Multiset.{u1} α) (Multiset.hasEmptyc.{u1} α))) -> (forall (a : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) a s) -> (p a)) -> (p (Multiset.prod.{u1} α _inst_1 s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {s : Multiset.{u1} α} (p : α -> Prop), (forall (a : α) (b : α), (p a) -> (p b) -> (p (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b))) -> (Ne.{succ u1} (Multiset.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Multiset.{u1} α) (Multiset.instEmptyCollectionMultiset.{u1} α))) -> (forall (a : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) a s) -> (p a)) -> (p (Multiset.prod.{u1} α _inst_1 s))
Case conversion may be inaccurate. Consider using '#align multiset.prod_induction_nonempty Multiset.prod_induction_nonemptyₓ'. -/
@[to_additive]
theorem prod_induction_nonempty (p : α → Prop) (p_mul : ∀ a b, p a → p b → p (a * b)) (hs : s ≠ ∅)
(p_s : ∀ a ∈ s, p a) : p s.Prod := by
revert s
refine' Multiset.induction _ _
· intro h
exfalso
simpa using h
intro a s hs hsa hpsa
rw [prod_cons]
by_cases hs_empty : s = ∅
· simp [hs_empty, hpsa a]
have hps : ∀ x, x ∈ s → p x := fun x hxs => hpsa x (mem_cons_of_mem hxs)
exact p_mul a s.prod (hpsa a (mem_cons_self a s)) (hs hs_empty hps)
#align multiset.prod_induction_nonempty Multiset.prod_induction_nonempty
#align multiset.sum_induction_nonempty Multiset.sum_induction_nonempty
/- warning: multiset.prod_dvd_prod_of_le -> Multiset.prod_dvd_prod_of_le is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {s : Multiset.{u1} α} {t : Multiset.{u1} α}, (LE.le.{u1} (Multiset.{u1} α) (Preorder.toLE.{u1} (Multiset.{u1} α) (PartialOrder.toPreorder.{u1} (Multiset.{u1} α) (Multiset.partialOrder.{u1} α))) s t) -> (Dvd.Dvd.{u1} α (semigroupDvd.{u1} α (Monoid.toSemigroup.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (Multiset.prod.{u1} α _inst_1 s) (Multiset.prod.{u1} α _inst_1 t))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {s : Multiset.{u1} α} {t : Multiset.{u1} α}, (LE.le.{u1} (Multiset.{u1} α) (Preorder.toLE.{u1} (Multiset.{u1} α) (PartialOrder.toPreorder.{u1} (Multiset.{u1} α) (Multiset.instPartialOrderMultiset.{u1} α))) s t) -> (Dvd.dvd.{u1} α (semigroupDvd.{u1} α (Monoid.toSemigroup.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (Multiset.prod.{u1} α _inst_1 s) (Multiset.prod.{u1} α _inst_1 t))
Case conversion may be inaccurate. Consider using '#align multiset.prod_dvd_prod_of_le Multiset.prod_dvd_prod_of_leₓ'. -/
theorem prod_dvd_prod_of_le (h : s ≤ t) : s.Prod ∣ t.Prod :=
by
obtain ⟨z, rfl⟩ := exists_add_of_le h
simp only [prod_add, dvd_mul_right]
#align multiset.prod_dvd_prod_of_le Multiset.prod_dvd_prod_of_le
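/- Illustrative corollary (not part of the original port): combining
`prod_dvd_prod_of_le` with `le_cons_self` shows the product divides the
product of any one-element extension. -/
example (s : Multiset α) (a : α) : s.Prod ∣ (a ::ₘ s).Prod :=
  prod_dvd_prod_of_le (le_cons_self s a)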
end CommMonoid
#print Multiset.prod_dvd_prod_of_dvd /-
theorem prod_dvd_prod_of_dvd [CommMonoid β] {S : Multiset α} (g1 g2 : α → β)
(h : ∀ a ∈ S, g1 a ∣ g2 a) : (Multiset.map g1 S).Prod ∣ (Multiset.map g2 S).Prod :=
by
apply Multiset.induction_on' S; · simp
intro a T haS _ IH
simp [mul_dvd_mul (h a haS) IH]
#align multiset.prod_dvd_prod_of_dvd Multiset.prod_dvd_prod_of_dvd
-/
section AddCommMonoid
variable [AddCommMonoid α]
/- warning: multiset.sum_add_monoid_hom -> Multiset.sumAddMonoidHom is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : AddCommMonoid.{u1} α], AddMonoidHom.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : AddCommMonoid.{u1} α], AddMonoidHom.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))
Case conversion may be inaccurate. Consider using '#align multiset.sum_add_monoid_hom Multiset.sumAddMonoidHomₓ'. -/
/-- `Multiset.sum`, the sum of the elements of a multiset, promoted to a morphism of
`AddCommMonoid`s. -/
def sumAddMonoidHom : Multiset α →+ α where
toFun := sum
map_zero' := sum_zero
map_add' := sum_add
#align multiset.sum_add_monoid_hom Multiset.sumAddMonoidHom
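/- Usage sketch (not part of the original port): since `sumAddMonoidHom` is a
bundled `AddMonoidHom`, generic lemmas such as `map_add` apply to it. -/
example (s t : Multiset α) :
    sumAddMonoidHom (s + t) = sumAddMonoidHom s + sumAddMonoidHom t :=
  map_add _ s t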
/- warning: multiset.coe_sum_add_monoid_hom -> Multiset.coe_sumAddMonoidHom is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : AddCommMonoid.{u1} α], Eq.{succ u1} ((fun (_x : AddMonoidHom.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))) => (Multiset.{u1} α) -> α) (Multiset.sumAddMonoidHom.{u1} α _inst_1)) (coeFn.{succ u1, succ u1} (AddMonoidHom.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))) (fun (_x : AddMonoidHom.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))) => (Multiset.{u1} α) -> α) (AddMonoidHom.hasCoeToFun.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))) (Multiset.sumAddMonoidHom.{u1} α _inst_1)) (Multiset.sum.{u1} α _inst_1)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : AddCommMonoid.{u1} α], Eq.{succ u1} (forall (a : Multiset.{u1} α), (fun ([email protected]._hyg.403 : Multiset.{u1} α) => α) a) (FunLike.coe.{succ u1, succ u1, succ u1} (AddMonoidHom.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))) (Multiset.{u1} α) (fun (_x : Multiset.{u1} α) => (fun ([email protected]._hyg.403 : Multiset.{u1} α) => α) _x) (AddHomClass.toFunLike.{u1, u1, u1} (AddMonoidHom.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))) (Multiset.{u1} α) α (AddZeroClass.toAdd.{u1} (Multiset.{u1} α) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α))))))) (AddZeroClass.toAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))) (AddMonoidHomClass.toAddHomClass.{u1, u1, u1} (AddMonoidHom.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))) (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1)) (AddMonoidHom.addMonoidHomClass.{u1, u1} (Multiset.{u1} α) α (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{u1} α (AddCommMonoid.toAddMonoid.{u1} α _inst_1))))) (Multiset.sumAddMonoidHom.{u1} α _inst_1)) (Multiset.sum.{u1} α _inst_1)
Case conversion may be inaccurate. Consider using '#align multiset.coe_sum_add_monoid_hom Multiset.coe_sumAddMonoidHomₓ'. -/
@[simp]
theorem coe_sumAddMonoidHom : (sumAddMonoidHom : Multiset α → α) = sum :=
rfl
#align multiset.coe_sum_add_monoid_hom Multiset.coe_sumAddMonoidHom
end AddCommMonoid
section CommMonoidWithZero
variable [CommMonoidWithZero α]
/- warning: multiset.prod_eq_zero -> Multiset.prod_eq_zero is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoidWithZero.{u1} α] {s : Multiset.{u1} α}, (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) (OfNat.ofNat.{u1} α 0 (OfNat.mk.{u1} α 0 (Zero.zero.{u1} α (MulZeroClass.toHasZero.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1))))))) s) -> (Eq.{succ u1} α (Multiset.prod.{u1} α (CommMonoidWithZero.toCommMonoid.{u1} α _inst_1) s) (OfNat.ofNat.{u1} α 0 (OfNat.mk.{u1} α 0 (Zero.zero.{u1} α (MulZeroClass.toHasZero.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1))))))))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoidWithZero.{u1} α] {s : Multiset.{u1} α}, (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) (OfNat.ofNat.{u1} α 0 (Zero.toOfNat0.{u1} α (CommMonoidWithZero.toZero.{u1} α _inst_1))) s) -> (Eq.{succ u1} α (Multiset.prod.{u1} α (CommMonoidWithZero.toCommMonoid.{u1} α _inst_1) s) (OfNat.ofNat.{u1} α 0 (Zero.toOfNat0.{u1} α (CommMonoidWithZero.toZero.{u1} α _inst_1))))
Case conversion may be inaccurate. Consider using '#align multiset.prod_eq_zero Multiset.prod_eq_zeroₓ'. -/
theorem prod_eq_zero {s : Multiset α} (h : (0 : α) ∈ s) : s.Prod = 0 :=
by
rcases Multiset.exists_cons_of_mem h with ⟨s', hs'⟩
simp [hs', Multiset.prod_cons]
#align multiset.prod_eq_zero Multiset.prod_eq_zero
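/- Illustrative special case (not part of the original port) of
`prod_eq_zero`: consing `0` onto any multiset kills the product. -/
example (s : Multiset α) : ((0 : α) ::ₘ s).Prod = 0 :=
  prod_eq_zero (mem_cons_self 0 s)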
variable [NoZeroDivisors α] [Nontrivial α] {s : Multiset α}
/- warning: multiset.prod_eq_zero_iff -> Multiset.prod_eq_zero_iff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoidWithZero.{u1} α] [_inst_2 : NoZeroDivisors.{u1} α (MulZeroClass.toHasMul.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1)))) (MulZeroClass.toHasZero.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1))))] [_inst_3 : Nontrivial.{u1} α] {s : Multiset.{u1} α}, Iff (Eq.{succ u1} α (Multiset.prod.{u1} α (CommMonoidWithZero.toCommMonoid.{u1} α _inst_1) s) (OfNat.ofNat.{u1} α 0 (OfNat.mk.{u1} α 0 (Zero.zero.{u1} α (MulZeroClass.toHasZero.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1)))))))) (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) (OfNat.ofNat.{u1} α 0 (OfNat.mk.{u1} α 0 (Zero.zero.{u1} α (MulZeroClass.toHasZero.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1))))))) s)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoidWithZero.{u1} α] [_inst_2 : NoZeroDivisors.{u1} α (MulZeroClass.toMul.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1)))) (CommMonoidWithZero.toZero.{u1} α _inst_1)] [_inst_3 : Nontrivial.{u1} α] {s : Multiset.{u1} α}, Iff (Eq.{succ u1} α (Multiset.prod.{u1} α (CommMonoidWithZero.toCommMonoid.{u1} α _inst_1) s) (OfNat.ofNat.{u1} α 0 (Zero.toOfNat0.{u1} α (CommMonoidWithZero.toZero.{u1} α _inst_1)))) (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) (OfNat.ofNat.{u1} α 0 (Zero.toOfNat0.{u1} α (CommMonoidWithZero.toZero.{u1} α _inst_1))) s)
Case conversion may be inaccurate. Consider using '#align multiset.prod_eq_zero_iff Multiset.prod_eq_zero_iffₓ'. -/
theorem prod_eq_zero_iff : s.Prod = 0 ↔ (0 : α) ∈ s :=
Quotient.inductionOn s fun l => by
rw [quot_mk_to_coe, coe_prod]
exact List.prod_eq_zero_iff
#align multiset.prod_eq_zero_iff Multiset.prod_eq_zero_iff
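-- A hedged contrapositive sketch of the iff above: a nonvanishing product
-- certifies that `0` does not occur (compare `prod_ne_zero` below).
example (h : s.Prod ≠ 0) : (0 : α) ∉ s := fun h0 => h (prod_eq_zero_iff.2 h0)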
/- warning: multiset.prod_ne_zero -> Multiset.prod_ne_zero is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoidWithZero.{u1} α] [_inst_2 : NoZeroDivisors.{u1} α (MulZeroClass.toHasMul.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1)))) (MulZeroClass.toHasZero.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1))))] [_inst_3 : Nontrivial.{u1} α] {s : Multiset.{u1} α}, (Not (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) (OfNat.ofNat.{u1} α 0 (OfNat.mk.{u1} α 0 (Zero.zero.{u1} α (MulZeroClass.toHasZero.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1))))))) s)) -> (Ne.{succ u1} α (Multiset.prod.{u1} α (CommMonoidWithZero.toCommMonoid.{u1} α _inst_1) s) (OfNat.ofNat.{u1} α 0 (OfNat.mk.{u1} α 0 (Zero.zero.{u1} α (MulZeroClass.toHasZero.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1))))))))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoidWithZero.{u1} α] [_inst_2 : NoZeroDivisors.{u1} α (MulZeroClass.toMul.{u1} α (MulZeroOneClass.toMulZeroClass.{u1} α (MonoidWithZero.toMulZeroOneClass.{u1} α (CommMonoidWithZero.toMonoidWithZero.{u1} α _inst_1)))) (CommMonoidWithZero.toZero.{u1} α _inst_1)] [_inst_3 : Nontrivial.{u1} α] {s : Multiset.{u1} α}, (Not (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) (OfNat.ofNat.{u1} α 0 (Zero.toOfNat0.{u1} α (CommMonoidWithZero.toZero.{u1} α _inst_1))) s)) -> (Ne.{succ u1} α (Multiset.prod.{u1} α (CommMonoidWithZero.toCommMonoid.{u1} α _inst_1) s) (OfNat.ofNat.{u1} α 0 (Zero.toOfNat0.{u1} α (CommMonoidWithZero.toZero.{u1} α _inst_1))))
Case conversion may be inaccurate. Consider using '#align multiset.prod_ne_zero Multiset.prod_ne_zeroₓ'. -/
theorem prod_ne_zero (h : (0 : α) ∉ s) : s.Prod ≠ 0 :=
mt prod_eq_zero_iff.1 h
#align multiset.prod_ne_zero Multiset.prod_ne_zero
end CommMonoidWithZero
section DivisionCommMonoid
variable [DivisionCommMonoid α] {m : Multiset ι} {f g : ι → α}
/- warning: multiset.prod_map_inv' -> Multiset.prod_map_inv' is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DivisionCommMonoid.{u1} α] (m : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α (DivisionCommMonoid.toCommMonoid.{u1} α _inst_1) (Multiset.map.{u1, u1} α α (Inv.inv.{u1} α (DivInvMonoid.toHasInv.{u1} α (DivisionMonoid.toDivInvMonoid.{u1} α (DivisionCommMonoid.toDivisionMonoid.{u1} α _inst_1)))) m)) (Inv.inv.{u1} α (DivInvMonoid.toHasInv.{u1} α (DivisionMonoid.toDivInvMonoid.{u1} α (DivisionCommMonoid.toDivisionMonoid.{u1} α _inst_1))) (Multiset.prod.{u1} α (DivisionCommMonoid.toCommMonoid.{u1} α _inst_1) m))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DivisionCommMonoid.{u1} α] (m : Multiset.{u1} α), Eq.{succ u1} α (Multiset.prod.{u1} α (DivisionCommMonoid.toCommMonoid.{u1} α _inst_1) (Multiset.map.{u1, u1} α α (Inv.inv.{u1} α (InvOneClass.toInv.{u1} α (DivInvOneMonoid.toInvOneClass.{u1} α (DivisionMonoid.toDivInvOneMonoid.{u1} α (DivisionCommMonoid.toDivisionMonoid.{u1} α _inst_1))))) m)) (Inv.inv.{u1} α (InvOneClass.toInv.{u1} α (DivInvOneMonoid.toInvOneClass.{u1} α (DivisionMonoid.toDivInvOneMonoid.{u1} α (DivisionCommMonoid.toDivisionMonoid.{u1} α _inst_1)))) (Multiset.prod.{u1} α (DivisionCommMonoid.toCommMonoid.{u1} α _inst_1) m))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_inv' Multiset.prod_map_inv'ₓ'. -/
@[to_additive]
theorem prod_map_inv' (m : Multiset α) : (m.map Inv.inv).Prod = m.Prod⁻¹ :=
m.prod_hom (invMonoidHom : α →* α)
#align multiset.prod_map_inv' Multiset.prod_map_inv'
#align multiset.sum_map_neg' Multiset.sum_map_neg'
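-- A small composition sketch (assumes the standard `inv_inv` lemma): applying
-- `prod_map_inv'` twice undoes the inversion.
example (t : Multiset α) : ((t.map Inv.inv).map Inv.inv).Prod = t.Prod := by
rw [prod_map_inv', prod_map_inv', inv_inv]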
/- warning: multiset.prod_map_inv -> Multiset.prod_map_inv is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : DivisionCommMonoid.{u2} α] {m : Multiset.{u1} ι} {f : ι -> α}, Eq.{succ u2} α (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α (fun (i : ι) => Inv.inv.{u2} α (DivInvMonoid.toHasInv.{u2} α (DivisionMonoid.toDivInvMonoid.{u2} α (DivisionCommMonoid.toDivisionMonoid.{u2} α _inst_1))) (f i)) m)) (Inv.inv.{u2} α (DivInvMonoid.toHasInv.{u2} α (DivisionMonoid.toDivInvMonoid.{u2} α (DivisionCommMonoid.toDivisionMonoid.{u2} α _inst_1))) (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α f m)))
but is expected to have type
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : DivisionCommMonoid.{u2} α] {m : Multiset.{u1} ι} {f : ι -> α}, Eq.{succ u2} α (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α (fun (i : ι) => Inv.inv.{u2} α (InvOneClass.toInv.{u2} α (DivInvOneMonoid.toInvOneClass.{u2} α (DivisionMonoid.toDivInvOneMonoid.{u2} α (DivisionCommMonoid.toDivisionMonoid.{u2} α _inst_1)))) (f i)) m)) (Inv.inv.{u2} α (InvOneClass.toInv.{u2} α (DivInvOneMonoid.toInvOneClass.{u2} α (DivisionMonoid.toDivInvOneMonoid.{u2} α (DivisionCommMonoid.toDivisionMonoid.{u2} α _inst_1)))) (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α f m)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_inv Multiset.prod_map_invₓ'. -/
@[simp, to_additive]
theorem prod_map_inv : (m.map fun i => (f i)⁻¹).Prod = (m.map f).Prod⁻¹ :=
by
convert (m.map f).prod_map_inv'
rw [map_map]
#align multiset.prod_map_inv Multiset.prod_map_inv
#align multiset.sum_map_neg Multiset.sum_map_neg
/- warning: multiset.prod_map_div -> Multiset.prod_map_div is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : DivisionCommMonoid.{u2} α] {m : Multiset.{u1} ι} {f : ι -> α} {g : ι -> α}, Eq.{succ u2} α (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α (fun (i : ι) => HDiv.hDiv.{u2, u2, u2} α α α (instHDiv.{u2} α (DivInvMonoid.toHasDiv.{u2} α (DivisionMonoid.toDivInvMonoid.{u2} α (DivisionCommMonoid.toDivisionMonoid.{u2} α _inst_1)))) (f i) (g i)) m)) (HDiv.hDiv.{u2, u2, u2} α α α (instHDiv.{u2} α (DivInvMonoid.toHasDiv.{u2} α (DivisionMonoid.toDivInvMonoid.{u2} α (DivisionCommMonoid.toDivisionMonoid.{u2} α _inst_1)))) (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α f m)) (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α g m)))
but is expected to have type
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : DivisionCommMonoid.{u2} α] {m : Multiset.{u1} ι} {f : ι -> α} {g : ι -> α}, Eq.{succ u2} α (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α (fun (i : ι) => HDiv.hDiv.{u2, u2, u2} α α α (instHDiv.{u2} α (DivInvMonoid.toDiv.{u2} α (DivisionMonoid.toDivInvMonoid.{u2} α (DivisionCommMonoid.toDivisionMonoid.{u2} α _inst_1)))) (f i) (g i)) m)) (HDiv.hDiv.{u2, u2, u2} α α α (instHDiv.{u2} α (DivInvMonoid.toDiv.{u2} α (DivisionMonoid.toDivInvMonoid.{u2} α (DivisionCommMonoid.toDivisionMonoid.{u2} α _inst_1)))) (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α f m)) (Multiset.prod.{u2} α (DivisionCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α g m)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_div Multiset.prod_map_divₓ'. -/
@[simp, to_additive]
theorem prod_map_div : (m.map fun i => f i / g i).Prod = (m.map f).Prod / (m.map g).Prod :=
m.prod_hom₂ (· / ·) mul_div_mul_comm (div_one _) _ _
#align multiset.prod_map_div Multiset.prod_map_div
#align multiset.sum_map_sub Multiset.sum_map_sub
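-- Direct-application sketch, reusing this section's `m`, `f` and `g`.
example : (m.map fun i => f i / g i).Prod = (m.map f).Prod / (m.map g).Prod :=
prod_map_div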
#print Multiset.prod_map_zpow /-
@[to_additive]
theorem prod_map_zpow {n : ℤ} : (m.map fun i => f i ^ n).Prod = (m.map f).Prod ^ n :=
by
convert (m.map f).prod_hom (zpowGroupHom _ : α →* α)
rw [map_map]
rfl
#align multiset.prod_map_zpow Multiset.prod_map_zpow
#align multiset.sum_map_zsmul Multiset.sum_map_zsmul
-/
end DivisionCommMonoid
section NonUnitalNonAssocSemiring
variable [NonUnitalNonAssocSemiring α] {a : α} {s : Multiset ι} {f : ι → α}
/- warning: multiset.sum_map_mul_left -> Multiset.sum_map_mul_left is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : NonUnitalNonAssocSemiring.{u2} α] {a : α} {s : Multiset.{u1} ι} {f : ι -> α}, Eq.{succ u2} α (Multiset.sum.{u2} α (NonUnitalNonAssocSemiring.toAddCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α (fun (i : ι) => HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (Distrib.toHasMul.{u2} α (NonUnitalNonAssocSemiring.toDistrib.{u2} α _inst_1))) a (f i)) s)) (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (Distrib.toHasMul.{u2} α (NonUnitalNonAssocSemiring.toDistrib.{u2} α _inst_1))) a (Multiset.sum.{u2} α (NonUnitalNonAssocSemiring.toAddCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α f s)))
but is expected to have type
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : NonUnitalNonAssocSemiring.{u2} α] {a : α} {s : Multiset.{u1} ι} {f : ι -> α}, Eq.{succ u2} α (Multiset.sum.{u2} α (NonUnitalNonAssocSemiring.toAddCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α (fun (i : ι) => HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (NonUnitalNonAssocSemiring.toMul.{u2} α _inst_1)) a (f i)) s)) (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (NonUnitalNonAssocSemiring.toMul.{u2} α _inst_1)) a (Multiset.sum.{u2} α (NonUnitalNonAssocSemiring.toAddCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α f s)))
Case conversion may be inaccurate. Consider using '#align multiset.sum_map_mul_left Multiset.sum_map_mul_leftₓ'. -/
theorem sum_map_mul_left : sum (s.map fun i => a * f i) = a * sum (s.map f) :=
Multiset.induction_on s (by simp) fun i s ih => by simp [ih, mul_add]
#align multiset.sum_map_mul_left Multiset.sum_map_mul_left
/- warning: multiset.sum_map_mul_right -> Multiset.sum_map_mul_right is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : NonUnitalNonAssocSemiring.{u2} α] {a : α} {s : Multiset.{u1} ι} {f : ι -> α}, Eq.{succ u2} α (Multiset.sum.{u2} α (NonUnitalNonAssocSemiring.toAddCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α (fun (i : ι) => HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (Distrib.toHasMul.{u2} α (NonUnitalNonAssocSemiring.toDistrib.{u2} α _inst_1))) (f i) a) s)) (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (Distrib.toHasMul.{u2} α (NonUnitalNonAssocSemiring.toDistrib.{u2} α _inst_1))) (Multiset.sum.{u2} α (NonUnitalNonAssocSemiring.toAddCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α f s)) a)
but is expected to have type
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : NonUnitalNonAssocSemiring.{u2} α] {a : α} {s : Multiset.{u1} ι} {f : ι -> α}, Eq.{succ u2} α (Multiset.sum.{u2} α (NonUnitalNonAssocSemiring.toAddCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α (fun (i : ι) => HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (NonUnitalNonAssocSemiring.toMul.{u2} α _inst_1)) (f i) a) s)) (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (NonUnitalNonAssocSemiring.toMul.{u2} α _inst_1)) (Multiset.sum.{u2} α (NonUnitalNonAssocSemiring.toAddCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α f s)) a)
Case conversion may be inaccurate. Consider using '#align multiset.sum_map_mul_right Multiset.sum_map_mul_rightₓ'. -/
theorem sum_map_mul_right : sum (s.map fun i => f i * a) = sum (s.map f) * a :=
Multiset.induction_on s (by simp) fun a s ih => by simp [ih, add_mul]
#align multiset.sum_map_mul_right Multiset.sum_map_mul_right
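-- A hedged combination sketch: peel a right factor with `sum_map_mul_right`,
-- then a left factor with `sum_map_mul_left` (tactic details are indicative).
example (b : α) : sum (s.map fun i => a * f i * b) = a * sum (s.map f) * b := by
simp_rw [sum_map_mul_right, sum_map_mul_left]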
end NonUnitalNonAssocSemiring
section Semiring
variable [Semiring α]
#print Multiset.dvd_sum /-
theorem dvd_sum {a : α} {s : Multiset α} : (∀ x ∈ s, a ∣ x) → a ∣ s.Sum :=
Multiset.induction_on s (fun _ => dvd_zero _) fun x s ih h =>
by
rw [sum_cons]
exact dvd_add (h _ (mem_cons_self _ _)) (ih fun y hy => h _ <| mem_cons.2 <| Or.inr hy)
#align multiset.dvd_sum Multiset.dvd_sum
-/
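-- Hedged usage sketch of `dvd_sum`, with the binders made explicit.
example (a : α) (t : Multiset α) (h : ∀ x ∈ t, a ∣ x) : a ∣ t.Sum :=
dvd_sum h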
end Semiring
/-! ### Order -/
section OrderedCommMonoid
variable [OrderedCommMonoid α] {s t : Multiset α} {a : α}
/- warning: multiset.one_le_prod_of_one_le -> Multiset.one_le_prod_of_one_le is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] {s : Multiset.{u1} α}, (forall (x : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1))))))) x)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1))))))) (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] {s : Multiset.{u1} α}, (forall (x : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1))))) x)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1))))) (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s))
Case conversion may be inaccurate. Consider using '#align multiset.one_le_prod_of_one_le Multiset.one_le_prod_of_one_leₓ'. -/
@[to_additive sum_nonneg]
theorem one_le_prod_of_one_le : (∀ x ∈ s, (1 : α) ≤ x) → 1 ≤ s.Prod :=
Quotient.inductionOn s fun l hl => by simpa using List.one_le_prod_of_one_le hl
#align multiset.one_le_prod_of_one_le Multiset.one_le_prod_of_one_le
#align multiset.sum_nonneg Multiset.sum_nonneg
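-- Direct-application sketch with this section's `s`.
example (h : ∀ x ∈ s, (1 : α) ≤ x) : (1 : α) ≤ s.Prod :=
one_le_prod_of_one_le h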
/- warning: multiset.single_le_prod -> Multiset.single_le_prod is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] {s : Multiset.{u1} α}, (forall (x : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1))))))) x)) -> (forall (x : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) x (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] {s : Multiset.{u1} α}, (forall (x : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1))))) x)) -> (forall (x : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) x (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s)))
Case conversion may be inaccurate. Consider using '#align multiset.single_le_prod Multiset.single_le_prodₓ'. -/
@[to_additive]
theorem single_le_prod : (∀ x ∈ s, (1 : α) ≤ x) → ∀ x ∈ s, x ≤ s.Prod :=
Quotient.inductionOn s fun l hl x hx => by simpa using List.single_le_prod hl x hx
#align multiset.single_le_prod Multiset.single_le_prod
#align multiset.single_le_sum Multiset.single_le_sum
/- warning: multiset.prod_le_pow_card -> Multiset.prod_le_pow_card is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] (s : Multiset.{u1} α) (n : α), (forall (x : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) x n)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s) (HPow.hPow.{u1, 0, u1} α Nat α (instHPow.{u1, 0} α Nat (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1)))) n (coeFn.{succ u1, succ u1} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (fun (_x : AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) => (Multiset.{u1} α) -> Nat) (AddMonoidHom.hasCoeToFun.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.card.{u1} α) s)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] (s : Multiset.{u1} α) (n : α), (forall (x : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) x n)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s) (HPow.hPow.{u1, 0, u1} α ((fun ([email protected]._hyg.403 : Multiset.{u1} α) => Nat) s) α (instHPow.{u1, 0} α ((fun ([email protected]._hyg.403 : Multiset.{u1} α) => Nat) s) (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1)))) n (FunLike.coe.{succ u1, succ u1, 1} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.{u1} α) (fun (_x : Multiset.{u1} α) => (fun ([email protected]._hyg.403 : Multiset.{u1} α) => Nat) _x) (AddHomClass.toFunLike.{u1, u1, 0} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.{u1} α) Nat (AddZeroClass.toAdd.{u1} (Multiset.{u1} α) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α))))))) (AddZeroClass.toAdd.{0} Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoidHomClass.toAddHomClass.{u1, u1, 0} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid) (AddMonoidHom.addMonoidHomClass.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)))) (Multiset.card.{u1} α) s)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_le_pow_card Multiset.prod_le_pow_cardₓ'. -/
@[to_additive sum_le_card_nsmul]
theorem prod_le_pow_card (s : Multiset α) (n : α) (h : ∀ x ∈ s, x ≤ n) : s.Prod ≤ n ^ s.card :=
by
induction s using Quotient.inductionOn
simpa using List.prod_le_pow_card _ _ h
#align multiset.prod_le_pow_card Multiset.prod_le_pow_card
#align multiset.sum_le_card_nsmul Multiset.sum_le_card_nsmul
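-- Direct-application sketch of the bound just proved, with this section's `s`.
example (n : α) (h : ∀ x ∈ s, x ≤ n) : s.Prod ≤ n ^ s.card :=
prod_le_pow_card s n h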
/- warning: multiset.all_one_of_le_one_le_of_prod_eq_one -> Multiset.all_one_of_le_one_le_of_prod_eq_one is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] {s : Multiset.{u1} α}, (forall (x : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1))))))) x)) -> (Eq.{succ u1} α (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1)))))))) -> (forall (x : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) x s) -> (Eq.{succ u1} α x (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1)))))))))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] {s : Multiset.{u1} α}, (forall (x : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1))))) x)) -> (Eq.{succ u1} α (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1)))))) -> (forall (x : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) x s) -> (Eq.{succ u1} α x (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1)))))))
Case conversion may be inaccurate. Consider using '#align multiset.all_one_of_le_one_le_of_prod_eq_one Multiset.all_one_of_le_one_le_of_prod_eq_oneₓ'. -/
@[to_additive all_zero_of_le_zero_le_of_sum_eq_zero]
theorem all_one_of_le_one_le_of_prod_eq_one :
(∀ x ∈ s, (1 : α) ≤ x) → s.Prod = 1 → ∀ x ∈ s, x = (1 : α) :=
by
apply Quotient.inductionOn s
simp only [quot_mk_to_coe, coe_prod, mem_coe]
exact fun l => List.all_one_of_le_one_le_of_prod_eq_one
#align multiset.all_one_of_le_one_le_of_prod_eq_one Multiset.all_one_of_le_one_le_of_prod_eq_one
#align multiset.all_zero_of_le_zero_le_of_sum_eq_zero Multiset.all_zero_of_le_zero_le_of_sum_eq_zero
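-- An alternative derivation sketch (assumes `LE.le.trans_eq` and the root
-- `le_antisymm`): each element is squeezed between `1` and the whole product.
example (h₁ : ∀ x ∈ s, (1 : α) ≤ x) (h₂ : s.Prod = 1) (x : α) (hx : x ∈ s) : x = 1 :=
_root_.le_antisymm ((single_le_prod h₁ x hx).trans_eq h₂) (h₁ x hx)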
#print Multiset.prod_le_prod_of_rel_le /-
@[to_additive]
theorem prod_le_prod_of_rel_le (h : s.Rel (· ≤ ·) t) : s.Prod ≤ t.Prod :=
by
induction' h with _ _ _ _ rh _ rt
· rfl
· rw [prod_cons, prod_cons]
exact mul_le_mul' rh rt
#align multiset.prod_le_prod_of_rel_le Multiset.prod_le_prod_of_rel_le
#align multiset.sum_le_sum_of_rel_le Multiset.sum_le_sum_of_rel_le
-/
/- warning: multiset.prod_map_le_prod_map -> Multiset.prod_map_le_prod_map is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}} {α : Type.{u2}} [_inst_1 : OrderedCommMonoid.{u2} α] {s : Multiset.{u1} ι} (f : ι -> α) (g : ι -> α), (forall (i : ι), (Membership.Mem.{u1, u1} ι (Multiset.{u1} ι) (Multiset.hasMem.{u1} ι) i s) -> (LE.le.{u2} α (Preorder.toLE.{u2} α (PartialOrder.toPreorder.{u2} α (OrderedCommMonoid.toPartialOrder.{u2} α _inst_1))) (f i) (g i))) -> (LE.le.{u2} α (Preorder.toLE.{u2} α (PartialOrder.toPreorder.{u2} α (OrderedCommMonoid.toPartialOrder.{u2} α _inst_1))) (Multiset.prod.{u2} α (OrderedCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α f s)) (Multiset.prod.{u2} α (OrderedCommMonoid.toCommMonoid.{u2} α _inst_1) (Multiset.map.{u1, u2} ι α g s)))
but is expected to have type
forall {ι : Type.{u2}} {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] {s : Multiset.{u2} ι} (f : ι -> α) (g : ι -> α), (forall (i : ι), (Membership.mem.{u2, u2} ι (Multiset.{u2} ι) (Multiset.instMembershipMultiset.{u2} ι) i s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (f i) (g i))) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) (Multiset.map.{u2, u1} ι α f s)) (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) (Multiset.map.{u2, u1} ι α g s)))
Case conversion may be inaccurate. Consider using '#align multiset.prod_map_le_prod_map Multiset.prod_map_le_prod_mapₓ'. -/
@[to_additive]
theorem prod_map_le_prod_map {s : Multiset ι} (f : ι → α) (g : ι → α) (h : ∀ i, i ∈ s → f i ≤ g i) :
(s.map f).Prod ≤ (s.map g).Prod :=
prod_le_prod_of_rel_le <| rel_map.2 <| rel_refl_of_refl_on h
#align multiset.prod_map_le_prod_map Multiset.prod_map_le_prod_map
#align multiset.sum_map_le_sum_map Multiset.sum_map_le_sum_map
#print Multiset.prod_map_le_prod /-
@[to_additive]
theorem prod_map_le_prod (f : α → α) (h : ∀ x, x ∈ s → f x ≤ x) : (s.map f).Prod ≤ s.Prod :=
prod_le_prod_of_rel_le <| rel_map_left.2 <| rel_refl_of_refl_on h
#align multiset.prod_map_le_prod Multiset.prod_map_le_prod
#align multiset.sum_map_le_sum Multiset.sum_map_le_sum
-/
#print Multiset.prod_le_prod_map /-
@[to_additive]
theorem prod_le_prod_map (f : α → α) (h : ∀ x, x ∈ s → x ≤ f x) : s.Prod ≤ (s.map f).Prod :=
@prod_map_le_prod αᵒᵈ _ _ f h
#align multiset.prod_le_prod_map Multiset.prod_le_prod_map
#align multiset.sum_le_sum_map Multiset.sum_le_sum_map
-/
/- warning: multiset.pow_card_le_prod -> Multiset.pow_card_le_prod is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] {s : Multiset.{u1} α} {a : α}, (forall (x : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) a x)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (HPow.hPow.{u1, 0, u1} α Nat α (instHPow.{u1, 0} α Nat (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1)))) a (coeFn.{succ u1, succ u1} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (fun (_x : AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) => (Multiset.{u1} α) -> Nat) (AddMonoidHom.hasCoeToFun.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.card.{u1} α) s)) (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : OrderedCommMonoid.{u1} α] {s : Multiset.{u1} α} {a : α}, (forall (x : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) x s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) a x)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedCommMonoid.toPartialOrder.{u1} α _inst_1))) (HPow.hPow.{u1, 0, u1} α ((fun ([email protected]._hyg.403 : Multiset.{u1} α) => Nat) s) α (instHPow.{u1, 0} α ((fun ([email protected]._hyg.403 : Multiset.{u1} α) => Nat) s) (Monoid.Pow.{u1} α (CommMonoid.toMonoid.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1)))) a (FunLike.coe.{succ u1, succ u1, 1} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.{u1} α) (fun (_x : Multiset.{u1} α) => (fun ([email protected]._hyg.403 : Multiset.{u1} α) => Nat) _x) (AddHomClass.toFunLike.{u1, u1, 0} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.{u1} α) Nat (AddZeroClass.toAdd.{u1} (Multiset.{u1} α) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α))))))) (AddZeroClass.toAdd.{0} Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoidHomClass.toAddHomClass.{u1, u1, 0} (AddMonoidHom.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid) (AddMonoidHom.addMonoidHomClass.{u1, 0} (Multiset.{u1} α) Nat (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)))) (Multiset.card.{u1} α) s)) (Multiset.prod.{u1} α (OrderedCommMonoid.toCommMonoid.{u1} α _inst_1) s))
Case conversion may be inaccurate. Consider using '#align multiset.pow_card_le_prod Multiset.pow_card_le_prodₓ'. -/
@[to_additive card_nsmul_le_sum]
theorem pow_card_le_prod (h : ∀ x ∈ s, a ≤ x) : a ^ s.card ≤ s.Prod :=
by
rw [← Multiset.prod_replicate, ← Multiset.map_const]
exact prod_map_le_prod _ h
#align multiset.pow_card_le_prod Multiset.pow_card_le_prod
#align multiset.card_nsmul_le_sum Multiset.card_nsmul_le_sum
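-- A sandwich sketch combining `prod_le_pow_card` with `pow_card_le_prod`
-- (assumes the root `le_antisymm`).
example (h₁ : ∀ x ∈ s, a ≤ x) (h₂ : ∀ x ∈ s, x ≤ a) : s.Prod = a ^ s.card :=
_root_.le_antisymm (prod_le_pow_card s a h₂) (pow_card_le_prod h₁)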
end OrderedCommMonoid
/- warning: multiset.prod_nonneg -> Multiset.prod_nonneg is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : OrderedCommSemiring.{u1} α] {m : Multiset.{u1} α}, (forall (a : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) a m) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedAddCommMonoid.toPartialOrder.{u1} α (OrderedSemiring.toOrderedAddCommMonoid.{u1} α (OrderedCommSemiring.toOrderedSemiring.{u1} α _inst_1))))) (OfNat.ofNat.{u1} α 0 (OfNat.mk.{u1} α 0 (Zero.zero.{u1} α (MulZeroClass.toHasZero.{u1} α (NonUnitalNonAssocSemiring.toMulZeroClass.{u1} α (NonAssocSemiring.toNonUnitalNonAssocSemiring.{u1} α (Semiring.toNonAssocSemiring.{u1} α (OrderedSemiring.toSemiring.{u1} α (OrderedCommSemiring.toOrderedSemiring.{u1} α _inst_1))))))))) a)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedAddCommMonoid.toPartialOrder.{u1} α (OrderedSemiring.toOrderedAddCommMonoid.{u1} α (OrderedCommSemiring.toOrderedSemiring.{u1} α _inst_1))))) (OfNat.ofNat.{u1} α 0 (OfNat.mk.{u1} α 0 (Zero.zero.{u1} α (MulZeroClass.toHasZero.{u1} α (NonUnitalNonAssocSemiring.toMulZeroClass.{u1} α (NonAssocSemiring.toNonUnitalNonAssocSemiring.{u1} α (Semiring.toNonAssocSemiring.{u1} α (OrderedSemiring.toSemiring.{u1} α (OrderedCommSemiring.toOrderedSemiring.{u1} α _inst_1))))))))) (Multiset.prod.{u1} α (CommSemiring.toCommMonoid.{u1} α (OrderedCommSemiring.toCommSemiring.{u1} α _inst_1)) m))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : OrderedCommSemiring.{u1} α] {m : Multiset.{u1} α}, (forall (a : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) a m) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedSemiring.toPartialOrder.{u1} α (OrderedCommSemiring.toOrderedSemiring.{u1} α _inst_1)))) (OfNat.ofNat.{u1} α 0 (Zero.toOfNat0.{u1} α (CommMonoidWithZero.toZero.{u1} α (CommSemiring.toCommMonoidWithZero.{u1} α (OrderedCommSemiring.toCommSemiring.{u1} α _inst_1))))) a)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedSemiring.toPartialOrder.{u1} α (OrderedCommSemiring.toOrderedSemiring.{u1} α _inst_1)))) (OfNat.ofNat.{u1} α 0 (Zero.toOfNat0.{u1} α (CommMonoidWithZero.toZero.{u1} α (CommSemiring.toCommMonoidWithZero.{u1} α (OrderedCommSemiring.toCommSemiring.{u1} α _inst_1))))) (Multiset.prod.{u1} α (CommSemiring.toCommMonoid.{u1} α (OrderedCommSemiring.toCommSemiring.{u1} α _inst_1)) m))
Case conversion may be inaccurate. Consider using '#align multiset.prod_nonneg Multiset.prod_nonnegₓ'. -/
theorem prod_nonneg [OrderedCommSemiring α] {m : Multiset α} (h : ∀ a ∈ m, (0 : α) ≤ a) :
0 ≤ m.Prod := by
revert h
refine' m.induction_on _ _
· rintro -
rw [prod_zero]
exact zero_le_one
intro a s hs ih
rw [prod_cons]
exact mul_nonneg (ih _ <| mem_cons_self _ _) (hs fun a ha => ih _ <| mem_cons_of_mem ha)
#align multiset.prod_nonneg Multiset.prod_nonneg
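-- Direct-application sketch with the binders made explicit.
example [OrderedCommSemiring α] {m : Multiset α} (h : ∀ a ∈ m, (0 : α) ≤ a) : 0 ≤ m.Prod :=
prod_nonneg h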
/- warning: multiset.prod_eq_one -> Multiset.prod_eq_one is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {m : Multiset.{u1} α}, (forall (x : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) x m) -> (Eq.{succ u1} α x (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))))))) -> (Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 m) (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))))))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CommMonoid.{u1} α] {m : Multiset.{u1} α}, (forall (x : α), (Membership.mem.{u1, u1} α (Multiset.{u1} α) (Multiset.instMembershipMultiset.{u1} α) x m) -> (Eq.{succ u1} α x (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))))) -> (Eq.{succ u1} α (Multiset.prod.{u1} α _inst_1 m) (OfNat.ofNat.{u1} α 1 (One.toOfNat1.{u1} α (Monoid.toOne.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))))
Case conversion may be inaccurate. Consider using '#align multiset.prod_eq_one Multiset.prod_eq_oneₓ'. -/
/-- Slightly more general version of `multiset.prod_eq_one_iff` for a non-ordered `monoid` -/
@[to_additive
"Slightly more general version of `multiset.sum_eq_zero_iff`\n for a non-ordered `add_monoid`"]
theorem prod_eq_one [CommMonoid α] {m : Multiset α} (h : ∀ x ∈ m, x = (1 : α)) : m.Prod = 1 :=
by
induction' m using Quotient.inductionOn with l
simp [List.prod_eq_one h]
#align multiset.prod_eq_one Multiset.prod_eq_one
#align multiset.sum_eq_zero Multiset.sum_eq_zero
#print Multiset.le_prod_of_mem /-
@[to_additive]
theorem le_prod_of_mem [CanonicallyOrderedMonoid α] {m : Multiset α} {a : α} (h : a ∈ m) :
a ≤ m.Prod := by
obtain ⟨m', rfl⟩ := exists_cons_of_mem h
rw [prod_cons]
exact _root_.le_mul_right (le_refl a)
#align multiset.le_prod_of_mem Multiset.le_prod_of_mem
#align multiset.le_sum_of_mem Multiset.le_sum_of_mem
-/
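-- A hedged transitivity sketch: anything below some member is below the product.
example [CanonicallyOrderedMonoid α] {m : Multiset α} {a b : α} (hb : b ≤ a) (h : a ∈ m) :
b ≤ m.Prod :=
hb.trans (le_prod_of_mem h)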
/- warning: multiset.le_prod_of_submultiplicative_on_pred -> Multiset.le_prod_of_submultiplicative_on_pred is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : OrderedCommMonoid.{u2} β] (f : α -> β) (p : α -> Prop), (Eq.{succ u2} β (f (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))))) (OfNat.ofNat.{u2} β 1 (OfNat.mk.{u2} β 1 (One.one.{u2} β (MulOneClass.toHasOne.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2)))))))) -> (p (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))))) -> (forall (a : α) (b : α), (p a) -> (p b) -> (LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedCommMonoid.toPartialOrder.{u2} β _inst_2))) (f (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b)) (HMul.hMul.{u2, u2, u2} β β β (instHMul.{u2} β (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2))))) (f a) (f b)))) -> (forall (a : α) (b : α), (p a) -> (p b) -> (p (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b))) -> (forall (s : Multiset.{u1} α), (forall (a : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) a s) -> (p a)) -> (LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedCommMonoid.toPartialOrder.{u2} β _inst_2))) (f (Multiset.prod.{u1} α _inst_1 s)) (Multiset.prod.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2) (Multiset.map.{u1, u2} α β f s))))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : CommMonoid.{u2} α] [_inst_2 : OrderedCommMonoid.{u1} β] (f : α -> β) (p : α -> Prop), (Eq.{succ u1} β (f (OfNat.ofNat.{u2} α 1 (One.toOfNat1.{u2} α (Monoid.toOne.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))))) (OfNat.ofNat.{u1} β 1 (One.toOfNat1.{u1} β (Monoid.toOne.{u1} β (CommMonoid.toMonoid.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2)))))) -> (p (OfNat.ofNat.{u2} α 1 (One.toOfNat1.{u2} α (Monoid.toOne.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))))) -> (forall (a : α) (b : α), (p a) -> (p b) -> (LE.le.{u1} β (Preorder.toLE.{u1} β (PartialOrder.toPreorder.{u1} β (OrderedCommMonoid.toPartialOrder.{u1} β _inst_2))) (f (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) a b)) (HMul.hMul.{u1, u1, u1} β β β (instHMul.{u1} β (MulOneClass.toMul.{u1} β (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2))))) (f a) (f b)))) -> (forall (a : α) (b : α), (p a) -> (p b) -> (p (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) a b))) -> (forall (s : Multiset.{u2} α), (forall (a : α), (Membership.mem.{u2, u2} α (Multiset.{u2} α) (Multiset.instMembershipMultiset.{u2} α) a s) -> (p a)) -> (LE.le.{u1} β (Preorder.toLE.{u1} β (PartialOrder.toPreorder.{u1} β (OrderedCommMonoid.toPartialOrder.{u1} β _inst_2))) (f (Multiset.prod.{u2} α _inst_1 s)) (Multiset.prod.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2) (Multiset.map.{u2, u1} α β f s))))
Case conversion may be inaccurate. Consider using '#align multiset.le_prod_of_submultiplicative_on_pred Multiset.le_prod_of_submultiplicative_on_predₓ'. -/
@[to_additive le_sum_of_subadditive_on_pred]
theorem le_prod_of_submultiplicative_on_pred [CommMonoid α] [OrderedCommMonoid β] (f : α → β)
(p : α → Prop) (h_one : f 1 = 1) (hp_one : p 1)
(h_mul : ∀ a b, p a → p b → f (a * b) ≤ f a * f b) (hp_mul : ∀ a b, p a → p b → p (a * b))
(s : Multiset α) (hps : ∀ a, a ∈ s → p a) : f s.Prod ≤ (s.map f).Prod :=
by
revert s
refine' Multiset.induction _ _
· simp [le_of_eq h_one]
intro a s hs hpsa
have hps : ∀ x, x ∈ s → p x := fun x hx => hpsa x (mem_cons_of_mem hx)
have hp_prod : p s.prod := prod_induction p s hp_mul hp_one hps
rw [prod_cons, map_cons, prod_cons]
exact (h_mul a s.prod (hpsa a (mem_cons_self a s)) hp_prod).trans (mul_le_mul_left' (hs hps) _)
#align multiset.le_prod_of_submultiplicative_on_pred Multiset.le_prod_of_submultiplicative_on_pred
#align multiset.le_sum_of_subadditive_on_pred Multiset.le_sum_of_subadditive_on_pred
/- warning: multiset.le_prod_of_submultiplicative -> Multiset.le_prod_of_submultiplicative is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : OrderedCommMonoid.{u2} β] (f : α -> β), (Eq.{succ u2} β (f (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))))))) (OfNat.ofNat.{u2} β 1 (OfNat.mk.{u2} β 1 (One.one.{u2} β (MulOneClass.toHasOne.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2)))))))) -> (forall (a : α) (b : α), LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedCommMonoid.toPartialOrder.{u2} β _inst_2))) (f (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b)) (HMul.hMul.{u2, u2, u2} β β β (instHMul.{u2} β (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2))))) (f a) (f b))) -> (forall (s : Multiset.{u1} α), LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedCommMonoid.toPartialOrder.{u2} β _inst_2))) (f (Multiset.prod.{u1} α _inst_1 s)) (Multiset.prod.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2) (Multiset.map.{u1, u2} α β f s)))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : CommMonoid.{u2} α] [_inst_2 : OrderedCommMonoid.{u1} β] (f : α -> β), (Eq.{succ u1} β (f (OfNat.ofNat.{u2} α 1 (One.toOfNat1.{u2} α (Monoid.toOne.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))))) (OfNat.ofNat.{u1} β 1 (One.toOfNat1.{u1} β (Monoid.toOne.{u1} β (CommMonoid.toMonoid.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2)))))) -> (forall (a : α) (b : α), LE.le.{u1} β (Preorder.toLE.{u1} β (PartialOrder.toPreorder.{u1} β (OrderedCommMonoid.toPartialOrder.{u1} β _inst_2))) (f (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) a b)) (HMul.hMul.{u1, u1, u1} β β β (instHMul.{u1} β (MulOneClass.toMul.{u1} β (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2))))) (f a) (f b))) -> (forall (s : Multiset.{u2} α), LE.le.{u1} β (Preorder.toLE.{u1} β (PartialOrder.toPreorder.{u1} β (OrderedCommMonoid.toPartialOrder.{u1} β _inst_2))) (f (Multiset.prod.{u2} α _inst_1 s)) (Multiset.prod.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2) (Multiset.map.{u2, u1} α β f s)))
Case conversion may be inaccurate. Consider using '#align multiset.le_prod_of_submultiplicative Multiset.le_prod_of_submultiplicativeₓ'. -/
@[to_additive le_sum_of_subadditive]
theorem le_prod_of_submultiplicative [CommMonoid α] [OrderedCommMonoid β] (f : α → β)
(h_one : f 1 = 1) (h_mul : ∀ a b, f (a * b) ≤ f a * f b) (s : Multiset α) :
f s.Prod ≤ (s.map f).Prod :=
le_prod_of_submultiplicative_on_pred f (fun i => True) h_one trivial (fun x y _ _ => h_mul x y)
(by simp) s (by simp)
#align multiset.le_prod_of_submultiplicative Multiset.le_prod_of_submultiplicative
#align multiset.le_sum_of_subadditive Multiset.le_sum_of_subadditive
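-- A hedged specialization sketch: a bundled hom is submultiplicative with
-- equality, so the lemma applies (uses the generic `map_one`/`map_mul`).
example [CommMonoid α] [OrderedCommMonoid β] (f : α →* β) (t : Multiset α) :
f t.Prod ≤ (t.map f).Prod :=
le_prod_of_submultiplicative f (map_one f) (fun a b => (map_mul f a b).le) t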
/- warning: multiset.le_prod_nonempty_of_submultiplicative_on_pred -> Multiset.le_prod_nonempty_of_submultiplicative_on_pred is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : OrderedCommMonoid.{u2} β] (f : α -> β) (p : α -> Prop), (forall (a : α) (b : α), (p a) -> (p b) -> (LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedCommMonoid.toPartialOrder.{u2} β _inst_2))) (f (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b)) (HMul.hMul.{u2, u2, u2} β β β (instHMul.{u2} β (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2))))) (f a) (f b)))) -> (forall (a : α) (b : α), (p a) -> (p b) -> (p (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b))) -> (forall (s : Multiset.{u1} α), (Ne.{succ u1} (Multiset.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Multiset.{u1} α) (Multiset.hasEmptyc.{u1} α))) -> (forall (a : α), (Membership.Mem.{u1, u1} α (Multiset.{u1} α) (Multiset.hasMem.{u1} α) a s) -> (p a)) -> (LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedCommMonoid.toPartialOrder.{u2} β _inst_2))) (f (Multiset.prod.{u1} α _inst_1 s)) (Multiset.prod.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2) (Multiset.map.{u1, u2} α β f s))))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : CommMonoid.{u2} α] [_inst_2 : OrderedCommMonoid.{u1} β] (f : α -> β) (p : α -> Prop), (forall (a : α) (b : α), (p a) -> (p b) -> (LE.le.{u1} β (Preorder.toLE.{u1} β (PartialOrder.toPreorder.{u1} β (OrderedCommMonoid.toPartialOrder.{u1} β _inst_2))) (f (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) a b)) (HMul.hMul.{u1, u1, u1} β β β (instHMul.{u1} β (MulOneClass.toMul.{u1} β (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2))))) (f a) (f b)))) -> (forall (a : α) (b : α), (p a) -> (p b) -> (p (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) a b))) -> (forall (s : Multiset.{u2} α), (Ne.{succ u2} (Multiset.{u2} α) s (EmptyCollection.emptyCollection.{u2} (Multiset.{u2} α) (Multiset.instEmptyCollectionMultiset.{u2} α))) -> (forall (a : α), (Membership.mem.{u2, u2} α (Multiset.{u2} α) (Multiset.instMembershipMultiset.{u2} α) a s) -> (p a)) -> (LE.le.{u1} β (Preorder.toLE.{u1} β (PartialOrder.toPreorder.{u1} β (OrderedCommMonoid.toPartialOrder.{u1} β _inst_2))) (f (Multiset.prod.{u2} α _inst_1 s)) (Multiset.prod.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2) (Multiset.map.{u2, u1} α β f s))))
Case conversion may be inaccurate. Consider using '#align multiset.le_prod_nonempty_of_submultiplicative_on_pred Multiset.le_prod_nonempty_of_submultiplicative_on_predₓ'. -/
@[to_additive le_sum_nonempty_of_subadditive_on_pred]
theorem le_prod_nonempty_of_submultiplicative_on_pred [CommMonoid α] [OrderedCommMonoid β]
(f : α → β) (p : α → Prop) (h_mul : ∀ a b, p a → p b → f (a * b) ≤ f a * f b)
(hp_mul : ∀ a b, p a → p b → p (a * b)) (s : Multiset α) (hs_nonempty : s ≠ ∅)
(hs : ∀ a, a ∈ s → p a) : f s.Prod ≤ (s.map f).Prod :=
by
revert s
refine' Multiset.induction _ _
· intro h
exfalso
exact h rfl
rintro a s hs hsa_nonempty hsa_prop
rw [prod_cons, map_cons, prod_cons]
by_cases hs_empty : s = ∅
· simp [hs_empty]
have hsa_restrict : ∀ x, x ∈ s → p x := fun x hx => hsa_prop x (mem_cons_of_mem hx)
have hp_sup : p s.prod := prod_induction_nonempty p hp_mul hs_empty hsa_restrict
have hp_a : p a := hsa_prop a (mem_cons_self a s)
exact (h_mul a _ hp_a hp_sup).trans (mul_le_mul_left' (hs hs_empty hsa_restrict) _)
#align multiset.le_prod_nonempty_of_submultiplicative_on_pred Multiset.le_prod_nonempty_of_submultiplicative_on_pred
#align multiset.le_sum_nonempty_of_subadditive_on_pred Multiset.le_sum_nonempty_of_subadditive_on_pred
/- warning: multiset.le_prod_nonempty_of_submultiplicative -> Multiset.le_prod_nonempty_of_submultiplicative is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : OrderedCommMonoid.{u2} β] (f : α -> β), (forall (a : α) (b : α), LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedCommMonoid.toPartialOrder.{u2} β _inst_2))) (f (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)))) a b)) (HMul.hMul.{u2, u2, u2} β β β (instHMul.{u2} β (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2))))) (f a) (f b))) -> (forall (s : Multiset.{u1} α), (Ne.{succ u1} (Multiset.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Multiset.{u1} α) (Multiset.hasEmptyc.{u1} α))) -> (LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedCommMonoid.toPartialOrder.{u2} β _inst_2))) (f (Multiset.prod.{u1} α _inst_1 s)) (Multiset.prod.{u2} β (OrderedCommMonoid.toCommMonoid.{u2} β _inst_2) (Multiset.map.{u1, u2} α β f s))))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : CommMonoid.{u2} α] [_inst_2 : OrderedCommMonoid.{u1} β] (f : α -> β), (forall (a : α) (b : α), LE.le.{u1} β (Preorder.toLE.{u1} β (PartialOrder.toPreorder.{u1} β (OrderedCommMonoid.toPartialOrder.{u1} β _inst_2))) (f (HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)))) a b)) (HMul.hMul.{u1, u1, u1} β β β (instHMul.{u1} β (MulOneClass.toMul.{u1} β (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2))))) (f a) (f b))) -> (forall (s : Multiset.{u2} α), (Ne.{succ u2} (Multiset.{u2} α) s (EmptyCollection.emptyCollection.{u2} (Multiset.{u2} α) (Multiset.instEmptyCollectionMultiset.{u2} α))) -> (LE.le.{u1} β (Preorder.toLE.{u1} β (PartialOrder.toPreorder.{u1} β (OrderedCommMonoid.toPartialOrder.{u1} β _inst_2))) (f (Multiset.prod.{u2} α _inst_1 s)) (Multiset.prod.{u1} β (OrderedCommMonoid.toCommMonoid.{u1} β _inst_2) (Multiset.map.{u2, u1} α β f s))))
Case conversion may be inaccurate. Consider using '#align multiset.le_prod_nonempty_of_submultiplicative Multiset.le_prod_nonempty_of_submultiplicativeₓ'. -/
@[to_additive le_sum_nonempty_of_subadditive]
theorem le_prod_nonempty_of_submultiplicative [CommMonoid α] [OrderedCommMonoid β] (f : α → β)
(h_mul : ∀ a b, f (a * b) ≤ f a * f b) (s : Multiset α) (hs_nonempty : s ≠ ∅) :
f s.Prod ≤ (s.map f).Prod :=
le_prod_nonempty_of_submultiplicative_on_pred f (fun i => True) (by simp [h_mul]) (by simp) s
hs_nonempty (by simp)
#align multiset.le_prod_nonempty_of_submultiplicative Multiset.le_prod_nonempty_of_submultiplicative
#align multiset.le_sum_nonempty_of_subadditive Multiset.le_sum_nonempty_of_subadditive
/- warning: multiset.sum_map_singleton -> Multiset.sum_map_singleton is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Multiset.{u1} α), Eq.{succ u1} (Multiset.{u1} α) (Multiset.sum.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)) (Multiset.map.{u1, u1} α (Multiset.{u1} α) (fun (a : α) => Singleton.singleton.{u1, u1} α (Multiset.{u1} α) (Multiset.hasSingleton.{u1} α) a) s)) s
but is expected to have type
forall {α : Type.{u1}} (s : Multiset.{u1} α), Eq.{succ u1} (Multiset.{u1} α) (Multiset.sum.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)) (Multiset.map.{u1, u1} α (Multiset.{u1} α) (fun (a : α) => Singleton.singleton.{u1, u1} α (Multiset.{u1} α) (Multiset.instSingletonMultiset.{u1} α) a) s)) s
Case conversion may be inaccurate. Consider using '#align multiset.sum_map_singleton Multiset.sum_map_singletonₓ'. -/
@[simp]
theorem sum_map_singleton (s : Multiset α) : (s.map fun a => ({a} : Multiset α)).Sum = s :=
Multiset.induction_on s (by simp) (by simp)
#align multiset.sum_map_singleton Multiset.sum_map_singleton
/- warning: multiset.abs_sum_le_sum_abs -> Multiset.abs_sum_le_sum_abs is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : LinearOrderedAddCommGroup.{u1} α] {s : Multiset.{u1} α}, LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedAddCommGroup.toPartialOrder.{u1} α (LinearOrderedAddCommGroup.toOrderedAddCommGroup.{u1} α _inst_1)))) (Abs.abs.{u1} α (Neg.toHasAbs.{u1} α (SubNegMonoid.toHasNeg.{u1} α (AddGroup.toSubNegMonoid.{u1} α (AddCommGroup.toAddGroup.{u1} α (OrderedAddCommGroup.toAddCommGroup.{u1} α (LinearOrderedAddCommGroup.toOrderedAddCommGroup.{u1} α _inst_1))))) (SemilatticeSup.toHasSup.{u1} α (Lattice.toSemilatticeSup.{u1} α (LinearOrder.toLattice.{u1} α (LinearOrderedAddCommGroup.toLinearOrder.{u1} α _inst_1))))) (Multiset.sum.{u1} α (AddCommGroup.toAddCommMonoid.{u1} α (OrderedAddCommGroup.toAddCommGroup.{u1} α (LinearOrderedAddCommGroup.toOrderedAddCommGroup.{u1} α _inst_1))) s)) (Multiset.sum.{u1} α (AddCommGroup.toAddCommMonoid.{u1} α (OrderedAddCommGroup.toAddCommGroup.{u1} α (LinearOrderedAddCommGroup.toOrderedAddCommGroup.{u1} α _inst_1))) (Multiset.map.{u1, u1} α α (Abs.abs.{u1} α (Neg.toHasAbs.{u1} α (SubNegMonoid.toHasNeg.{u1} α (AddGroup.toSubNegMonoid.{u1} α (AddCommGroup.toAddGroup.{u1} α (OrderedAddCommGroup.toAddCommGroup.{u1} α (LinearOrderedAddCommGroup.toOrderedAddCommGroup.{u1} α _inst_1))))) (SemilatticeSup.toHasSup.{u1} α (Lattice.toSemilatticeSup.{u1} α (LinearOrder.toLattice.{u1} α (LinearOrderedAddCommGroup.toLinearOrder.{u1} α _inst_1)))))) s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : LinearOrderedAddCommGroup.{u1} α] {s : Multiset.{u1} α}, LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (OrderedAddCommGroup.toPartialOrder.{u1} α (LinearOrderedAddCommGroup.toOrderedAddCommGroup.{u1} α _inst_1)))) (Abs.abs.{u1} α (Neg.toHasAbs.{u1} α (NegZeroClass.toNeg.{u1} α (SubNegZeroMonoid.toNegZeroClass.{u1} α (SubtractionMonoid.toSubNegZeroMonoid.{u1} α (SubtractionCommMonoid.toSubtractionMonoid.{u1} α (AddCommGroup.toDivisionAddCommMonoid.{u1} α (OrderedAddCommGroup.toAddCommGroup.{u1} α (LinearOrderedAddCommGroup.toOrderedAddCommGroup.{u1} α _inst_1))))))) (SemilatticeSup.toSup.{u1} α (Lattice.toSemilatticeSup.{u1} α (DistribLattice.toLattice.{u1} α (instDistribLattice.{u1} α (LinearOrderedAddCommGroup.toLinearOrder.{u1} α _inst_1)))))) (Multiset.sum.{u1} α (OrderedCancelAddCommMonoid.toAddCommMonoid.{u1} α (LinearOrderedCancelAddCommMonoid.toOrderedCancelAddCommMonoid.{u1} α (LinearOrderedAddCommGroup.toLinearOrderedAddCancelCommMonoid.{u1} α _inst_1))) s)) (Multiset.sum.{u1} α (OrderedCancelAddCommMonoid.toAddCommMonoid.{u1} α (LinearOrderedCancelAddCommMonoid.toOrderedCancelAddCommMonoid.{u1} α (LinearOrderedAddCommGroup.toLinearOrderedAddCancelCommMonoid.{u1} α _inst_1))) (Multiset.map.{u1, u1} α α (Abs.abs.{u1} α (Neg.toHasAbs.{u1} α (NegZeroClass.toNeg.{u1} α (SubNegZeroMonoid.toNegZeroClass.{u1} α (SubtractionMonoid.toSubNegZeroMonoid.{u1} α (SubtractionCommMonoid.toSubtractionMonoid.{u1} α (AddCommGroup.toDivisionAddCommMonoid.{u1} α (OrderedAddCommGroup.toAddCommGroup.{u1} α (LinearOrderedAddCommGroup.toOrderedAddCommGroup.{u1} α _inst_1))))))) (SemilatticeSup.toSup.{u1} α (Lattice.toSemilatticeSup.{u1} α (DistribLattice.toLattice.{u1} α (instDistribLattice.{u1} α (LinearOrderedAddCommGroup.toLinearOrder.{u1} α _inst_1))))))) s))
Case conversion may be inaccurate. Consider using '#align multiset.abs_sum_le_sum_abs Multiset.abs_sum_le_sum_absₓ'. -/
theorem abs_sum_le_sum_abs [LinearOrderedAddCommGroup α] {s : Multiset α} :
abs s.Sum ≤ (s.map abs).Sum :=
le_sum_of_subadditive _ abs_zero abs_add s
#align multiset.abs_sum_le_sum_abs Multiset.abs_sum_le_sum_abs
#print Multiset.sum_nat_mod /-
theorem sum_nat_mod (s : Multiset ℕ) (n : ℕ) : s.Sum % n = (s.map (· % n)).Sum % n := by
induction s using Multiset.induction <;> simp [Nat.add_mod, *]
#align multiset.sum_nat_mod Multiset.sum_nat_mod
-/
#print Multiset.prod_nat_mod /-
theorem prod_nat_mod (s : Multiset ℕ) (n : ℕ) : s.Prod % n = (s.map (· % n)).Prod % n := by
induction s using Multiset.induction <;> simp [Nat.mul_mod, *]
#align multiset.prod_nat_mod Multiset.prod_nat_mod
-/
/- warning: multiset.sum_int_mod -> Multiset.sum_int_mod is a dubious translation:
lean 3 declaration is
forall (s : Multiset.{0} Int) (n : Int), Eq.{1} Int (HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.hasMod) (Multiset.sum.{0} Int Int.addCommMonoid s) n) (HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.hasMod) (Multiset.sum.{0} Int Int.addCommMonoid (Multiset.map.{0, 0} Int Int (fun (_x : Int) => HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.hasMod) _x n) s)) n)
but is expected to have type
forall (s : Multiset.{0} Int) (n : Int), Eq.{1} Int (HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.instModInt_1) (Multiset.sum.{0} Int Int.instAddCommMonoidInt s) n) (HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.instModInt_1) (Multiset.sum.{0} Int Int.instAddCommMonoidInt (Multiset.map.{0, 0} Int Int (fun (_x : Int) => HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.instModInt_1) _x n) s)) n)
Case conversion may be inaccurate. Consider using '#align multiset.sum_int_mod Multiset.sum_int_modₓ'. -/
theorem sum_int_mod (s : Multiset ℤ) (n : ℤ) : s.Sum % n = (s.map (· % n)).Sum % n := by
induction s using Multiset.induction <;> simp [Int.add_emod, *]
#align multiset.sum_int_mod Multiset.sum_int_mod
/- warning: multiset.prod_int_mod -> Multiset.prod_int_mod is a dubious translation:
lean 3 declaration is
forall (s : Multiset.{0} Int) (n : Int), Eq.{1} Int (HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.hasMod) (Multiset.prod.{0} Int Int.commMonoid s) n) (HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.hasMod) (Multiset.prod.{0} Int Int.commMonoid (Multiset.map.{0, 0} Int Int (fun (_x : Int) => HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.hasMod) _x n) s)) n)
but is expected to have type
forall (s : Multiset.{0} Int) (n : Int), Eq.{1} Int (HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.instModInt_1) (Multiset.prod.{0} Int Int.instCommMonoidInt s) n) (HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.instModInt_1) (Multiset.prod.{0} Int Int.instCommMonoidInt (Multiset.map.{0, 0} Int Int (fun (_x : Int) => HMod.hMod.{0, 0, 0} Int Int Int (instHMod.{0} Int Int.instModInt_1) _x n) s)) n)
Case conversion may be inaccurate. Consider using '#align multiset.prod_int_mod Multiset.prod_int_modₓ'. -/
theorem prod_int_mod (s : Multiset ℤ) (n : ℤ) : s.Prod % n = (s.map (· % n)).Prod % n := by
induction s using Multiset.induction <;> simp [Int.mul_emod, *]
#align multiset.prod_int_mod Multiset.prod_int_mod
end Multiset
/- warning: map_multiset_prod -> map_multiset_prod is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : CommMonoid.{u2} β] {F : Type.{u3}} [_inst_3 : MonoidHomClass.{u3, u1, u2} F α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))] (f : F) (s : Multiset.{u1} α), Eq.{succ u2} β (coeFn.{succ u3, max (succ u1) (succ u2)} F (fun (_x : F) => α -> β) (FunLike.hasCoeToFun.{succ u3, succ u1, succ u2} F α (fun (_x : α) => β) (MulHomClass.toFunLike.{u3, u1, u2} F α β (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) (MonoidHomClass.toMulHomClass.{u3, u1, u2} F α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2)) _inst_3))) f (Multiset.prod.{u1} α _inst_1 s)) (Multiset.prod.{u2} β _inst_2 (Multiset.map.{u1, u2} α β (coeFn.{succ u3, max (succ u1) (succ u2)} F (fun (_x : F) => α -> β) (FunLike.hasCoeToFun.{succ u3, succ u1, succ u2} F α (fun (_x : α) => β) (MulHomClass.toFunLike.{u3, u1, u2} F α β (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1))) (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) (MonoidHomClass.toMulHomClass.{u3, u1, u2} F α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2)) _inst_3))) f) s))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u2}} [_inst_1 : CommMonoid.{u3} α] [_inst_2 : CommMonoid.{u2} β] {F : Type.{u1}} [_inst_3 : MonoidHomClass.{u1, u3, u2} F α β (Monoid.toMulOneClass.{u3} α (CommMonoid.toMonoid.{u3} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))] (f : F) (s : Multiset.{u3} α), Eq.{succ u2} ((fun ([email protected]._hyg.2391 : α) => β) (Multiset.prod.{u3} α _inst_1 s)) (FunLike.coe.{succ u1, succ u3, succ u2} F α (fun (_x : α) => (fun ([email protected]._hyg.2391 : α) => β) _x) (MulHomClass.toFunLike.{u1, u3, u2} F α β (MulOneClass.toMul.{u3} α (Monoid.toMulOneClass.{u3} α (CommMonoid.toMonoid.{u3} α _inst_1))) (MulOneClass.toMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) (MonoidHomClass.toMulHomClass.{u1, u3, u2} F α β (Monoid.toMulOneClass.{u3} α (CommMonoid.toMonoid.{u3} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2)) _inst_3)) f (Multiset.prod.{u3} α _inst_1 s)) (Multiset.prod.{u2} β _inst_2 (Multiset.map.{u3, u2} α β (FunLike.coe.{succ u1, succ u3, succ u2} F α (fun (_x : α) => (fun ([email protected]._hyg.2391 : α) => β) _x) (MulHomClass.toFunLike.{u1, u3, u2} F α β (MulOneClass.toMul.{u3} α (Monoid.toMulOneClass.{u3} α (CommMonoid.toMonoid.{u3} α _inst_1))) (MulOneClass.toMul.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) (MonoidHomClass.toMulHomClass.{u1, u3, u2} F α β (Monoid.toMulOneClass.{u3} α (CommMonoid.toMonoid.{u3} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2)) _inst_3)) f) s))
Case conversion may be inaccurate. Consider using '#align map_multiset_prod map_multiset_prodₓ'. -/
@[to_additive]
theorem map_multiset_prod [CommMonoid α] [CommMonoid β] {F : Type _} [MonoidHomClass F α β] (f : F)
(s : Multiset α) : f s.Prod = (s.map f).Prod :=
(s.prod_hom f).symm
#align map_multiset_prod map_multiset_prod
#align map_multiset_sum map_multiset_sum
/- warning: monoid_hom.map_multiset_prod -> MonoidHom.map_multiset_prod is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : CommMonoid.{u1} α] [_inst_2 : CommMonoid.{u2} β] (f : MonoidHom.{u1, u2} α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) (s : Multiset.{u1} α), Eq.{succ u2} β (coeFn.{max (succ u2) (succ u1), max (succ u1) (succ u2)} (MonoidHom.{u1, u2} α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) (fun (_x : MonoidHom.{u1, u2} α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) => α -> β) (MonoidHom.hasCoeToFun.{u1, u2} α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) f (Multiset.prod.{u1} α _inst_1 s)) (Multiset.prod.{u2} β _inst_2 (Multiset.map.{u1, u2} α β (coeFn.{max (succ u2) (succ u1), max (succ u1) (succ u2)} (MonoidHom.{u1, u2} α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) (fun (_x : MonoidHom.{u1, u2} α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) => α -> β) (MonoidHom.hasCoeToFun.{u1, u2} α β (Monoid.toMulOneClass.{u1} α (CommMonoid.toMonoid.{u1} α _inst_1)) (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_2))) f) s))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : CommMonoid.{u2} α] [_inst_2 : CommMonoid.{u1} β] (f : MonoidHom.{u2, u1} α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))) (s : Multiset.{u2} α), Eq.{succ u1} ((fun ([email protected]._hyg.2391 : α) => β) (Multiset.prod.{u2} α _inst_1 s)) (FunLike.coe.{max (succ u2) (succ u1), succ u2, succ u1} (MonoidHom.{u2, u1} α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))) α (fun (_x : α) => (fun ([email protected]._hyg.2391 : α) => β) _x) (MulHomClass.toFunLike.{max u2 u1, u2, u1} (MonoidHom.{u2, u1} α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))) α β (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))) (MulOneClass.toMul.{u1} β (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))) (MonoidHomClass.toMulHomClass.{max u2 u1, u2, u1} (MonoidHom.{u2, u1} α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))) α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2)) (MonoidHom.monoidHomClass.{u2, u1} α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))))) f (Multiset.prod.{u2} α _inst_1 s)) (Multiset.prod.{u1} β _inst_2 (Multiset.map.{u2, u1} α β (FunLike.coe.{max (succ u2) (succ u1), succ u2, succ u1} (MonoidHom.{u2, u1} α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))) α (fun (_x : α) => (fun ([email protected]._hyg.2391 : α) => β) _x) (MulHomClass.toFunLike.{max u2 u1, u2, u1} (MonoidHom.{u2, u1} α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))) α β (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1))) (MulOneClass.toMul.{u1} β (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))) (MonoidHomClass.toMulHomClass.{max u2 u1, u2, u1} (MonoidHom.{u2, u1} α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))) α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2)) (MonoidHom.monoidHomClass.{u2, u1} α β (Monoid.toMulOneClass.{u2} α (CommMonoid.toMonoid.{u2} α _inst_1)) (Monoid.toMulOneClass.{u1} β (CommMonoid.toMonoid.{u1} β _inst_2))))) f) s))
Case conversion may be inaccurate. Consider using '#align monoid_hom.map_multiset_prod MonoidHom.map_multiset_prodₓ'. -/
@[to_additive]
protected theorem MonoidHom.map_multiset_prod [CommMonoid α] [CommMonoid β] (f : α →* β)
(s : Multiset α) : f s.Prod = (s.map f).Prod :=
(s.prod_hom f).symm
#align monoid_hom.map_multiset_prod MonoidHom.map_multiset_prod
#align add_monoid_hom.map_multiset_sum AddMonoidHom.map_multiset_sum
|
Although he was a merciful warrior, David still had to fight Israel’s enemies. This account is strange in that it seems the battles described here could have been avoided if only Hanun, the leader of Ammon, had taken David’s sincere condolences at face value. But Hanun listened to his advisers, who gave him extremely poor advice! It could be that they truly, though erroneously, believed David’s messengers were spies. Or these nobles of Ammon may have turned against David in hopes of provoking him and gaining a victory over Israel.
Whatever the reason, David’s men were humiliated by their Ammonite hosts with no provocation. Then the Ammonites raised an army of 33,000 mercenaries from neighboring Aramean states — another act of unprovoked aggression against Israel. David’s army scored a resounding victory under the leadership of Joab and his brother Abishai. But the Arameans apparently did not learn their lesson, because they regrouped, sent for reinforcements, and came at Israel again! This time David himself led the armies of God to victory — the last recorded triumph of Israel’s warrior-king before his affair with Bathsheba.
What does the humiliation of David’s messengers suggest about the Ammonites’ motives?
What do Amos 1:13 and Zephaniah 2:8–11 tell us about the history of the Ammonites’ relationship with Israel?
Why do you think God gave Joab such a great victory, despite the fact that Joab had committed treachery and murder?
Why did David himself lead the final battle against the Arameans?
Anyone whose well-intentioned motives have been misconstrued — whether accidentally or purposely — can identify with the envoys of David in this story. Since all of us have felt the pain of this rejection, let us resolve to be extremely careful in judging the motives of others! |
theory Analyze_Containern
imports
Iptables_Semantics.Parser
begin
parse_iptables_save docker_fw="iptables-save.topos4.1.established"
thm docker_fw_def
thm docker_fw_FORWARD_default_policy_def
definition "unfolded_FORWARD = unfold_ruleset_FORWARD docker_fw_FORWARD_default_policy (map_of_string_ipv4 (docker_fw))"
value[code] "map (quote_rewrite \<circ> common_primitive_rule_toString) (unfolded_FORWARD)"
value[code] "map (quote_rewrite \<circ> common_primitive_rule_toString) (upper_closure unfolded_FORWARD)"
value[code] "upper_closure (packet_assume_new unfolded_FORWARD)"
lemma "check_simple_fw_preconditions (upper_closure (optimize_matches abstract_for_simple_firewall
(upper_closure (packet_assume_new unfolded_FORWARD))))" by eval
lemma "simple_fw_valid (to_simple_firewall (upper_closure
(optimize_matches abstract_for_simple_firewall (upper_closure (packet_assume_new unfolded_FORWARD)))))"
by eval
lemma "simple_fw_valid (to_simple_firewall (lower_closure
(optimize_matches abstract_for_simple_firewall (lower_closure (packet_assume_new unfolded_FORWARD)))))"
by eval
value[code] "map simple_rule_ipv4_toString (to_simple_firewall (upper_closure
(optimize_matches abstract_for_simple_firewall (upper_closure (packet_assume_new unfolded_FORWARD)))))"
value[code] "map simple_rule_ipv4_toString (to_simple_firewall (lower_closure
(optimize_matches abstract_for_simple_firewall (lower_closure (packet_assume_new unfolded_FORWARD)))))"
(*Interfaces be gone! Abstracting over interfaces is necessary for the IP address space partition.*)
definition preprocess where
"preprocess unfold closure ipassmt def fw \<equiv> to_simple_firewall (closure
(optimize_matches (abstract_primitive (\<lambda>r. case r of Pos a \<Rightarrow> is_Iiface a \<or> is_Oiface a \<or> is_L4_Flags a
| Neg a \<Rightarrow> is_Iiface a \<or> is_Oiface a \<or> is_Prot a \<or> is_L4_Flags a))
(closure
(iface_try_rewrite ipassmt None
(closure
(packet_assume_new
(unfold def (map_of fw))))))))"
definition preprocess_ESTABLISHED where
"preprocess_ESTABLISHED unfold closure ipassmt def fw \<equiv> to_simple_firewall (closure
(optimize_matches (abstract_primitive (\<lambda>r. case r of Pos a \<Rightarrow> is_Iiface a \<or> is_Oiface a \<or> is_L4_Flags a
| Neg a \<Rightarrow> is_Iiface a \<or> is_Oiface a \<or> is_Prot a \<or> is_L4_Flags a))
(closure
(iface_try_rewrite ipassmt None
(closure
(optimize_matches (ctstate_assume_state CT_Established)
(unfold def (map_of fw))))))))"
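(* Both pipelines unfold the given ruleset, assume new (respectively
   ESTABLISHED) connection state, rewrite interfaces to their IP ranges via
   the ipassmt, abstract over primitives the simple firewall cannot express,
   and translate the result to a simple firewall. *)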
(*The ipassmt is incomplete, but the missing interfaces are not needed for this analysis.*)
definition ipassmt :: "(iface \<times> (32 word \<times> nat) list) list" where
"ipassmt = [(Iface ''lo'', [(ipv4addr_of_dotdecimal (127,0,0,0),8)]),
(Iface ''br-b74b417b331f'', [(ipv4addr_of_dotdecimal (10,0,0,0),8)])
]"
lemma "distinct (map fst ipassmt)" by eval
lemma "ipassmt_sanity_nowildcards (map_of ipassmt)" by eval
value[code] "map_of_ipassmt ipassmt"
value[code] "access_matrix_pretty_ipv4 parts_connection_ssh
(preprocess unfold_ruleset_FORWARD upper_closure ipassmt docker_fw_FORWARD_default_policy docker_fw)"
value[code] "access_matrix_pretty_ipv4 parts_connection_ssh
(preprocess_ESTABLISHED unfold_ruleset_FORWARD upper_closure ipassmt docker_fw_FORWARD_default_policy docker_fw)"
value[code] "access_matrix_pretty_ipv4 parts_connection_http
(preprocess unfold_ruleset_FORWARD upper_closure ipassmt docker_fw_FORWARD_default_policy docker_fw)"
value[code] "access_matrix_pretty_ipv4 parts_connection_http
(preprocess_ESTABLISHED unfold_ruleset_FORWARD upper_closure ipassmt docker_fw_FORWARD_default_policy docker_fw)"
parse_iptables_save docker_fw2="iptables-save.topos4.1"
thm docker_fw2_def
thm docker_fw2_FORWARD_default_policy_def
value[code] "access_matrix_pretty_ipv4 parts_connection_ssh
(preprocess unfold_ruleset_FORWARD upper_closure ipassmt docker_fw2_FORWARD_default_policy docker_fw2)"
value[code] "access_matrix_pretty_ipv4 parts_connection_ssh
(preprocess_ESTABLISHED unfold_ruleset_FORWARD upper_closure ipassmt docker_fw2_FORWARD_default_policy docker_fw2)"
value[code] "access_matrix_pretty_ipv4 parts_connection_http
(preprocess unfold_ruleset_FORWARD upper_closure ipassmt docker_fw2_FORWARD_default_policy docker_fw2)"
value[code] "access_matrix_pretty_ipv4 parts_connection_http
(preprocess_ESTABLISHED unfold_ruleset_FORWARD upper_closure ipassmt docker_fw2_FORWARD_default_policy docker_fw2)"
text\<open>Only one of the flows additionally allows answers for ESTABLISHED connections\<close>
lemma "let new = access_matrix_pretty_ipv4 parts_connection_http
(preprocess unfold_ruleset_FORWARD upper_closure ipassmt docker_fw2_FORWARD_default_policy docker_fw2);
est = access_matrix_pretty_ipv4 parts_connection_http
(preprocess_ESTABLISHED unfold_ruleset_FORWARD upper_closure ipassmt docker_fw2_FORWARD_default_policy docker_fw2)
in fst est = fst new \<and>
set (snd est) - set (snd new) = {(''0.0.0.0'', ''10.0.0.4'')}" by eval
parse_iptables_save docker_fw_initial="iptables-save.topos1"
thm docker_fw_initial_def
thm docker_fw_initial_FORWARD_default_policy_def
(*The original docker0 bridge is still in this dump, so we need its address assignment.*)
definition ipassmt_initial :: "(iface \<times> (32 word \<times> nat) list) list" where
"ipassmt_initial = (Iface ''docker0'', [(ipv4addr_of_dotdecimal (172,17,0,1),16)])#ipassmt"
lemma "distinct (map fst ipassmt_initial)" by eval
lemma "ipassmt_sanity_nowildcards (map_of ipassmt_initial)" by eval
value[code] "access_matrix_pretty_ipv4 parts_connection_ssh
(preprocess unfold_ruleset_FORWARD upper_closure ipassmt_initial docker_fw_initial_FORWARD_default_policy docker_fw_initial)"
value[code] "access_matrix_pretty_ipv4 parts_connection_ssh
(preprocess_ESTABLISHED unfold_ruleset_FORWARD upper_closure ipassmt_initial docker_fw_initial_FORWARD_default_policy docker_fw_initial)"
value[code] "access_matrix_pretty_ipv4 parts_connection_http
(preprocess unfold_ruleset_FORWARD upper_closure ipassmt_initial docker_fw_initial_FORWARD_default_policy docker_fw_initial)"
value[code] "access_matrix_pretty_ipv4 parts_connection_http
(preprocess_ESTABLISHED unfold_ruleset_FORWARD upper_closure ipassmt_initial docker_fw_initial_FORWARD_default_policy docker_fw_initial)"
end
|
data MyN = MkN Nat Nat
foo : Nat -> Nat -> Nat
foo x y = case MkN x of
MkN z => y
main : IO ()
main = printLn (foo 2 2)
|
Not American... how old is one in grade 8?
I think like, 13 and 14. So definitely a very awkward time.
I feel like bullying can be more of a middle school thing; with high school it's more that you get ignored. IDK, I remember kids fighting in high school, but I don't remember much bullying.
import data.list.perm
import data.sigma.on_fst
local attribute [simp] not_or_distrib and.assoc
local attribute [-simp] sigma.forall
namespace list
section αβ
variables {α : Type*} {β : α → Type*}
/-- Keys: the list of keys from a list of dependent key-value pairs -/
def keys : list (sigma β) → list α :=
map sigma.fst
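-- A hypothetical sanity check (not part of the original development) on
-- `ℕ`-keyed pairs with a constant value type:
example : ([⟨1, 2⟩, ⟨3, 4⟩] : list (Σ _ : ℕ, ℕ)).keys = [1, 3] := rfl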
section keys
variables {a : α} {s hd : sigma β} {l l₁ l₂ tl : list (sigma β)}
@[simp] theorem keys_nil : @keys α β [] = [] :=
rfl
@[simp] theorem keys_cons : (hd :: tl).keys = hd.1 :: tl.keys :=
rfl
@[simp] theorem keys_singleton : [s].keys = [s.1] :=
rfl
@[simp] theorem keys_append : (l₁ ++ l₂).keys = l₁.keys ++ l₂.keys :=
by simp [keys]
@[simp] theorem keys_iff_ne_key_of_mem :
(∀ (s : sigma β), s ∈ l → a ≠ s.1) ↔ a ∉ l.keys :=
by induction l; simp *
theorem mem_of_ne_key_of_mem_cons (h : hd.1 ≠ s.1) : s ∈ hd :: tl → s ∈ tl :=
by cases s; cases hd; simp [ne.symm h]
theorem mem_keys_of_mem : s ∈ l → s.1 ∈ l.keys :=
mem_map_of_mem sigma.fst
theorem exists_mem_of_mem_keys (h : a ∈ l.keys) : ∃ (b : β a), sigma.mk a b ∈ l :=
let ⟨⟨a', b'⟩, m, e⟩ := exists_of_mem_map h in
eq.rec_on e (exists.intro b' m)
theorem mem_keys : a ∈ l.keys ↔ ∃ (b : β a), sigma.mk a b ∈ l :=
⟨exists_mem_of_mem_keys, λ ⟨b, h⟩, mem_keys_of_mem h⟩
end keys
/-- No duplicate keys in a list of dependent key-value pairs. -/
def nodupkeys : list (sigma β) → Prop :=
pairwise (sigma.fst_rel (≠))
section nodupkeys
variables {s t hd : sigma β} {l l₁ l₂ tl : list (sigma β)}
@[simp] theorem nodupkeys_nil : @nodupkeys α β [] :=
pairwise.nil _
@[simp] theorem nodupkeys_cons :
(hd :: tl).nodupkeys ↔ hd.1 ∉ tl.keys ∧ tl.nodupkeys :=
by simp [nodupkeys, sigma.fst_rel]
theorem nodupkeys_cons_of_nodupkeys (h : hd.1 ∉ tl.keys)
(t : nodupkeys tl) : nodupkeys (hd :: tl) :=
nodupkeys_cons.mpr ⟨h, t⟩
theorem nodupkeys_singleton (s : sigma β) : nodupkeys [s] :=
nodupkeys_cons_of_nodupkeys (not_mem_nil s.1) nodupkeys_nil
theorem nodup_of_nodupkeys : l.nodupkeys → l.nodup :=
pairwise.imp $ λ ⟨a₁, b₁⟩ ⟨a₂, b₂⟩ (h : a₁ ≠ a₂), by simp [h]
@[simp] theorem nodupkeys_iff : l.keys.nodup ↔ l.nodupkeys :=
pairwise_map sigma.fst
theorem perm_nodupkeys (p : l₁ ~ l₂) : l₁.nodupkeys ↔ l₂.nodupkeys :=
perm_pairwise (@sigma.fst_rel.symm α β (≠) (@ne.symm α)) p
@[simp] theorem nodupkeys_cons_of_not_mem_keys (h : hd.1 ∉ tl.keys) :
(hd :: tl).nodupkeys ↔ tl.nodupkeys :=
begin
induction tl,
case list.nil { simp },
case list.cons : hd₁ tl ih {
simp at h,
simp [perm_nodupkeys (perm.swap hd₁ hd tl), ne.symm h.1, ih h.2] }
end
variables {ls : list (list (sigma β))}
theorem nodupkeys_join : (join ls).nodupkeys ↔
(∀ {l : list (sigma β)}, l ∈ ls → l.nodupkeys) ∧ pairwise disjoint (ls.map keys) :=
have ∀ (l₁ l₂ : list (sigma β)), (∀ (s ∈ l₁) (t ∈ l₂), sigma.fst_rel ne s t) ↔ disjoint l₁.keys l₂.keys :=
λ l₁ l₂,
have h₁ : (∀ (s : sigma β), s ∈ l₁ → s.1 ∉ l₂.keys) → disjoint l₁.keys l₂.keys :=
λ f a mkas mkat, let ⟨b, mabs⟩ := exists_mem_of_mem_keys mkas in
absurd mkat $ f ⟨a, b⟩ mabs,
have h₂ : disjoint l₁.keys l₂.keys → ∀ (s : sigma β), s ∈ l₁ → s.1 ∉ l₂.keys :=
λ dj s mss mkat, absurd mkat $ dj $ mem_keys_of_mem mss,
⟨by simpa using h₁, by simpa using h₂⟩,
pairwise_join.trans $ and_congr iff.rfl $ (pairwise.iff this).trans (pairwise_map _).symm
theorem nodup_enum_map_fst (l : list α) : (l.enum.map prod.fst).nodup :=
by simp [list.nodup_range]
theorem perm_keys_of_perm (nd₁ : l₁.nodupkeys) (nd₂ : l₂.nodupkeys) (p : l₁ ~ l₂) :
l₁.keys ~ l₂.keys :=
begin
induction p,
case list.perm.nil { refl },
case list.perm.skip : hd tl₁ tl₂ p ih {
simp at nd₁ nd₂,
simp [perm.skip hd.1 (ih nd₁.2 nd₂.2)] },
case list.perm.swap : s₁ s₂ l {
simp [perm.swap s₁.1 s₂.1 (keys l)] },
case list.perm.trans : l₁ l₂ l₃ p₁₂ p₂₃ ih₁₂ ih₂₃ nd₁ nd₃ {
have nd₂ : l₂.nodupkeys := (perm_nodupkeys p₁₂).mp nd₁,
exact perm.trans (ih₁₂ nd₁ nd₂) (ih₂₃ nd₂ nd₃) }
end
-- Is this useful?
theorem nodupkeys_functional (d : l.nodupkeys) (ms : s ∈ l) (mt : t ∈ l)
(h : s.1 = t.1) : (eq.rec_on h s.2 : β t.1) = t.2 :=
begin
induction d,
case pairwise.nil { cases ms },
case pairwise.cons : _ _ r _ ih {
simp at ms mt,
cases ms; cases mt,
{ subst ms, subst mt },
{ induction ms, exact absurd h (r _ mt) },
{ induction mt, exact absurd h (ne.symm (r _ ms)) },
{ exact ih ms mt } },
end
-- Is this useful?
theorem eq_of_nodupkeys_of_eq_fst (d : l.nodupkeys) (ms : s ∈ l) (mt : t ∈ l)
(h : s.1 = t.1) : s = t :=
sigma.eq h $ nodupkeys_functional d ms mt h
end nodupkeys
section decidable_eq_α
variables [decidable_eq α]
/-- Key-based single-value lookup in a list of dependent key-value pairs. The
result is the first key-matching value found, if one exists. -/
def klookup (a : α) : list (sigma β) → option (β a)
| [] := none
| (hd :: tl) := if h : hd.1 = a then some (h.rec_on hd.2) else klookup tl
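-- Hypothetical sanity checks (not part of the original development); they
-- should hold by evaluation, assuming `dec_trivial` can unfold the
-- `decidable_eq ℕ` instance: the first matching value wins.
example : ([⟨1, 2⟩, ⟨1, 3⟩] : list (Σ _ : ℕ, ℕ)).klookup 1 = some 2 := dec_trivial
example : ([⟨1, 2⟩, ⟨1, 3⟩] : list (Σ _ : ℕ, ℕ)).klookup 4 = none := dec_trivial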
section klookup
variables {a : α} {s hd : sigma β} {l l₁ l₂ tl : list (sigma β)}
@[simp] theorem klookup_nil : @klookup _ β _ a [] = none :=
rfl
@[simp] theorem klookup_cons_eq (h : hd.1 = a) :
klookup a (hd :: tl) = some (h.rec_on hd.2) :=
by simp [klookup, h]
@[simp] theorem klookup_cons_ne (h : hd.1 ≠ a) :
klookup a (hd :: tl) = klookup a tl :=
by simp [klookup, h]
@[simp] theorem klookup_eq (a : α) : ∀ (l : list (sigma β)),
klookup a l = none ∨ ∃ (b : β a), b ∈ l.klookup a
| [] := or.inl rfl
| (hd :: tl) :=
if h₁ : hd.1 = a then
or.inr ⟨h₁.rec_on hd.2, klookup_cons_eq h₁⟩
else
match klookup_eq tl with
| or.inl h₂ := or.inl $ (klookup_cons_ne h₁).trans h₂
| or.inr ⟨b, h₂⟩ := or.inr ⟨b, (klookup_cons_ne h₁).trans h₂⟩
end
theorem klookup_is_some : (l.klookup a).is_some ↔ ∃ (b : β a), b ∈ l.klookup a :=
by simp [option.is_some_iff_exists]
theorem klookup_not_mem_keys : a ∉ l.keys ↔ klookup a l = none :=
by induction l with hd _ ih;
[simp, {by_cases h : hd.1 = a; [simp [h], simp [h, ne.symm h, ih]]}]
@[simp] theorem mem_klookup_of_nodupkeys (nd : l.nodupkeys) : s.2 ∈ l.klookup s.1 ↔ s ∈ l :=
begin
induction l generalizing s,
case list.nil { simp },
case list.cons : hd tl ih {
simp at nd,
by_cases h : hd.1 = s.1,
{ rw klookup_cons_eq h,
cases s with a₁ b₁,
cases hd with a₂ b₂,
dsimp at h,
induction h,
split,
{ simp {contextual := tt} },
{ intro h,
simp at h,
cases h with h h,
{ simp [h] },
{ exact absurd (mem_keys_of_mem h) nd.1 } } },
{ rw [klookup_cons_ne h, mem_cons_iff],
split,
{ exact or.inr ∘ (ih nd.2).mp },
{ intro p,
cases p with p p,
{ induction p, exact false.elim (ne.irrefl h) },
{ exact (ih nd.2).mpr p } } } }
end
theorem perm_klookup (nd₁ : l₁.nodupkeys) (nd₂ : l₂.nodupkeys) (p : l₁ ~ l₂) :
l₁.klookup a = l₂.klookup a :=
begin
induction p,
case list.perm.nil { refl },
case list.perm.skip : hd tl₁ tl₂ p ih nd₁ nd₂ {
by_cases h : hd.1 = a,
{ simp [h] },
{ simp at nd₁ nd₂, simp [h, ih nd₁.2 nd₂.2] } },
case list.perm.swap : s₁ s₂ l nd₂₁ nd₁₂ {
simp at nd₂₁ nd₁₂,
by_cases h₂ : s₂.1 = a,
{ induction h₂, simp [nd₁₂.1] },
{ by_cases h₁ : s₁.1 = a; simp [h₂, h₁] } },
case list.perm.trans : l₁ l₂ l₃ p₁₂ p₂₃ ih₁₂ ih₂₃ nd₁ nd₃ {
have nd₂ : l₂.nodupkeys := (perm_nodupkeys p₁₂).mp nd₁,
exact eq.trans (ih₁₂ nd₁ nd₂) (ih₂₃ nd₂ nd₃) }
end
end klookup
/-- Key-based multiple-value lookup in a list of dependent key-value pairs.
The result is a list of all key-matching values. -/
def klookup_all (a : α) : list (sigma β) → list (β a)
| [] := []
| (hd :: tl) :=
let tl' := klookup_all tl in
if h : hd.1 = a then h.rec_on hd.2 :: tl' else tl'
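-- A hypothetical check (not in the original development): all values whose
-- key matches are returned, in order; should hold by evaluation.
example : ([⟨1, 2⟩, ⟨3, 4⟩, ⟨1, 5⟩] : list (Σ _ : ℕ, ℕ)).klookup_all 1 = [2, 5] := dec_trivial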
section klookup_all
variables {a : α} {hd : sigma β} {l l₁ l₂ tl : list (sigma β)}
@[simp] theorem klookup_all_nil : @klookup_all _ β _ a [] = [] :=
rfl
@[simp] theorem klookup_all_cons_eq (h : hd.1 = a) :
(hd :: tl).klookup_all a = h.rec_on hd.2 :: tl.klookup_all a :=
by simp [klookup_all, h]
@[simp] theorem klookup_all_cons_ne (h : hd.1 ≠ a) :
(hd :: tl).klookup_all a = tl.klookup_all a :=
by simp [klookup_all, h]
theorem klookup_all_head [inhabited (β a)] :
(l.klookup_all a).head = (l.klookup a).iget :=
by induction l with hd; [refl, {by_cases hd.1 = a; simp *}]
theorem perm_klookup_all (p : l₁ ~ l₂) : l₁.klookup_all a ~ l₂.klookup_all a :=
begin
induction p,
case list.perm.nil { refl },
case list.perm.skip : hd tl₁ tl₂ p ih {
by_cases h : hd.1 = a; simp [h, ih, perm.skip] },
case list.perm.swap : s₁ s₂ l {
by_cases h₁ : s₁.1 = a; by_cases h₂ : s₂.1 = a; simp [h₁, h₂, perm.swap] },
case list.perm.trans : l₁ l₂ l₃ p₁₂ p₂₃ ih₁₂ ih₂₃ {
exact perm.trans ih₁₂ ih₂₃ }
end
end klookup_all
/-- Key-based single-pair erasure in a list of dependent key-value pairs. The
result is the list minus the first key-matching pair, if one exists. -/
def kerase (a : α) : list (sigma β) → list (sigma β)
| [] := []
| (hd :: tl) := if hd.1 = a then tl else hd :: kerase tl
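-- A hypothetical check (not in the original development): only the first
-- key-matching pair is erased; should hold by `rfl`-evaluation.
example : ([⟨1, 2⟩, ⟨3, 4⟩, ⟨1, 5⟩] : list (Σ _ : ℕ, ℕ)).kerase 1 = [⟨3, 4⟩, ⟨1, 5⟩] := rfl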
section kerase
variables {a a₁ a₂ : α} {s hd : sigma β} {l l₁ l₂ tl : list (sigma β)}
@[simp] theorem kerase_nil : @kerase _ β _ a [] = [] :=
rfl
@[simp] theorem kerase_cons_eq (h : hd.1 = a) :
(hd :: tl).kerase a = tl :=
by simp [kerase, h]
@[simp] theorem kerase_cons_ne (h : hd.1 ≠ a) :
(hd :: tl).kerase a = hd :: tl.kerase a :=
by simp [kerase, h]
theorem kerase_cons (a : α) (hd : sigma β) (tl : list (sigma β)) :
hd.1 = a ∧ (hd :: tl).kerase a = tl ∨
hd.1 ≠ a ∧ (hd :: tl).kerase a = hd :: tl.kerase a :=
by by_cases h : hd.1 = a; simp [h]
@[simp] theorem mem_kerase_nil : s ∈ @kerase _ β _ a [] ↔ false :=
by simp
@[simp] theorem kerase_of_not_mem_keys (h : a ∉ l.keys) : l.kerase a = l :=
by induction l with _ _ ih;
[refl, {simp at h, simp [h.1, ne.symm h.1, ih h.2]}]
theorem exists_kerase_eq (h : a ∈ l.keys) :
∃ (b : β a) (l₁ l₂ : list (sigma β)),
a ∉ l₁.keys ∧
l = l₁ ++ ⟨a, b⟩ :: l₂ ∧
l.kerase a = l₁ ++ l₂ :=
begin
induction l,
case list.nil { cases h },
case list.cons : hd tl ih {
by_cases e : hd.1 = a,
{ induction e,
exact ⟨hd.2, [], tl, by simp, by cases hd; refl, by simp⟩ },
{ simp at h,
cases h,
case or.inl : h { exact absurd h (ne.symm e) },
case or.inr : h {
rcases ih h with ⟨b, tl₁, tl₂, h₁, h₂, h₃⟩,
exact ⟨b, hd :: tl₁, tl₂, not_mem_cons_of_ne_of_not_mem (ne.symm e) h₁,
by rw h₂; refl, by simp [e, h₃]⟩ } } }
end
theorem kerase_sublist (a : α) (l : list (sigma β)) : l.kerase a <+ l :=
if h : a ∈ l.keys then
match l, l.kerase a, exists_kerase_eq h with
| _, _, ⟨_, _, _, _, rfl, rfl⟩ := by simp
end
else
by simp [h]
theorem kerase_subset (a : α) (l : list (sigma β)) : l.kerase a ⊆ l :=
subset_of_sublist (kerase_sublist a l)
theorem kerase_sublist_kerase (a : α) : ∀ {l₁ l₂ : list (sigma β)},
l₁ <+ l₂ → l₁.kerase a <+ l₂.kerase a
| _ _ sublist.slnil := sublist.slnil
| _ _ (sublist.cons l₁ l₂ hd sl) :=
if h : hd.1 = a then
by rw [kerase_cons_eq h]; exact (kerase_sublist _ _).trans sl
else
by rw kerase_cons_ne h; exact (kerase_sublist_kerase sl).cons _ _ _
| _ _ (sublist.cons2 l₁ l₂ hd sl) :=
if h : hd.1 = a then
by repeat {rw kerase_cons_eq h}; exact sl
else
by repeat {rw kerase_cons_ne h}; exact (kerase_sublist_kerase sl).cons2 _ _ _
theorem mem_of_mem_kerase : s ∈ l.kerase a → s ∈ l :=
@kerase_subset _ _ _ _ _ _
@[simp] theorem mem_kerase_of_ne (h : s.1 ≠ a) : s ∈ l.kerase a ↔ s ∈ l :=
iff.intro mem_of_mem_kerase $ λ p,
if q : a ∈ l.keys then
match l, l.kerase a, exists_kerase_eq q, p with
| _, _, ⟨_, _, _, _, rfl, rfl⟩, p :=
by clear _match; cases s; simpa [h] using p
end
else
by simp [q, p]
theorem kerase_subset_keys (a : α) (l : list (sigma β)) :
(l.kerase a).keys ⊆ l.keys :=
subset_of_sublist (map_sublist_map _ (kerase_sublist a l))
theorem mem_keys_of_mem_keys_kerase : a₁ ∈ (l.kerase a₂).keys → a₁ ∈ l.keys :=
@kerase_subset_keys _ _ _ _ _ _
@[simp] theorem mem_keys_kerase_of_ne (h : a₂ ≠ a₁) :
a₁ ∈ (l.kerase a₂).keys ↔ a₁ ∈ l.keys :=
iff.intro mem_keys_of_mem_keys_kerase $ λ p,
if q : a₂ ∈ l.keys then
match l, l.kerase a₂, exists_kerase_eq q, p with
| _, _, ⟨_, _, _, _, rfl, rfl⟩, p := by simpa [ne.symm h] using p
end
else
by simp [q, p]
@[simp] theorem nodupkeys_kerase (a : α) :
l.nodupkeys → (l.kerase a).nodupkeys :=
begin
induction l,
case list.nil { simp },
case list.cons : hd tl ih {
intro nd,
simp at nd,
by_cases h : hd.1 = a,
{ simp [h, nd.2] },
{ rw [kerase_cons_ne h, nodupkeys_cons],
exact ⟨mt (mem_keys_kerase_of_ne (ne.symm h)).mp nd.1, ih nd.2⟩ } }
end
@[simp] theorem not_mem_keys_kerase_self (nd : l.nodupkeys) :
a ∉ (l.kerase a).keys :=
begin
induction l,
case list.nil { simp },
case list.cons : hd tl ih {
simp at nd,
by_cases h : hd.1 = a,
{ induction h, simp [nd.1] },
{ simp [h, ne.symm h, ih nd.2] } }
end
theorem kerase_append_left : ∀ {l₁ l₂ : list (sigma β)},
a ∈ l₁.keys → (l₁ ++ l₂).kerase a = l₁.kerase a ++ l₂
| [] _ h := by cases h
| (hd :: tl₁) l₂ h₁ :=
if h₂ : hd.1 = a then
by simp [h₂]
else
by simp at h₁; cases h₁;
[exact absurd h₁ (ne.symm h₂), simp [h₂, kerase_append_left h₁]]
theorem kerase_append_right : ∀ {l₁ l₂ : list (sigma β)},
a ∉ l₁.keys → (l₁ ++ l₂).kerase a = l₁ ++ l₂.kerase a
| [] _ h := rfl
| (_ :: tl₁) l₂ h := by simp at h; simp [ne.symm h.1, kerase_append_right h.2]
theorem kerase_comm (a₁ a₂ : α) (l : list (sigma β)) :
(l.kerase a₁).kerase a₂ = (l.kerase a₂).kerase a₁ :=
if h : a₂ = a₁ then
by simp [h]
else if ha₁ : a₁ ∈ l.keys then
if ha₂ : a₂ ∈ l.keys then
match l, l.kerase a₁, exists_kerase_eq ha₁, ha₂ with
| _, _, ⟨b₁, l₁, l₂, a₁_nin_l₁, rfl, rfl⟩, a₂_in_l₁_app_l₂ :=
if h' : a₂ ∈ l₁.keys then
by simp [kerase_append_left h',
kerase_append_right (mt (mem_keys_kerase_of_ne h).mp a₁_nin_l₁)]
else
by simp [kerase_append_right h', kerase_append_right a₁_nin_l₁,
@kerase_cons_ne _ _ _ a₂ ⟨a₁, b₁⟩ _ (ne.symm h)]
end
else
by simp [ha₂, mt mem_keys_of_mem_keys_kerase ha₂]
else
by simp [ha₁, mt mem_keys_of_mem_keys_kerase ha₁]
@[simp] theorem klookup_kerase (nd : l.nodupkeys) : (l.kerase a).klookup a = none :=
begin
induction l,
case list.nil { simp },
case list.cons : hd tl ih {
simp at nd,
by_cases h₁ : hd.1 = a,
{ by_cases h₂ : a ∈ tl.keys,
{ induction h₁, exact absurd h₂ nd.1 },
{ simp [h₁, klookup_not_mem_keys.mp h₂] } },
{ simp [h₁, ih nd.2] } }
end
theorem ne_of_nodupkeys_of_mem_kerase :
l.nodupkeys → s ∈ l.kerase a → a ≠ s.1 :=
begin
induction l,
case list.nil { simp },
case list.cons : hd tl ih {
intros nd h,
simp at nd,
rcases kerase_cons a hd tl with ⟨he, p⟩ | ⟨hn, p⟩,
{ induction he,
simp [p] at h,
exact ne.symm (ne_of_mem_of_not_mem (mem_keys_of_mem h) nd.1) },
{ simp [hn] at h,
cases h with h h,
{ induction h, exact ne.symm hn },
{ exact ih nd.2 h } } }
end
theorem nodupkeys_kerase_eq_filter (a : α) (nd : l.nodupkeys) :
l.kerase a = filter (λ s, s.1 ≠ a) l :=
begin
induction nd,
case pairwise.nil { refl },
case pairwise.cons : s l n p ih {
by_cases h : s.1 = a,
{ have : filter (λ (t : sigma β), t.1 ≠ a) l = l :=
filter_eq_self.mpr (λ t th, h ▸ ne.symm (n t th)),
simp [h, kerase, filter, this] },
{ simp [h, ih] } }
end
@[simp] theorem mem_kerase_of_nodupkeys (nd : l.nodupkeys) :
s ∈ l.kerase a ↔ s.1 ≠ a ∧ s ∈ l :=
by rw nodupkeys_kerase_eq_filter a nd; simp [and_comm]
theorem perm_kerase (nd₁ : l₁.nodupkeys) (nd₂ : l₂.nodupkeys) (p : l₁ ~ l₂) :
l₁.kerase a ~ l₂.kerase a :=
begin
induction p,
case list.perm.nil { refl },
case list.perm.skip : hd tl₁ tl₂ p ih {
simp at nd₁ nd₂,
by_cases h : hd.1 = a; simp [p, h, ih nd₁.2 nd₂.2, perm.skip] },
case list.perm.swap : s₁ s₂ l nd₂₁ nd₁₂ {
simp at nd₁₂,
by_cases h₂ : s₂.1 = a,
{ induction h₂, simp [nd₁₂.1] },
{ by_cases h₁ : s₁.1 = a; simp [h₂, h₁, perm.swap] } },
case list.perm.trans : l₁ l₂ l₃ p₁₂ p₂₃ ih₁₂ ih₂₃ nd₁ nd₃ {
have nd₂ : l₂.nodupkeys := (perm_nodupkeys p₁₂).mp nd₁,
exact perm.trans (ih₁₂ nd₁ nd₂) (ih₂₃ nd₂ nd₃) }
end
end kerase
/-- `cons` with `kerase` of the first `s`-key-matching pair -/
def kinsert (s : sigma β) (l : list (sigma β)) : list (sigma β) :=
s :: l.kerase s.1
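-- A hypothetical check (not in the original development): the new pair is
-- consed on and the old binding for its key is erased.
example : ([⟨1, 2⟩, ⟨3, 4⟩] : list (Σ _ : ℕ, ℕ)).kinsert ⟨1, 5⟩ = [⟨1, 5⟩, ⟨3, 4⟩] := rfl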
section kinsert
variables {a : α} {s t hd : sigma β} {l l₁ l₂ tl : list (sigma β)}
@[simp] theorem kinsert_eq_cons_kerase : tl.kinsert hd = hd :: tl.kerase hd.1 :=
rfl
@[simp] theorem mem_kinsert : s ∈ kinsert t l ↔ s = t ∨ s ∈ l.kerase t.1 :=
by simp [kinsert]
@[simp] theorem mem_keys_kinsert : a ∈ (l.kinsert s).keys ↔ s.1 = a ∨ a ∈ l.keys :=
by by_cases h : s.1 = a; [simp [h], simp [h, ne.symm h]]
@[simp] theorem nodupkeys_kinsert (s : sigma β) (nd : l.nodupkeys) :
(l.kinsert s).nodupkeys :=
(nodupkeys_cons_of_not_mem_keys (not_mem_keys_kerase_self nd)).mpr $
nodupkeys_kerase _ nd
theorem perm_kinsert (nd₁ : l₁.nodupkeys) (nd₂ : l₂.nodupkeys) (p : l₁ ~ l₂) :
l₁.kinsert s ~ l₂.kinsert s :=
perm.skip s $ perm_kerase nd₁ nd₂ p
end kinsert
/-- Key-based single-pair replacement in a list of dependent key-value pairs.
The result is the list with the first key-matching pair, if it exists, replaced
by the given pair. -/
def kreplace (s : sigma β) : list (sigma β) → list (sigma β)
| [] := []
| (hd :: tl) := if h : hd.1 = s.1 then s :: tl else hd :: kreplace tl
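-- A hypothetical check (not in the original development): the first pair
-- with a matching key is replaced in place.
example : ([⟨1, 2⟩, ⟨3, 4⟩] : list (Σ _ : ℕ, ℕ)).kreplace ⟨1, 5⟩ = [⟨1, 5⟩, ⟨3, 4⟩] := rfl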
section kreplace
variables {a : α} {s t hd : sigma β} {l l₁ l₂ tl : list (sigma β)}
@[simp] theorem kreplace_nil : kreplace s [] = [] :=
rfl
@[simp] theorem kreplace_cons_eq (h : hd.1 = s.1) :
(hd :: tl).kreplace s = s :: tl :=
by simp [kreplace, h]
@[simp] theorem kreplace_cons_ne (h : hd.1 ≠ s.1) :
(hd :: tl).kreplace s = hd :: tl.kreplace s :=
by simp [kreplace, h]
theorem kreplace_cons (s hd : sigma β) (tl : list (sigma β)) :
hd.1 = s.1 ∧ (hd :: tl).kreplace s = s :: tl ∨
hd.1 ≠ s.1 ∧ (hd :: tl).kreplace s = hd :: tl.kreplace s :=
by by_cases h : hd.1 = s.1; simp [h]
theorem mem_of_mem_kreplace_ne (h : t.1 ≠ s.1) : s ∈ l.kreplace t → s ∈ l :=
begin
induction l generalizing s t,
case list.nil { simp },
case list.cons : hd tl ih {
by_cases p : hd.1 = t.1,
{ rw kreplace_cons_eq p,
exact mem_cons_of_mem hd ∘ mem_of_ne_key_of_mem_cons h },
{ rw [kreplace_cons_ne p, mem_cons_iff, mem_cons_iff],
exact or.imp_right (ih h) } }
end
theorem mem_keys_of_mem_keys_kreplace_ne (h₁ : a ≠ s.1) (h₂ : a ∈ (l.kreplace s).keys) :
a ∈ l.keys :=
let ⟨b, h₃⟩ := exists_mem_of_mem_keys h₂ in
@mem_keys_of_mem _ _ ⟨a, b⟩ _ (mem_of_mem_kreplace_ne (ne.symm h₁) h₃)
@[simp] theorem nodupkeys_kreplace (s : sigma β) :
l.nodupkeys → (l.kreplace s).nodupkeys :=
begin
induction l,
case list.nil { simp },
case list.cons : hd tl ih {
intro nd,
simp at nd,
by_cases p : hd.1 = s.1,
{ rw p at nd, simp [p, nd.1, nd.2] },
{ simp [p, nd.1, ih nd.2, mt (mem_keys_of_mem_keys_kreplace_ne p)] } }
end
theorem perm_kreplace (nd₁ : l₁.nodupkeys) (nd₂ : l₂.nodupkeys) (p : l₁ ~ l₂) :
l₁.kreplace s ~ l₂.kreplace s :=
begin
induction p,
case list.perm.nil { refl },
case list.perm.skip : hd tl₁ tl₂ p ih {
simp at nd₁ nd₂,
by_cases h : hd.1 = s.1; simp [p, h, ih nd₁.2 nd₂.2, perm.skip] },
case list.perm.swap : s₁ s₂ l nd₂₁ nd₁₂ {
simp at nd₂₁ nd₁₂,
by_cases h₂ : s₂.1 = s.1,
{ rw kreplace_cons_eq h₂,
by_cases h₁ : s₁.1 = s.1,
{ rw kreplace_cons_eq h₁,
exact absurd (h₁.trans h₂.symm) nd₁₂.1 },
{ simp [h₁, h₂, perm.swap] } },
{ by_cases h₁ : s₁.1 = s.1; simp [h₁, h₂, perm.swap] } },
case list.perm.trans : l₁ l₂ l₃ p₁₂ p₂₃ ih₁₂ ih₂₃ nd₁ nd₃ {
have nd₂ : l₂.nodupkeys := (perm_nodupkeys p₁₂).mp nd₁,
exact perm.trans (ih₁₂ nd₁ nd₂) (ih₂₃ nd₂ nd₃) }
end
end kreplace
/-- Left-biased key-based union of lists of dependent key-value pairs.
The result of `l₁.kunion l₂` is constructed from `l₁` with `l₂` appended such
that the first pair matching each key in `l₁` is erased from `l₂`. Note that
the result can still have duplicates if duplicates exist in either argument. -/
def kunion : list (sigma β) → list (sigma β) → list (sigma β)
| [] l := l
| (hd :: tl) l := hd :: kunion tl (kerase hd.1 l)
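-- A hypothetical check (not in the original development): the left argument
-- wins on key 1, whose binding is erased from the right argument.
example : ([⟨1, 2⟩] : list (Σ _ : ℕ, ℕ)).kunion [⟨1, 3⟩, ⟨4, 5⟩] = [⟨1, 2⟩, ⟨4, 5⟩] := rfl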
section kunion
variables {a : α} {s hd : sigma β} {l l₁ l₂ l₃ l₄ tl : list (sigma β)}
@[simp] theorem nil_kunion (l : list (sigma β)) : [].kunion l = l :=
rfl
@[simp] theorem kunion_nil : ∀ (l : list (sigma β)), l.kunion [] = l
| [] := rfl
| (_ :: tl) := by rw [kunion, kerase_nil, kunion_nil tl]
@[simp] theorem kunion_cons : (hd :: tl).kunion l = hd :: tl.kunion (l.kerase hd.1) :=
rfl
@[simp] theorem kerase_kunion : ∀ {l₁ : list (sigma β)} (l₂ : list (sigma β)),
(l₁.kerase a).kunion (l₂.kerase a) = (l₁.kunion l₂).kerase a
| [] _ := rfl
| (hd :: _) l := by by_cases h : hd.1 = a;
simp [h, kerase_comm a hd.1 l, kerase_kunion]
@[simp] theorem map_kunion {γ : Type*} (f : sigma β → γ)
(dk : disjoint l₁.keys l₂.keys) : (l₁.kunion l₂).map f = l₁.map f ++ l₂.map f :=
by induction l₁ with _ _ ih; [refl, {simp at dk, simp [dk.1, ih dk.2.symm]}]
theorem keys_kunion (dk : disjoint l₁.keys l₂.keys) :
(l₁.kunion l₂).keys = l₁.keys ++ l₂.keys :=
by simp [keys, dk]
@[simp] theorem kinsert_kunion : (l₁.kinsert s).kunion l₂ = (l₁.kunion l₂).kinsert s :=
by simp
@[simp] theorem kunion_assoc : (l₁.kunion l₂).kunion l₃ = l₁.kunion (l₂.kunion l₃) :=
by induction l₁ generalizing l₂ l₃; simp *
theorem mem_of_mem_kunion : s ∈ l₁.kunion l₂ → s ∈ l₁ ∨ s ∈ l₂ :=
begin
induction l₁ generalizing l₂,
case list.nil { simp },
case list.cons : hd tl ih {
intro h,
simp at h,
cases h,
case or.inl : h { simp [h] },
case or.inr : h {
cases ih h,
case or.inl : h { simp [h] },
case or.inr : h { simp [mem_of_mem_kerase h] } } }
end
theorem mem_kunion_left (l₂ : list (sigma β)) (h : s ∈ l₁) : s ∈ l₁.kunion l₂ :=
by induction l₁ generalizing l₂; simp at h; cases h; simp *
theorem mem_kunion_right (h₁ : s.1 ∉ l₁.keys) (h₂ : s ∈ l₂) : s ∈ l₁.kunion l₂ :=
by induction l₁ generalizing l₂; simp at h₁; cases h₁; simp *
theorem mem_kunion_middle (dk : disjoint (l₁.kunion l₂).keys l₃.keys) (h : s ∈ l₁.kunion l₃) :
s ∈ (l₁.kunion l₂).kunion l₃ :=
match mem_of_mem_kunion h with
| or.inl h := mem_kunion_left _ (mem_kunion_left _ h)
| or.inr h := mem_kunion_right (disjoint_right.mp dk (mem_keys_of_mem h)) h
end
theorem mem_kunion_of_disjoint_keys (dk : disjoint l₁.keys l₂.keys) (h : s ∈ l₁ ∨ s ∈ l₂) :
s ∈ l₁.kunion l₂ :=
begin
cases h with h h,
{ exact mem_kunion_left _ h },
{ by_cases p : s.1 ∈ l₁.keys,
{ exact absurd h (mt mem_keys_of_mem (dk p)) },
{ exact mem_kunion_right p h } }
end
@[simp] theorem mem_kunion_iff (dk : disjoint l₁.keys l₂.keys) : s ∈ l₁.kunion l₂ ↔ s ∈ l₁ ∨ s ∈ l₂ :=
⟨mem_of_mem_kunion, mem_kunion_of_disjoint_keys dk⟩
@[simp] theorem mem_keys_kunion : a ∈ (l₁.kunion l₂).keys ↔ a ∈ l₁.keys ∨ a ∈ l₂.keys :=
by induction l₁ with hd _ ih generalizing l₂;
[simp, {by_cases h : hd.1 = a; [simp [h], simp [h, ne.symm h, ih]]}]
theorem nodupkeys_kunion (nd₁ : l₁.nodupkeys) (nd₂ : l₂.nodupkeys) :
(l₁.kunion l₂).nodupkeys :=
by induction l₁ generalizing l₂; simp at nd₁; simp *
theorem perm_kunion_left (l : list (sigma β)) (p : l₁ ~ l₂) : l₁.kunion l ~ l₂.kunion l :=
begin
induction p generalizing l,
case list.perm.nil { refl },
case list.perm.skip : hd tl₁ tl₂ p ih {
simp [ih (kerase hd.1 l), perm.skip] },
case list.perm.swap : s₁ s₂ l {
simp [kerase_comm, perm.swap] },
case list.perm.trans : l₁ l₂ l₃ p₁₂ p₂₃ ih₁₂ ih₂₃ {
exact perm.trans (ih₁₂ l) (ih₂₃ l) }
end
theorem perm_kunion_right : ∀ (l : list (sigma β)) {l₁ l₂ : list (sigma β)},
l₁.nodupkeys → l₂.nodupkeys → l₁ ~ l₂ → l.kunion l₁ ~ l.kunion l₂
| [] _ _ _ _ p := p
| (hd :: tl) l₁ l₂ nd₁ nd₂ p :=
by simp [perm.skip hd
(perm_kunion_right tl (nodupkeys_kerase hd.1 nd₁)
(nodupkeys_kerase hd.1 nd₂)
(perm_kerase nd₁ nd₂ p))]
theorem perm_kunion (nd₂ : l₂.nodupkeys) (nd₄ : l₄.nodupkeys)
(p₁₃ : l₁ ~ l₃) (p₂₄ : l₂ ~ l₄) : l₁.kunion l₂ ~ l₃.kunion l₄ :=
perm.trans (perm_kunion_left l₂ p₁₃) (perm_kunion_right l₃ nd₂ nd₄ p₂₄)
end kunion
end decidable_eq_α
end αβ
section α₁α₂α₃β₁β₂β₃
universes u v
variables {α₁ α₂ α₃ : Type u} {β₁ : α₁ → Type v} {β₂ : α₂ → Type v} {β₃ : α₃ → Type v}
section keys
variables {s : sigma β₁} {l : list (sigma β₁)} {f : sigma β₁ → sigma β₂}
theorem mem_keys_map_of_mem (f : sigma β₁ → sigma β₂) (ms : s ∈ l) :
(f s).1 ∈ (l.map f).keys :=
mem_keys_of_mem (mem_map_of_mem f ms)
theorem mem_keys_map (ff : sigma.fst_functional f) (h : s.1 ∈ l.keys) :
(f s).1 ∈ (l.map f).keys :=
let ⟨_, m, e⟩ := exists_of_mem_map h in ff e ▸ mem_keys_map_of_mem f m
theorem mem_keys_of_mem_keys_map (fi : sigma.fst_injective f) (h : (f s).1 ∈ (l.map f).keys) :
s.1 ∈ l.keys :=
have h : (sigma.fst ∘ f) s ∈ map (sigma.fst ∘ f) l, by simpa [keys] using h,
let ⟨_, m, e⟩ := exists_of_mem_map h in fi e ▸ mem_keys_of_mem m
-- Is this useful?
theorem mem_keys_of_mem_map (fi : sigma.fst_injective f) (h : f s ∈ l.map f) : s.1 ∈ l.keys :=
let ⟨_, m, e⟩ := exists_of_mem_map h in
fi (sigma.eq_fst e) ▸ mem_keys_of_mem m
@[simp] theorem mem_keys_map_iff (ff : sigma.fst_functional f) (fi : sigma.fst_injective f) :
(f s).1 ∈ (l.map f).keys ↔ s.1 ∈ l.keys :=
⟨mem_keys_of_mem_keys_map fi, mem_keys_map ff⟩
end keys
section nodupkeys
variables {s t : sigma β₁} {l : list (sigma β₁)} {f : sigma β₁ → sigma β₂}
-- Is this useful?
theorem nodupkeys_injective (fi : sigma.fst_injective f) (d : l.nodupkeys)
(ms : s ∈ l) (mt : t ∈ l) (h : f s = f t) : s = t :=
eq_of_nodupkeys_of_eq_fst d ms mt $ fi $ sigma.eq_fst h
theorem nodupkeys_of_nodupkeys_map (ff : sigma.fst_functional f) :
nodupkeys (map f l) → nodupkeys l :=
pairwise_of_pairwise_map f $ λ s t, mt (@ff s t)
theorem nodupkeys_map (fi : sigma.fst_injective f) :
l.nodupkeys → (l.map f).nodupkeys :=
pairwise_map_of_pairwise f
(λ s t (h : s ∈ l ∧ t ∈ l ∧ s.1 ≠ t.1), mt (@fi s t) h.2.2) ∘
pairwise.and_mem.mp
theorem nodupkeys_map_iff (ff : sigma.fst_functional f) (fi : sigma.fst_injective f) :
(l.map f).nodupkeys ↔ l.nodupkeys :=
⟨nodupkeys_of_nodupkeys_map ff, nodupkeys_map fi⟩
-- Is this useful?
theorem mem_map_of_mem_of_mem_keys_map (fi : sigma.fst_injective f) (d : l.nodupkeys)
(ms : s ∈ l) (mfs : (f s).1 ∈ (l.map f).keys) : f s ∈ l.map f :=
begin
simp [keys] at mfs,
rcases mfs with ⟨a, b, mab, ef⟩,
cases s with sa sb,
have ea : a = sa := fi ef,
subst ea,
have eb : b = sb := nodupkeys_functional d mab ms rfl,
subst eb,
exact mem_map_of_mem f mab,
end
end nodupkeys
section map_disjoint
variables {l₁ l₂ : list (sigma β₁)} {f : sigma β₁ → sigma β₂}
theorem map_disjoint_keys_of_disjoint_keys (fi : sigma.fst_injective f)
(dk : disjoint l₁.keys l₂.keys) : disjoint (l₁.map f).keys (l₂.map f).keys :=
λ a h₁ h₂,
have h₁ : a ∈ map (sigma.fst ∘ f) l₁, by simpa [keys] using h₁,
let ⟨s, m, e⟩ := exists_of_mem_map h₁ in
have e : (f s).1 = a := e,
dk (mem_keys_of_mem m) (mem_keys_of_mem_keys_map fi (e.symm ▸ h₂))
theorem disjoint_keys_of_map_disjoint_keys (ff : sigma.fst_functional f)
(dk : disjoint (l₁.map f).keys (l₂.map f).keys) : disjoint l₁.keys l₂.keys :=
λ a h₁ h₂, let ⟨b₁, h₁⟩ := exists_mem_of_mem_keys h₁ in
dk (mem_keys_map_of_mem f h₁) (mem_keys_map ff h₂)
@[simp] theorem map_disjoint_keys (ff : sigma.fst_functional f) (fi : sigma.fst_injective f) :
disjoint (l₁.map f).keys (l₂.map f).keys ↔ disjoint l₁.keys l₂.keys :=
⟨disjoint_keys_of_map_disjoint_keys ff, map_disjoint_keys_of_disjoint_keys fi⟩
end map_disjoint
section decidable_eq_α₁_α₂
variables [decidable_eq α₁] [decidable_eq α₂]
section map
variables {s : sigma β₁} {l : list (sigma β₁)} {f : sigma β₁ → sigma β₂}
@[simp] theorem map_kerase (ff : sigma.fst_functional f) (fi : sigma.fst_injective f) :
(l.kerase s.1).map f = (l.map f).kerase (f s).1 :=
begin
induction l,
case list.nil { simp },
case list.cons : hd tl ih {
by_cases h : (f hd).1 = (f s).1,
{ simp [h, fi h] },
{ simp [h, mt (@ff _ _) h, ih] } }
end
@[simp] theorem map_kinsert (ff : sigma.fst_functional f) (fi : sigma.fst_injective f) :
(l.kinsert s).map f = (l.map f).kinsert (f s) :=
by simp [ff, fi]
end map
end decidable_eq_α₁_α₂
end α₁α₂α₃β₁β₂β₃
section αβ₁β₂
universes u v
variables {α : Type u} {β₁ β₂ : α → Type v}
section nodupkeys
variables {l : list (sigma β₁)}
theorem nodupkeys_map_id_iff (f : ∀ a, β₁ a → β₂ a) :
(l.map (sigma.map id f)).nodupkeys ↔ l.nodupkeys :=
nodupkeys_map_iff (sigma.map_id_fst_functional f) (sigma.map_id_fst_injective f)
end nodupkeys
end αβ₁β₂
end list
|
State Before: α : Type ?u.236438
β : Type ?u.236441
γ : Type ?u.236444
r : α → α → Prop
s : β → β → Prop
t : γ → γ → Prop
o : Ordinal
n : ℕ
⊢ ↑n ≤ card o ↔ ↑n ≤ o State After: no goals Tactic: rw [← Cardinal.ord_le, Cardinal.ord_nat] |
[STATEMENT]
lemma evaldjf_ex: "Ifm bbs bs (evaldjf f ps) \<longleftrightarrow> (\<exists>p \<in> set ps. Ifm bbs bs (f p))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Ifm bbs bs (evaldjf f ps) = (\<exists>p\<in>set ps. Ifm bbs bs (f p))
[PROOF STEP]
by (induct ps) (simp_all add: evaldjf_def djf_Or) |
#include "dot_net_solution.hpp"
#include <bacs/problem/error.hpp>
#include <bacs/problem/split.hpp>
#include <bunsan/filesystem/fstream.hpp>
#include <bunsan/filesystem/operations.hpp>
#include <bunsan/pm/index.hpp>
#include <bunsan/static_initializer.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/property_tree/ini_parser.hpp>
namespace bacs {
namespace problem {
namespace utilities {
BUNSAN_STATIC_INITIALIZER(bacs_problem_utilities_dot_net_solution, {
BUNSAN_FACTORY_REGISTER_TOKEN(
utility, dot_net_solution,
[](const boost::filesystem::path &location,
const boost::property_tree::ptree &config) {
return utility::make_shared<dot_net_solution>(location, config);
})
})
dot_net_solution::dot_net_solution(const boost::filesystem::path &location,
const boost::property_tree::ptree &config)
: utility(location, config),
m_solution(config.get<std::string>("build.solution")),
m_configuration(config.get_optional<std::string>("build.configuration")),
m_libs(split::get_vector(config, "build.libs")) {}
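// make_package (below) writes a bunsan.pm source package: src/ holds the
// copied sources, etc/ the "call" configuration section, and
// modules/utility.cmake passes target, solution, configuration and
// libraries to the imported bacs/system/utility/dot_net_solution builder.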
bool dot_net_solution::make_package(const boost::filesystem::path &destination,
const bunsan::pm::entry & /*package*/,
const Revision & /*revision*/) const {
try {
boost::filesystem::create_directories(destination);
bunsan::pm::index index;
// builder itself
index.source.import.source.push_back(
{".", "bacs/system/utility/dot_net_solution"});
// sources, note: only build section is needed from config
index.source.self.push_back({"src", "src"});
bunsan::filesystem::copy_tree(location(), destination / "src");
// utility configuration
index.package.self.push_back({"etc", "etc"});
boost::filesystem::create_directory(destination / "etc");
boost::property_tree::write_ini((destination / "etc" / target()).string(),
section("call"));
// modules: set binary name
index.source.self.push_back({"modules", "modules"});
boost::filesystem::create_directory(destination / "modules");
bunsan::filesystem::ofstream fout(destination / "modules" /
"utility.cmake");
BUNSAN_FILESYSTEM_FSTREAM_WRAP_BEGIN(fout) {
fout << "set(target " << target().string() << ")\n";
fout << "set(solution " << m_solution.string() << ")\n";
if (m_configuration)
  fout << "set(configuration " << *m_configuration << ")\n";
fout << "set(libraries";
for (const std::string &lib : m_libs) fout << " " << lib;
fout << ")\n";
} BUNSAN_FILESYSTEM_FSTREAM_WRAP_END(fout)
fout.close();
// dependencies
for (const std::string &lib : m_libs) {
index.source.import.package.push_back(
{".", "bacs/lib/dot_net_solution/" + lib});
}
// save it
index.save(destination / "index");
return true;
} catch (std::exception &) {
BOOST_THROW_EXCEPTION(
utility_make_package_error()
<< utility_make_package_error::destination(destination)
// << utility_make_package_error::package(package)
<< bunsan::enable_nested_current());
}
}
} // namespace utilities
} // namespace problem
} // namespace bacs
|
import metric_spaces.definitions
noncomputable theory
open set
namespace examples
-- Declaring X and element x with type X
inductive X : Type
| x : X
-- A metric that maps everything to zero is a metric on X
instance metric_space_example_X : metric_space X :=
{ dist := λ _ _, 0,
dist_self := λ _, rfl,
eq_of_dist_eq_zero := λ a b _, by {cases a, cases b, refl},
dist_comm := λ _ _, rfl,
dist_triangle := λ _ _ _, show (0 : ℝ) ≤ 0 + 0, by linarith
}
-- The absolute function is a metric on ℝ
instance metric_space_example_R : metric_space ℝ :=
{ dist := λ x y, abs (x - y),
dist_self := λ x, show abs (x - x) = 0, by simp,
eq_of_dist_eq_zero := λ x y, show abs (x - y) = 0 → x = y,
by { rw abs_eq_zero, intro h, linarith [h] },
dist_comm := λ x y, show abs (x - y) = abs (y - x), from abs_sub x y,
dist_triangle := λ x y z, show abs (x - z) ≤ abs (x - y) + abs (y - z),
by { convert abs_add (x - y) (y - z), linarith }
}
-- The discrete metric is a metric on any set X
open tactic
open_locale classical
definition metric_example (X : Type) (x y : X) : ℝ :=
if x = y then 0 else 1
variables {X : Type} {x y : X}
-- Useless tactic just for fun, maybe remove later
private meta def metric_example_tac : tactic unit :=
solve1 $ intros
>> `[unfold metric_example]
>> try `[simp]
instance metric_space_example_discrete : metric_space X :=
{ dist := λ x y, metric_example X x y,
dist_self := λ x, show metric_example X x x = 0, by metric_example_tac,
eq_of_dist_eq_zero := λ x y, show metric_example X x y = 0 → x = y,
by { unfold metric_example, split_ifs, all_goals {finish} },
dist_comm := λ x y, show metric_example X x y = metric_example X y x,
by { unfold metric_example, split_ifs, all_goals {finish} },
dist_triangle := λ x y z, show metric_example X x z ≤ metric_example X x y + metric_example X y z,
begin
unfold metric_example,
split_ifs, all_goals {try {norm_num}},
apply h, rwa [h_1, h_2]
end
}
end examples |
function test_fileformat_asa
% MEM 2gb
% WALLTIME 00:10:00
% DEPENDENCY ft_read_sens ft_read_headmodel ft_read_headshape ft_read_mri read_asa read_asa_dip read_asa_mri read_asa_vol read_asa_bnd read_asa_elc read_asa_msr
elcfile = dccnpath('/home/common/matlab/fieldtrip/data/test/original/electrodes/asa/standard_primed.elc');
volfile = dccnpath('/home/common/matlab/fieldtrip/data/test/original/headmodel/asa/standard.vol');
skinfile = dccnpath('/home/common/matlab/fieldtrip/data/test/original/headshape/asa/standard_skin_14038.vol');
mrifile = dccnpath('/home/common/matlab/fieldtrip/data/test/original/mri/asa/standard.mri');
elec = ft_read_sens(elcfile);
vol = ft_read_headmodel(volfile);
skin = ft_read_headshape(skinfile);
mri = ft_read_mri(mrifile);
|
import data.nat.gcd
variables x y z : ℕ
#check dvd_mul_left
#check dvd_mul_right
#check @dvd_mul_of_dvd_left
#check @dvd_mul_of_dvd_right
example (h₀ : x ∣ y) (h₁ : y ∣ z) : x ∣ z :=
dvd_trans h₀ h₁
example : x ∣ y * x * z :=
begin
apply dvd_mul_of_dvd_left,
apply dvd_mul_left,
end
example : x ∣ x^2 :=
by apply dvd_mul_right |
module Maybe
%default total
maybeAdd : Maybe Int -> Maybe Int -> Maybe Int
maybeAdd x y = case x of
Nothing => Nothing
Just a => case y of
Nothing => Nothing
Just b => Just (a + b)
mAdd : Maybe Int -> Maybe Int -> Maybe Int
mAdd mx my = do x <- mx
y <- my
Just (x + y) |
Require Export Kelley_Set_Theory.
(** Some necessary and additional definitions for the proof **)
Module BasicDefinition.
(* Nest *)
Definition Nest f : Prop := forall A B, A∈f /\ B∈f -> A⊂B \/ B⊂A.
Hint Unfold Nest : Axiom_of_Choice.
(* Finite Characteristic Set *)
Definition FiniteSet f : Prop :=
Ensemble f /\ (forall F, F∈f -> (forall z, z ⊂ F /\ Finite z -> z∈f))
/\ (forall F, Ensemble F /\ (forall z, z ⊂ F /\ Finite z -> z∈f) -> F∈f).
Hint Unfold FiniteSet : Axiom_of_Choice.
(* Property of Finite Characteristic Set *)
Hypothesis HPF_def : forall A φ, A ⊂ ∪ φ -> Finite A ->
exists C0 C1 C2, (C0∈φ /\ C1∈φ /\ C2∈φ) /\ A ⊂ (C0 ∪ C1 ∪ C2).
Proposition Property_FinSet : forall f: Class,
FiniteSet f /\ f ≠ Φ -> (forall A B, A ∈ f /\ B ⊂ A -> B ∈ f)
/\ (forall φ, φ ⊂ f /\ Nest φ -> (∪φ) ∈ f).
Proof.
intros; destruct H.
unfold FiniteSet in H; destruct H; split; intros.
- destruct H2; apply H1; intros; split.
+ apply Theorem33 in H3; Ens.
+ intros; destruct H4; apply H1 with (z:=z) in H2; auto.
split; try (apply Theorem28 with (y:=B); split); auto.
- destruct H2; apply H1.
split; try (apply AxiomVI; apply Theorem33 in H2); auto.
intro A; intros; destruct H4.
unfold Nest in H3; apply HPF_def in H4; auto.
destruct H4 as [C0 H4]; destruct H4 as [C1 H4].
destruct H4 as [C2 H4]; destruct H4, H4, H7.
assert (C0 ∈ φ /\ C1 ∈ φ). { split; auto. }
assert (C1 ∈ φ /\ C2 ∈ φ). { split; auto. }
assert (C0 ∈ φ /\ C2 ∈ φ). { split; auto. }
apply H3 in H10; destruct H10.
+ apply Theorem29 in H10; rewrite H10 in H6.
apply H3 in H11; destruct H11.
* apply Theorem29 in H11; rewrite H11 in H6.
unfold Included in H2; apply H2 in H8.
apply H1 with (z:=A) in H8; auto.
* apply Theorem29 in H11; rewrite Theorem6 in H11.
rewrite H11 in H6; unfold Included in H2; apply H2 in H4.
apply H1 with (z:=A) in H4; auto.
+ apply Theorem29 in H10; rewrite Theorem6 in H10.
rewrite H10 in H6; apply H3 in H9; destruct H9.
* apply Theorem29 in H9; rewrite H9 in H6.
unfold Included in H2; apply H2 in H7.
apply H1 with (z:=A) in H7; auto.
* apply Theorem29 in H9; rewrite Theorem6 in H9.
rewrite H9 in H6; unfold Included in H2; apply H2 in H4.
apply H1 with (z:=A) in H4; auto.
Qed.
(* Maximal Member : F is a maximal member of f iff no member of f properly contains F. [K55] *)
Definition MaxMember F f : Prop :=
f ≠ Φ -> F∈f /\ (forall x, x∈f -> ~ F ⊊ x).
Hint Unfold MaxMember : Axiom_of_Choice.
(* Minimal Member : Similarly, F is a minimal member of f iff no member of f is properly contained in F. [K55] *)
Definition MinMember F f : Prop :=
f ≠ Φ -> F∈f /\ (forall x, x∈f -> ~ x ⊊ F).
Hint Unfold MaxMember MinMember : Axiom_of_Choice.
(* Choice Function *)
Definition Choice_Function ε X : Prop :=
Function ε /\ ran(ε) ⊂ X /\ dom(ε) = pow(X)~[Φ] /\
(forall A, A ∈ dom(ε) -> ε[A] ∈ A).
Hint Unfold Choice_Function : Axiom_of_Choice.
End BasicDefinition.
Export BasicDefinition.
|
%
% CMPT 213: Object Oriented Design in Java - A Course Overview
% Section: Object-Oriented Design
%
% Author: Jeffrey Leung
%
\section{Object-Oriented Design}
\label{sec:object-oriented-design}
\begin{easylist}
& Effective because classes represent stable problem domain concepts
& Design involves identifying classes, responsibilities, and relationships
& \textbf{Object:} Unique software entity with state and behaviours
&& \textbf{State:} Information about an object
&& \textbf{Behaviour:} Methods and operations which view and/or modify state
&& \textbf{Class:} Type of a set of objects with the same behaviours and set of possible states
&&& \textbf{Final class:} Class which cannot be inherited from
&& \textbf{Field/instance data:} Member variable stored by an object
&& \textbf{Method:} Member function of a class
&&& \textbf{Final method:} Method which cannot be overridden
&& Statics:
&&& \textbf{Static/class field:} Field of which only a single instance exists, shared by all instances of the class
&&& \textbf{Static/class method:} Method which can be called on the class without a constructed object
&& Unique types of objects:
&&& Agent: Object which performs a specific task
&&& Users/roles, events, systems, interfaces
& Anonymous classes:
&& \textbf{Anonymous class:} Single-use implementation of an interface, defined only once within another body of code
&& \textbf{Anonymous object:} Instance of an unnamed class
& Relationships between objects:
&& \textbf{Dependency (uses):} Relationship between two classes where one uses the other (i.e. the first may need to change if the second changes)
&&& \textbf{Coupling:} Relationship between two classes where one is a dependency of the other
&&&& The greater the coupling, the greater the changes required when one component is modified
&&&& Ideally minimized
&& \textbf{Aggregation (has a):} Relationship between two classes where one contains the other
&& \textbf{Inheritance (is a):} Relationship between two classes where one is a subclass of the other (see subsection~\ref{subsec:object-oriented-design:inheritance})
& \textbf{Side effect:} Observable change to state caused by code execution
& \textbf{Idiom:} Common practice
& \textbf{Protected:} Access class which is available at package-level as well as derived classes, but not external code
&& Breaks encapsulation; exposes implementation details to derived classes
&& Difficult because no control over the extending classes
\end{easylist}
\subsection{Public Interface}
\label{subsec:object-oriented-design:public-interface}
\begin{easylist}
& \textbf{Interface:} Set of methods which an implementing class can choose to implement
& Object type cannot be dynamically changed
\end{easylist}
\subsection{Principles}
\label{subsec:object-oriented-design:principles}
\begin{easylist}
& \textbf{Encapsulation:} Characteristic of a module which only exposes some public interfaces, allowing some internal changes to be made without breaking outward interface
&& Modules manage their own state
&& Reduces scope of change (which encourages modification) and cognitive load
& \textbf{Immutability:} Characteristic of an entity which has no methods which change its visible state
&& A modification to an object must return a new object
&& An accessor should return a value or immutable reference
&& Avoids issues from partial setters and shared references
&& \textbf{Final:} Keyword which declares a field to have an immutable reference
& \textbf{Command-Query Separation:} Principle which states that methods should not be both mutator and accessor, to avoid unexpected side effects (see the sketch after this list)
&& \textbf{Command:} Mutator method
&&& Should return \lstinline!void!
&& \textbf{Query:} Accessor method
& \textbf{Responsibility heuristic:} Tendency to avoid exposing internal details only for external access
\end{easylist}
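As an illustration of command-query separation, a minimal sketch (in Python for brevity, though the course itself uses Java; the \lstinline!Counter! class here is hypothetical):
\begin{lstlisting}[language=Python]
class Counter:
    def __init__(self):
        self._count = 0

    def increment(self):  # command: mutates state, returns nothing
        self._count += 1

    def count(self):  # query: reads state, causes no side effects
        return self._count
\end{lstlisting}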
\subsection{SOLID Principles}
\label{subsec:object-oriented-design:solid-principles}
\begin{easylist}
& \textbf{Single Responsibility Principle:} Each class only has one responsibility
& \textbf{Open-Closed Principle:} Classes must be open for extension but closed for modification
& \textbf{Liskov Substitution Principle:} Subtype objects must be interchangeable with base objects
& \textbf{Interface Segregation Principle:} Clients should not be forced to depend on interfaces they do not use; prefer multiple small, client-specific interfaces
& \textbf{Dependency Inversion Principle:} Code should depend on abstractions, not concrete classes
\end{easylist}
\subsection{Polymorphism}
\label{subsec:object-oriented-design:polymorphism}
\begin{easylist}
& \textbf{Concrete type:} Exact instantiated type of an object
& \textbf{Interface:} Definition of a set of public methods to be implemented
&& A class implements an interface
&& Code to an interface, not a concrete type
&& Can inherit from another object
& \textbf{Polymorphism:} Characteristic of an object which can be one of multiple different concrete types
&& Possible due to composition
&& Allows late binding
& \textbf{Late binding:} Characteristic of a variable which is assigned a specific concrete type at runtime
&& Allows loose coupling
\end{easylist}
\subsection{Inheritance}
\label{subsec:object-oriented-design:inheritance}
\begin{easylist}
& \textbf{Inheritance:} Subclass created from and extending a superclass
&& Allows sharing members (properties and methods)
&& Allows polymorphism between two concrete classes
&& \textbf{Single inheritance:} A subclass may have at most 1 superclass
&& \textbf{Multiple inheritance:} A subclass may have multiple superclasses
&& \textbf{Liskov Substitution Principle (LSP):} Rule which states that class B can inherit from A if and only if, for each method in A, the corresponding method in B accepts the same parameters (or more) and executes the same operations (or more)
&&& I.e. Client code using the base class must be able to use the derived class without modification
&&& Is-a relationships must satisfy the Liskov Substitution Principle
&& Should be used for permanent relationships, not temporary assignments
&& Favor polymorphism over inheritance
& \textbf{Superclass/Base class:} Class from which another class inherits
&& An instance of the superclass cannot be assigned to a variable of the subclass type
&& Referred to by \lstinline!super!
&& Cannot be final
& \textbf{Subclass/Derived class:} Class which inherits from another
&& Can access non-private members of the superclass
&& Cannot directly access private members of the superclass
&& An instance of the subclass can be assigned to a variable of the superclass type
& \textbf{Constructor chaining:} Call to a superclass constructor by a constructor in the subclass
&& Ensures base classes are initialized first
&& In Java, if no superclass constructor is called explicitly, the superclass's default 0-argument constructor is called automatically (see the sketch after this list)
& Abstracts:
&& \textbf{Abstract method:} Un-implemented method which must be implemented by any concrete subclass
&& \textbf{Abstract class:} Class with abstract methods
&&& Concrete derived classes must implement all abstract methods
&& Abstract classes vs. interfaces:
&&& Both require implementation of a given set of methods
&&& Abstract classes can have member variables and implemented methods; interfaces cannot
&&& A class can implement multiple interfaces; a class can only have a single (abstract) superclass
\end{easylist}
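A minimal sketch of constructor chaining (again in Python for brevity; unlike Java, Python never calls the superclass constructor implicitly, so the chain is written out):
\begin{lstlisting}[language=Python]
class Base:
    def __init__(self):
        print("Base initialized first")

class Derived(Base):
    def __init__(self):
        super().__init__()  # chain to the superclass constructor
        print("Derived initialized second")

Derived()  # prints Base's line, then Derived's
\end{lstlisting}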
\subsection{Overriding}
\label{subsec:object-oriented-design:overriding}
\begin{easylist}
& The overriding method:
&& Must have identical signature (except for visibility and return type)
&& Visibility cannot be reduced
&& Cannot throw additional checked exceptions
&& Can return a subclass of the original return type
& Method to override:
&& Cannot be private, static, final
& \textbf{Shadow variable:} Subclass variable which hides (shadows) a variable of the same name in the superclass
\end{easylist}
\clearpage
|
@testset "AlgAssOrd" begin
include("AlgAssOrd/CSAMaxOrd.jl")
include("AlgAssOrd/PicardGroup.jl")
include("AlgAssOrd/LocallyFreeClassGroup.jl")
end
|
import data.set.basic
variables (R A : Type)
variables (𝕍 : set R → set A) (𝕀 : set A → set R)
open set
-- 𝕍 𝕀 𝕍 = 𝕍 for a contravariant Galois connection
-- for example the one between R=k[X₁,X₂,…,Xₙ] and A=𝔸ⁿ
-- in the theory of algebraic varieties
example
(𝕍_antimono : ∀ J₁ J₂ : set R, J₁ ⊆ J₂ → 𝕍 J₂ ⊆ 𝕍 J₁)
(𝕀_antimono : ∀ W₁ W₂ : set A, W₁ ⊆ W₂ → 𝕀 W₂ ⊆ 𝕀 W₁)
(galois : ∀ J : set R, ∀ W : set A, J ⊆ 𝕀 W ↔ W ⊆ 𝕍 J) :
∀ J : set R, 𝕍 (𝕀 (𝕍 J)) = 𝕍 J :=
begin
intro J,
apply set.subset.antisymm,
{ apply 𝕍_antimono,
rw galois},
{ rw ←galois}
end
|
# Surface Codes Tutorial II
---
The idea we implement here is to realize graphs on surfaces such as the following found in the papers [Feynman Diagrams and Rooted Maps](https://arxiv.org/pdf/1312.0934.pdf) and [Minors and planar embeddings of digraphs](https://www.researchgate.net/publication/37985042_Minors_and_planar_embeddings_of_digraphs), in order to use and study *surface codes*:
Below, we are going to create a class called `SurfaceCodeGraph` which implements a combinatorial object defining a graph which is cellularly embedded in a compact $2$-manifold, i.e. a Riemann surface. This will give us the necessary tools we need to start implementing and studying surface codes on arbitrary surfaces of any genus, and with any qubit connectivity we want. This combinatorial construction is very closely related to quantum gravity, string theory, lattice gauge theories, and number theory (dessins d'enfants), where certain questions in quantum complexity involving partition functions of Ising type models can be translated into questions about L-functions. We will provide references to relevant research papers at the end.
In building the graph, *bipartite graphs* from NetworkX are used to create *black nodes*, `bipartite=1`, and *white nodes*, `bipartite=0`. For more info on creating bipartite graphs see the [NetworkX documentation](https://networkx.github.io/documentation/stable/reference/algorithms/bipartite.html).
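Before diving into the full class, here is a minimal sketch (a hypothetical two-node toy graph, not part of the class below) of how the `bipartite` node attribute and `bipartite.sets` are used:
```python
import networkx as nx
from networkx.algorithms import bipartite

B = nx.Graph()
B.add_node((0, 1, 2), bipartite=1)  # a "black" node (e.g. a cycle of sigma)
B.add_node(0, bipartite=0)          # a "white" half-edge node
B.add_edge((0, 1, 2), 0)

# bipartite.sets two-colors a connected bipartite graph and returns the
# two node sets; the split follows the coloring, not the attribute
left, right = bipartite.sets(B)
print(left, right)
```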
```python
from collections import Counter
from typing import Tuple, List
from networkx import MultiGraph
import networkx as nx
from networkx.algorithms import bipartite
from sympy.combinatorics import Permutation
import matplotlib.pyplot as plt
# from SurfaceCodes.utilites import permlist_to_tuple
class SurfaceCodeGraph(MultiGraph):
def __init__(self, sigma: Tuple[Tuple[int]], alpha: Tuple[Tuple[int]]):
super().__init__()
self.sigma = sigma # should include singletons corresponding to fixed points
self.alpha = alpha # should include singletons corresponding to fixed points
f = self.compute_phi()
self.phi = self.permlist_to_tuple(f)
self.build_node_info() # print dictionary for [sigma, alpha, phi]
self.node_dict = self.sigma_dict, self.alpha_dict, self.phi_dict
self.node_info = ["sigma:", self.sigma_dict,
"alpha:", self.alpha_dict,
"phi:", self.phi_dict]
self.code_graph = nx.MultiGraph()
# Create black nodes for each cycle in sigma along with white nodes
# representing "half edges" around the black nodes
for cycle in self.sigma:
self.code_graph.add_node(cycle, bipartite=1)
for node in cycle:
self.code_graph.add_node(node, bipartite=0)
self.code_graph.add_edge(cycle, node)
# Create black nodes for each cycle in phi along with white nodes
# representing "half edges" around the black nodes
for cycle in self.phi:
self.code_graph.add_node(cycle, bipartite=1)
for node in cycle:
self.code_graph.add_edge(cycle, node)
# Create nodes for each cycle in alpha then
# glue the nodes corresponding to the pairs
for pair in self.alpha:
self.code_graph.add_node(pair)
self.code_graph = nx.contracted_nodes(self.code_graph, pair[0], pair[1], self_loops=True)
# Now contract pair with pair[0] to make sure edges (white nodes) are labeled
# by the pairs in alpha to keep track of the gluing from the previous step
self.code_graph = nx.contracted_nodes(self.code_graph, pair, pair[0], self_loops=True)
# Define the white and black nodes. White correspond to edges labeled by
# cycles in alpha. Black correspond to nodes labeled by cycles in sigma
# (vertices) and phi (faces)
self.black_nodes, self.white_nodes = bipartite.sets(self.code_graph)
def permlist_to_tuple(self, perms):
"""
convert list of lists to tuple of tuples in order to have two level iterables
that are hashable for the dictionaries used later
"""
return tuple(tuple(perm) for perm in perms)
def compute_phi(self):
"""compute the list of lists full cyclic form of phi (faces of dessin [sigma, alpha, phi])"""
s = Permutation(self.sigma)
a = Permutation(self.alpha)
f = ~(a * s)
f = f.full_cyclic_form # prints permutation as a list of lists including all singletons (fixed points)
return f
def build_node_info(self):
count = -1
self.sigma_dict = dict()
for count, cycle in enumerate(self.sigma):
self.sigma_dict[cycle] = count
self.phi_dict = dict()
for count, cycle in enumerate(self.phi, start=count + 1):
self.phi_dict[cycle] = count
self.alpha_dict = dict()
for count, pair in enumerate(self.alpha, start=count + 1):
self.alpha_dict[pair] = count
return tuple([self.sigma_dict, self.alpha_dict, self.phi_dict])
def boundary_1(self, edge):
"""
compute boundary of a single edge given by a white node (cycle in alpha)
"""
boundary1 = [node for node in self.code_graph.neighbors(edge) if node in self.sigma_dict]
return boundary1
def del_1(self, edges: List[Tuple[int]]):
"""
boundary of a list of edges, i.e. an arbitrary 1-chain over Z/2Z
"""
boundary_list = [self.boundary_1(edge) for edge in edges]
a = Counter([y for x in boundary_list for y in x])
boundary_list = [x[0] for x in a.items() if x[1] % 2 == 1]
return boundary_list
def boundary_2(self, face):
"""
compute boundary of a single face
"""
boundary = self.code_graph.neighbors(face)
return boundary
def del_2(self, faces: List[Tuple[int]]):
"""
boundary of a list of faces, i.e. an arbitrary 2-chain over Z/2Z
"""
boundary_list = [self.boundary_2(face) for face in faces]
a = Counter([y for x in boundary_list for y in x])
boundary_list = [x[0] for x in a.items() if x[1] % 2 == 1]
return boundary_list
def coboundary_1(self, star):
"""
compute coboundary of a single star
"""
coboundary = self.code_graph.neighbors(star)
return coboundary
def delta_1(self, stars: List[Tuple[int]]):
"""
coboundary of a list of stars, i.e. an arbitrary 0-cochain over Z/2Z
"""
coboundary_list = [self.coboundary_1(star) for star in stars]
a = Counter([y for x in coboundary_list for y in x])
coboundary_list = [x[0] for x in a.items() if x[1] % 2 == 1]
return coboundary_list
def coboundary_2(self, edge):
"""
compute coboundary of a single edge given by a white node (cycle in alpha)
"""
coboundary2 = [node for node in self.code_graph.neighbors(edge) if node in self.phi_dict]
return coboundary2
def delta_2(self, edges: List[Tuple[int]]):
"""
coboundary of a list of edges, i.e. an arbitrary 1-cochain over Z/2Z
given by a list of cycles in alpha
"""
coboundary_list = [self.coboundary_2(edge) for edge in edges]
a = Counter([y for x in coboundary_list for y in x])
coboundary_list = [x[0] for x in a.items() if x[1] % 2 == 1]
return coboundary_list
def euler_characteristic(self):
"""
Compute the Euler characteristic of the surface in which the graph is embedded
"""
chi = len(self.phi) - len(self.alpha) + len(self.sigma)
return (chi)
def genus(self):
"""
Compute the genus of the surface in which the graph is embedded
"""
g = int(-(len(self.phi) - len(self.alpha) + len(self.sigma) - 2) / 2)
return (g)
def draw(self, node_type='', layout = ''):
"""
Draw graph with vertices, edges, and faces labeled by colored nodes and their integer indices
corresponding to the qubit indices for the surface code
"""
        if not node_type in ['cycles', 'dict']:
            raise ValueError('node_type can be "cycles" or "dict"')
        # map the layout name to the corresponding NetworkX layout function,
        # and fail loudly instead of leaving pos undefined for unknown names
        layouts = {'spring': nx.spring_layout,
                   'spectral': nx.spectral_layout,
                   'planar': nx.planar_layout,
                   'shell': nx.shell_layout,
                   'circular': nx.circular_layout,
                   'spiral': nx.spiral_layout,
                   'random': nx.random_layout}
        if layout not in layouts:
            raise ValueError('layout must be one of: ' + ', '.join(layouts))
        pos = layouts[layout](self.code_graph)
# white nodes
nx.draw_networkx_nodes(self.code_graph, pos,
nodelist=list(self.alpha),
node_color='c',
node_size=500,
alpha=0.3)
# vertex nodes
nx.draw_networkx_nodes(self.code_graph, pos,
nodelist=list(self.sigma),
node_color='b',
node_size=500,
alpha=0.6)
# face nodes
nx.draw_networkx_nodes(self.code_graph, pos,
nodelist=list(self.phi),
node_color='r',
node_size=500,
alpha=0.6)
# edges
nx.draw_networkx_edges(self.code_graph, pos, width=1.0, alpha=0.5)
labels={}
if node_type == 'cycles':
'''
label nodes the cycles of sigma, alpha, and phi
'''
for node in self.alpha_dict:
# stuff = self.alpha_dict[node]
labels[node]=f'$e$({node})'
for node in self.sigma_dict:
# something = self.sigma_dict[node]
labels[node]=f'$v$({node})'
for node in self.phi_dict:
# something2 = self.phi_dict[node]
labels[node]=f'$f$({node})'
nx.draw_networkx_labels(self.code_graph, pos, labels, font_size=12)
if node_type == 'dict':
'''
label nodes with v, e, f and indices given by node_dict corresponding to
qubit indices of surface code
'''
for node in self.alpha_dict:
# stuff = self.alpha_dict[node]
labels[node]=f'$e$({self.alpha_dict[node]})'
for node in self.sigma_dict:
# something = self.sigma_dict[node]
labels[node]=f'$v$({self.sigma_dict[node]})'
for node in self.phi_dict:
# something2 = self.phi_dict[node]
labels[node]=f'$f$({self.phi_dict[node]})'
nx.draw_networkx_labels(self.code_graph, pos, labels, font_size=12)
# plt.axis('off')
# plt.savefig("labels_and_colors.png") # save as png
plt.show() # display
```
# Example 1
---
Here we define two permutations,
\begin{align}
\sigma &= (0,1,2)(3,4,5)(6,7) \\
\alpha &= (0,3)(1,6)(2,4)(5,7).
\end{align}
The cycles in $\sigma$ will correspond to vertices with *half-edges* attached to them in the cyclic ordering specified by each cycle. In this example, there are three vertices. One is labeled by $(0,1,2)$, the next is labeled by $(3,4,5)$, and the third is labeled by $(6,7)$. The cycles labeling each vertex tell us there are three half-edges connected to the first vertex, three on the second vertex, and two on the third vertex.
The permutation $\alpha$ gives us a way of *gluing* the half-edges to each other. So the first cycle of $\alpha$, $(0,3)$, tells us to glue the half-edges $0$ and $3$. Similarly, we glue the half-edges $1$ and $6$ since the second cycle of $\alpha$ is $(1,6)$. So, we can take the cycles in $\alpha$ as labels of the edges of the graph we are creating.
Our class `SurfaceCodeGraph` will create *white* nodes on these edges which are labeled by the cycles of $\alpha$. These will be important later when we create the class `SurfaceCodeCircuit`. The next thing to note is that once we have this graph glued together, there is a minimal genus surface into which it naturally embeds. The genus of this surface can be computed very easily once we know what the faces are. To compute the faces we need to compute a third permutation $\phi = (\sigma \alpha)^{-1}$. This is done automatically by the class, and can be printed off. In our example
\begin{align}
\phi = (0, 4) (1, 3, 7) (2, 6, 5).
\end{align}
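As a quick sanity check, here is a minimal sketch computing $\phi$ directly with SymPy, mirroring what the `compute_phi` method above does; the printed cyclic form matches the $\phi$ shown:
```python
from sympy.combinatorics import Permutation

s = Permutation([[0, 1, 2], [3, 4, 5], [6, 7]])    # sigma
a = Permutation([[0, 3], [1, 6], [2, 4], [5, 7]])  # alpha
phi = ~(a * s)  # phi = (sigma alpha)^(-1), as in compute_phi
print(phi.full_cyclic_form)  # [[0, 4], [1, 3, 7], [2, 6, 5]]
```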
In order to perform computations with permutations we use SymPy. The documentation on permutations can be found [here](https://docs.sympy.org/latest/modules/combinatorics/permutations.html). Let's have a look at our example by creating an instance of `SurfaceCodeGraph` corresponding to the permutations
\begin{align}
\sigma &= (0,1,2)(3,4,5)(6,7) \\
\alpha &= (0,3)(1,6)(2,4)(5,7).
\end{align}
```python
sigma = ((0,1,2),(3,4,5),(6,7))
alpha = ((0,3),(1,6),(2,4),(5,7))
SCG = SurfaceCodeGraph(sigma, alpha)
SCG
```
<__main__.SurfaceCodeGraph at 0x7fc8b6def190>
Now, we can draw the graph with the `draw` method with option `'cycles'` to label the vertex, edge, and face nodes by the cycles from `sigma`, `alpha`, and `phi` respectively, along with a `v`, `e`, or `f`, depending on whether they correspond to a vertex, edge, or face. We can also use the following layout options,
- `'spring'`
- `'spectral'`
- `'planar'`
- `'shell'`
- `'circular'`
- `'spiral'`
- `'random'`
```python
SCG.draw('cycles', 'spring')
```
To see which nodes correspond to *faces*, we can print off $\phi$.
```python
SCG.phi
```
((0, 4), (1, 3, 7), (2, 6, 5))
Now, we can print off the `node_info`, which is a dictionary built by `SurfaceCodeGraph`. The dictionary enumerates the vertices of the graph corresponding to $\sigma$, $\alpha$, and $\phi$, so that we have a complete enumeration of all vertices of the graph created.
```python
SCG.node_info
```
['sigma:',
{(0, 1, 2): 0, (3, 4, 5): 1, (6, 7): 2},
'alpha:',
{(0, 3): 6, (1, 6): 7, (2, 4): 8, (5, 7): 9},
'phi:',
{(0, 4): 3, (1, 3, 7): 4, (2, 6, 5): 5}]
This graph has three *black* vertices corresponding to $\sigma$, `0`, `1`, and `2`. There are four *white* vertices corresponding to $\alpha$, `6`, `7`, `8`, and `9`. It also has three more *black* vertices corresponding to $\phi$, `3`, `4`, and `5`. We can print off all of the nodes at once with their original labels given by the cycles of the permutations $\sigma, \alpha$, and $\phi$ as follows.
```python
SCG.code_graph.nodes
```
NodeView(((0, 1, 2), (3, 4, 5), (6, 7), (0, 4), (1, 3, 7), (2, 6, 5), (0, 3), (1, 6), (2, 4), (5, 7)))
To see the bipartite sets we can use the following methods, which print off the `white_nodes` and `black_nodes`.
```python
bipartite.sets(SCG.code_graph)
```
({(0, 1, 2), (0, 4), (1, 3, 7), (2, 6, 5), (3, 4, 5), (6, 7)},
{(0, 3), (1, 6), (2, 4), (5, 7)})
```python
SCG.white_nodes
```
{(0, 3), (1, 6), (2, 4), (5, 7)}
```python
SCG.black_nodes
```
{(0, 1, 2), (0, 4), (1, 3, 7), (2, 6, 5), (3, 4, 5), (6, 7)}
There is an alternative method for drawing the graph which colors the vertices, edges, and faces with blue, cyan, and red, respectively. It also prints `v`, `e`, or `f` depending on whether the node corresponds to a vertex, edge, or face, along with the dictionary value corresponding to the integer index that labels the qubits in the surface code (which we discuss a bit later).
```python
SCG.draw('dict', 'spring')
```
We can compute the *Euler characteristic*, which is given by the formula
\begin{align}
\chi &= V-E+F \\
&= |\sigma| - |\alpha| + |\phi|,
\end{align}
which in our case is
\begin{align}
3 - 4 + 3 = 2.
\end{align}
`SurfaceCodeGraph` has a method for computing the Euler characteristic.
```python
SCG.euler_characteristic()
```
2
The next natural thing we might want to know is the *genus* of the surface the graph is naturally embedded in. This can be computed using the formula
\begin{align}
\chi &= 2-2g \\
& \implies 2 = 2-2g \\
& \implies 0 = -2g \\
& \implies 0 = g.
\end{align}
So, we have a genus zero surface, a sphere. `SurfaceCodeGraph` has a method for this as well.
```python
SCG.genus()
```
0
Now, something that is important for working with surface codes is computing the *boundary operator* applied to a list of faces. The boundaries are computed over $\mathbb{Z}/2\mathbb{Z}$-homology. This is in general a map that takes faces to edges, and edges to vertices,
\begin{align}
\partial_2: & F \to E \\
\partial_1: & E \to V.
\end{align}
So, the way we have constructed our graph, this amounts to computing the edge nodes which are connected to a node corresponding to a face. We can do this for a single face, or for a list of faces. Since we are working with $\mathbb{Z}/2\mathbb{Z}$-homology, which is typical in surface code calculations, we only work with lists of faces. In more general settings, such as $\mathbb{Z}$-homology, we can have a vector with integer entries representing that we have multiple copies of faces, but this is more general than we need here, and the coefficients are just zero or one, so lists of faces will suffice.
As an example, if we compute the boundary of the face $(1,3,7)$, we get a list of *white* nodes (always from $\alpha$) which are the boundary. The list will be $[(0, 3), (1, 6), (5, 7)]$. There is a method in `SurfaceCodeGraph` for computing the boundary of arbitrary lists of faces.
```python
SCG.del_2([(1,3,7)])
```
[(0, 3), (1, 6), (5, 7)]
Now, let's compute the boundary of $[(0, 4), (1,3,7)]$.
```python
SCG.del_2([(0, 4), (1,3,7)])
```
[(2, 4), (1, 6), (5, 7)]
Something important to notice here is that while $e = (0,3)$ is in the boundary of both $(0,4)$ and of $(1,3,7)$, since we are working over $\mathbb{Z}/2\mathbb{Z}$, $2e = 0e = 0$. In other words, two copies is the same as zero copies of an edge since we are working **mod** $2$. Mathematically this can be written as follows,
\begin{align}
\partial_2((0, 4) + (1,3,7)) &= \partial_2((0, 4)) + \partial_2((1,3,7))\\
&= [(2, 4)+(0, 3)] + [(0, 3) + (1, 6) + (5, 7)] \\
&= 2(0,3) + (2, 4) + (1, 6) + (5, 7)\\
&= (2, 4) + (1, 6) + (5, 7).
\end{align}
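Under the hood, the mod $2$ bookkeeping in `del_2` (and its sibling methods) is just a parity filter on a `Counter`; a minimal standalone sketch of the cancellation above:
```python
from collections import Counter

b1 = [(2, 4), (0, 3)]          # boundary of the face (0, 4)
b2 = [(0, 3), (1, 6), (5, 7)]  # boundary of the face (1, 3, 7)

counts = Counter(b1 + b2)
mod2_sum = [edge for edge, n in counts.items() if n % 2 == 1]
print(mod2_sum)  # [(2, 4), (1, 6), (5, 7)] -- the shared edge (0, 3) cancels
```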
Now, we can also compute *coboundaries* of vertices (always given by lists of elements from $\sigma$). Coboundaries of vertices are the edges attached to a vertex. Coboundaries of edges are the faces on either side of the edge. So the coboundary maps are
\begin{align}
\delta_1: & V \to E \\
\delta_2: & E \to F.
\end{align}
As an example, let's use the method of `SurfaceCodeGraph` to compute the coboundary of the vertex $(0,1,2)$.
```python
SCG.delta_1([(0,1,2)])
```
[(0, 3), (1, 6), (2, 4)]
This is a list of edges (always given by cycles of $\alpha$, i.e. *white* nodes). Now, let's compute the *coboundary* of the vertices $[(0,1,2), (3, 4, 5)]$.
```python
SCG.delta_1([(0,1,2), (3,4,5)])
```
[(1, 6), (5, 7)]
Notice that here again the white nodes $(0,3)$ and $(2,4)$ are in the coboundary of both the vertex $(0,1,2)$ and the vertex $(3,4,5)$. Since they show up twice, their coefficient is $2$, which is $0$ **mod** $2$.
Now, let's use the method `boundary_1` which computes the boundary of a single edge (given by a cycle in alpha, which corresponds to a white node in the graph). This should return a list of vertices labeled by cycles of $\sigma$.
```python
SCG.boundary_1((0,3))
```
[(0, 1, 2), (3, 4, 5)]
```python
SCG.boundary_1((1,6))
```
[(0, 1, 2), (6, 7)]
Next, we compute the boundary of a list of edges, i.e. an arbitrary $1$-chain. Here we use the method `del_1` ($\partial_1: E \to V$) and the method should return a list of vertices labeled by cycles in $\sigma$. Again, remember we are working over $\mathbb{Z}/2\mathbb{Z}$ so if there is an overlap in boundaries between two edges, they cancel **mod** $2$.
```python
SCG.del_1([(0,3), (1,6)])
```
[(3, 4, 5), (6, 7)]
Notice, the boundary vertex $(0,1,2)$ does not appear since we are working **mod** $2$, i.e. because it is in the boundary of both $(0,3)$ and $(1,6)$. Mathematically we have
\begin{align}
\partial_1((0,3) + (1,6)) &= \partial_1((0,3)) + \partial_1((1,6)) \\
&= [(0,1,2)+(3,4,5)] + [(0,1,2)+(6,7)] \\
&= 2(0,1,2)+(3,4,5)+(6, 7)\\
&= (3, 4, 5) + (6, 7).
\end{align}
Of course, we can use the `coboundary_2` method to compute coboundaries of a single edge too. This will return two faces labeled by cycles in $\phi$ that lie on either side of an edge.
```python
SCG.coboundary_2((0,3))
```
[(0, 4), (1, 3, 7)]
```python
SCG.coboundary_2((1,6))
```
[(1, 3, 7), (2, 6, 5)]
Next, let's compute the coboundary of a list of edges, i.e. an arbitrary $1$-cochain. This is done using the method `delta_2`:
\begin{align}
\delta_2:E \to F.
\end{align}
This will return a list of faces lying on either side of the edges, and as usual it will be over $\mathbb{Z}/2\mathbb{Z}$.
```python
SCG.delta_2([(0,3), (1,6)])
```
[(0, 4), (2, 6, 5)]
Mathematically, this is:
\begin{align}
\delta_2((0,3) + (1,6)) &= \delta_2((0,3)) + \delta_2((1,6)) \\
&= [(0, 4) + (1, 3, 7)] + [(1, 3, 7) + (2, 6, 5)] \\
&= (0, 4) + 2(1, 3, 7) + (2, 6, 5)\\
&= (0, 4) + (2, 6, 5),
\end{align}
since we are working over $\mathbb{Z}/2\mathbb{Z}$.
Now, we are going to create a class `SurfaceCodeCircuit`, which builds a quantum circuit in Qiskit corresponding to a `SurfaceCodeGraph`, which is built from the input data $\sigma, \alpha$.
```python
from typing import Tuple
from qiskit.circuit import QuantumCircuit, QuantumRegister, ClassicalRegister
# from SurfaceCodes.surface_code_class import SurfaceCodeGraph
# from SurfaceCodes.utilites import permlist_to_tuple
class SurfaceCodeCircuit(QuantumCircuit):
def __init__(self, sigma: Tuple[Tuple[int]], alpha: Tuple[Tuple[int]]):
super().__init__()
self.sigma = sigma
self.alpha = alpha
self.scgraph = SurfaceCodeGraph(self.sigma, self.alpha)
'''
Compute the permutation corresponding to phi and create a
'surface code circuit' based on a (multi)graph 'surface_code_graph'
given by sigma, alpha, and phi
Create quantum and classical registers based on the number of nodes in G
'''
# f = self.scgraph.compute_phi()
self.phi = self.scgraph.phi
self.qr = QuantumRegister(len(self.scgraph.code_graph.nodes))
self.cr = ClassicalRegister(len(self.scgraph.code_graph.nodes))
self.circ = QuantumCircuit(self.qr, self.cr)
self.node_info = self.scgraph.node_dict
self.sigma_dict, self.alpha_dict, self.phi_dict = self.node_info
for cycle in self.sigma:
self.circ.h(self.sigma_dict[cycle])
for cycle in self.phi:
self.circ.h(self.phi_dict[cycle])
def x_measurement(self, qubit: int, cbit: int):
"""Measure 'qubit' in the X-basis, and store the result in 'cbit'
:param qubit, cbit:
:return None
"""
# circuit.measure = measure # fix a bug in qiskit.circuit.measure
self.circ.h(qubit)
self.circ.measure(qubit, cbit)
self.circ.h(qubit)
def star_syndrome_measure(self, vertex: Tuple[int]):
"""
Applies CX gates to surrounding qubits of a star then measures star qubit in X-basis
:param vertex:
:return: self.circ, self.scgraph, self.node_info
"""
for node in self.scgraph.code_graph.neighbors(vertex):
self.circ.cx(self.sigma_dict[vertex], self.alpha_dict[node])
self.circ.barrier()
self.x_measurement(self.sigma_dict[vertex], self.sigma_dict[vertex])
self.circ.barrier()
return self.circ, self.scgraph, self.node_info
def face_syndrome_measure(self, vertex: Tuple[int]):
"""
Applies CZ gates to surrounding qubits of a face then measures face qubit in X-basis
:param vertex:
:return:
"""
for node in self.scgraph.code_graph.neighbors(vertex):
self.circ.cz(self.phi_dict[vertex], self.alpha_dict[node])
self.circ.barrier()
self.x_measurement(self.phi_dict[vertex], self.phi_dict[vertex])
self.circ.barrier()
return self.circ, self.scgraph, self.node_info
def product_Z(self, faces):
"""
Pauli product Z operator for arbitrary 2-chain boundary
"""
boundary_nodes = self.scgraph.del_2(faces)
for node in boundary_nodes:
self.circ.z(self.alpha_dict[node])
def product_X(self, stars):
"""
Pauli product X operator for arbitrary 0-cochain coboundary
"""
coboundary_nodes = self.scgraph.delta_1(stars)
for node in coboundary_nodes:
self.circ.x(self.alpha_dict[node])
    def draw_graph(self, node_type='', layout = ''):
        """
        Delegate drawing to the underlying SurfaceCodeGraph, which computes
        the layout itself; node_type and layout take the same options as
        SurfaceCodeGraph.draw
        """
        self.scgraph.draw(node_type, layout)
```
This class will create a Qiskit circuit, with all black nodes of its `SurfaceCodeGraph` (corresponding to $\sigma$ and $\phi$) initialized in the $|+\rangle = H|0\rangle$ state, by applying a Hadamard gate. The *white* nodes, corresponding to the edges of the graph constructed by `SurfaceCodeGraph` (i.e. by cycles of $\alpha$), will be left alone and so will be in the $|0\rangle$ state.
```python
SCC = SurfaceCodeCircuit(sigma, alpha)
```
```python
SCC.circ.draw('mpl')
```
Now, we can draw the corresponding graph, which in this case is the same as the graph we created above for our examples of `SurfaceCodeGraph`.
```python
nx.draw(SCC.scgraph.code_graph, with_labels = True)
```
We can also use the `draw_graph()` method with either `'cycles'` or `'dict'` as the `node_type`. This will draw the same graph as the `SurfaceCodeGraph` method `draw()`. The nodes of the graph will be labeled by a `v`, `e`, or `f` depending on whether the nodes correspond to a vertex, edge, or face. If the option `cycles` is chosen the nodes are labeled by the corresponding cycles from `sigma`, `alpha`, and `phi`. If the option `dict` is chosen for `node_type` they will be labeled by the corresponding qubit indices (integers).
```python
SCC.draw_graph('cycles', 'spring')
```
```python
SCC.draw_graph('dict', 'spring')
```
We can again print out the `node_info`, which is a dictionary from the node labels to integers. The first row is for $\sigma$, the second row is for $\alpha$, and the third row is for $\phi$.
```python
SCC.node_info
```
({(0, 1, 2): 0, (3, 4, 5): 1, (6, 7): 2},
{(0, 3): 6, (1, 6): 7, (2, 4): 8, (5, 7): 9},
{(0, 4): 3, (1, 3, 7): 4, (2, 6, 5): 5})
We can print $\sigma, \alpha$, and $\phi$.
```python
SCC.sigma
```
((0, 1, 2), (3, 4, 5), (6, 7))
```python
SCC.alpha
```
((0, 3), (1, 6), (2, 4), (5, 7))
```python
SCC.phi
```
((0, 4), (1, 3, 7), (2, 6, 5))
We can also print off all of the nodes, which are labeled by the cycles of $\sigma, \alpha$, and $\phi$.
```python
SCC.scgraph.code_graph.nodes
```
NodeView(((0, 1, 2), (3, 4, 5), (6, 7), (0, 4), (1, 3, 7), (2, 6, 5), (0, 3), (1, 6), (2, 4), (5, 7)))
Now, there is a `star_syndrome_measure` method in SurfaceCodeCircuit which implements the generalization of the following *syndrome measurement* found on page 108 of [Quantum Computation with Topological Codes: from qubit to topological fault-tolerance](https://arxiv.org/pdf/1504.01444.pdf):
The picture above (on the right) is for *star* syndrome measurements of the *toric code*; however, our implementation works for *arbitrary* surface codes given by *arbitrary* graphs embedded in surfaces. The star measurement applies Controlled-$NOT$ gates from the qubit corresponding to a vertex (given by a cycle in $\sigma$) to the target qubits given by the coboundary of that vertex, which are edges connected to it labeled by *white* nodes (corresponding to cycles in $\alpha$); this matches the `cx` gates in `star_syndrome_measure`. The picture above (on the left) also has *plaquette* measurements. Again, our implementation works for arbitrary surface codes (for arbitrary graphs embedded in surfaces). The *plaquette syndrome measurement* applies Controlled-$Z$ gates with source qubit given by the qubit corresponding to a face, and target qubits given by the boundary of that face; this matches the `cz` gates in `face_syndrome_measure`. Let's apply the star syndrome measurement for the vertex $(0,1,2)$.
```python
SCC.star_syndrome_measure(((0,1,2)))
SCC.circ.draw('mpl')
```
Now, let's apply the plaquette syndrome measurement corresponding to the face $(2,6,5)$.
```python
SCC.face_syndrome_measure(((2, 6, 5)))
SCC.circ.draw('mpl')
```
Again, it is important to note that if the list of faces or stars given as input to the above two functions has faces with edges in common, an even number of occurrences of the same edge means that edge doesn't show up in the output. On the other hand, if there is an odd number of occurrences of an edge, that edge does show up in the output. This reflects the $\mathbb{Z}/2\mathbb{Z}$-chain complex structure. This can all be phrased in terms of linear algebra by treating $V$, $E$, and $F$ as vector spaces over $\mathbb{Z}/2\mathbb{Z}$. For more background on the math involved see Chapter 3 of [Quantum Computation with Topological Codes: from qubit to topological fault-tolerance](https://arxiv.org/pdf/1504.01444.pdf).
At this point we should mention that a list of faces will form a $2$-chain, and a list of stars (vertices in $\sigma$) forms a $0$-cochain. From this, along with the boundary and coboundary operators we just defined, we can define the Pauli product operators. The first kind of Pauli product operators we define are
\begin{align}
W(c_1) = \bigotimes_{k}W(e_k)^{n(k)},
\end{align}
where $n(k) \in \{0,1\}$, and $c_1$ is a "$1$-chain" given by a list of edges (each edge corresponding to a cycle in $\alpha$). Each $W_k \in \{X_k, Y_k, Z_k\}$ is a Pauli operator that operates on the $k^{th}$ qubit, where $k$ is the index of the qubit corresponding to an edge. We can get this index using `node_info`, in particular `alpha_dict`, the middle dictionary, which gives us the indices of qubits corresponding to each edge (white node).
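For instance, the qubit index acted on by a Pauli operator at the edge $e = (0,3)$ can be read off directly:
```python
SCC.alpha_dict[(0, 3)]  # -> 6, matching the node_info printed above
```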
Now, if we compute the boundary $\partial_2(c_2)$ of a $2$-chain (given by a list of faces), we will get a $1$-chain. This $1$-chain will be a list of edges that form the boundary of the faces. From this, we can compute the "**plaquette operators**", i.e. **face operators**,
\begin{align}
Z(\partial_2f_k) = \bigotimes_{e_j \in \partial_2f_k} Z(e_j)
\end{align}
where $\partial_2f_k$ is the edge list forming the boundary of the face $f_k \in F$. We can also compute the "**star operators**",
\begin{align}
X(\delta_1 v_m) = \bigotimes_{e_j \in \delta_1v_m} X(e_j),
\end{align}
where $\delta_1 v_m$ is the edge list forming the *coboundary* of the vertex $v_m$ (which corresponds to some cycle permutation in $\sigma$). These form the **stabilizer generators** for the surface code. For a refresher on stabilizer generators we refer to the notebooks on [error correction](https://github.com/The-Singularity-Research/error-correction) and on [graph states](https://github.com/The-Singularity-Research/graph-state-quantum-cryptography/blob/master/certifying_graph_states.ipynb). Now, let's define a Pauli product $Z$ operator $Z(\partial_2 c_2)$ for the boundary of an arbitray $2$-chain $c_2$, as well as a Pauli product $X$ operator $X(\delta_1 c_0)$ for the coboundary of an arbitrary $0$-cochain $c_0$. The following image can be found on page 76 of [Quantum Computation with Topological Codes: from qubit to topological fault-tolerance](https://arxiv.org/pdf/1504.01444.pdf), and the table below can be found on page 68.
We can apply the *Pauli product*-$Z$ operator corresponding to arbitrary $2$-chain boundaries. Let's do this for the list of two faces $[(0, 4), (2, 6, 5)]$.
```python
SCC.product_Z([(0, 4), (2, 6, 5)])
SCC.circ.draw('mpl')
```
We can also apply the *Pauli product*-$X$ operator corresponding to arbitrary $0$-cochain coboundaries. Let's do this for the list of two vertices $[(3, 4, 5), (6, 7)]$.
```python
SCC.product_X([(3, 4, 5), (6, 7)])
SCC.circ.draw('mpl')
```
The two methods `product_Z` and `product_X` use the methods `del_2` and `delta_1` of `SurfaceCodeGraph`, respectively, in order to compute boundaries of faces and coboundaries of vertices to apply the Pauli-$Z$ and Pauli-$X$ gates to.
---
## Example 2
---
The following choice for $[\sigma, \alpha]$ will construct a graph on a sphere, which we can check by computing the genus of the surface.
```python
sigma = ((0,1),(2,3,4),(5,6,7),(8,9),(10,11,12),(13,14,15,16),(17,18,19,20),(21,22,23),(24,25,26),(27,28,29,30),(31,32,33,34),(35,36,37),(38,39),(40,41,42),(43,44,45),(46,47))
alpha = ((0,2),(1,10),(3,5),(4,14),(6,8),(7,18),(9,22),(11,13),(12,24),(15,17),(16,28),(19,21),(20,32),(23,36),(25,27),(26,38),(29,31),(30,41),(33,35),(34,44),(37,47),(39,40),(42,43),(45,46))
```
```python
SCG = SurfaceCodeGraph(sigma, alpha)
SCG.genus()
```
0
Now, we know this graph has a planar embedding since its genus is $g = 0$. So, let's attempt to use the `planar` drawing option to draw the graph in the plane.
```python
SCG.draw('dict', 'planar')
```
Clearly, this is a confusing drawing. So, what other options do we have? We can plot with the layout options:
- `'spring'`
- `'spectral'`
- `'planar'`
- `'shell'`
- `'circular'`
- `'spiral'`
- `'random'`
So, let's try the `'spectral'` option.
```python
SCG.draw('dict', 'spectral')
```
This is a little better, but it's still quite messy. Let's find out how many nodes the graph has.
```python
len(SCG.code_graph.nodes())
```
50
The number of vertices is just the number of cycles in `sigma`.
```python
len(SCG.sigma)
```
16
The number of edges is the number of cycles in `alpha`.
```python
len(SCG.alpha)
```
24
The number of faces is the number of cycles in `phi`.
```python
len(SCG.phi)
```
10
Let's print off the faces.
```python
SCG.phi
```
((0, 10, 24, 38, 40, 43, 46, 37, 23, 9, 6, 3),
(1, 2, 14, 11),
(4, 5, 18, 15),
(7, 8, 22, 19),
(12, 13, 28, 25),
(16, 17, 32, 29),
(20, 21, 36, 33),
(26, 27, 41, 39),
(30, 31, 44, 42),
(34, 35, 47, 45))
Now, let's print off all of the nodes, including those labeling the edges given by the cycles in `alpha`.
```python
SCG.code_graph.nodes()
```
NodeView(((0, 1), (2, 3, 4), (5, 6, 7), (8, 9), (10, 11, 12), (13, 14, 15, 16), (17, 18, 19, 20), (21, 22, 23), (24, 25, 26), (27, 28, 29, 30), (31, 32, 33, 34), (35, 36, 37), (38, 39), (40, 41, 42), (43, 44, 45), (46, 47), (0, 10, 24, 38, 40, 43, 46, 37, 23, 9, 6, 3), (1, 2, 14, 11), (4, 5, 18, 15), (7, 8, 22, 19), (12, 13, 28, 25), (16, 17, 32, 29), (20, 21, 36, 33), (26, 27, 41, 39), (30, 31, 44, 42), (34, 35, 47, 45), (0, 2), (1, 10), (3, 5), (4, 14), (6, 8), (7, 18), (9, 22), (11, 13), (12, 24), (15, 17), (16, 28), (19, 21), (20, 32), (23, 36), (25, 27), (26, 38), (29, 31), (30, 41), (33, 35), (34, 44), (37, 47), (39, 40), (42, 43), (45, 46)))
Now, we're going to remove one of the face nodes and all of the edges connected to it.
```python
SCG.code_graph.remove_node((0, 10, 24, 38, 40, 43, 46, 37, 23, 9, 6, 3))
```
```python
len(SCG.code_graph.nodes())
```
49
Now, let's try using the NetworkX methods for drawing the spectral layout of the graph without the newly deleted node.
```python
nx.draw_spectral(SCG.code_graph, with_labels = False)
```
Clearly, this graph can be drawn as a grid in the plane. The way we have constructed it, it is actually isomorphic to a $7 \times 7$ grid of qubits with nearest neighbor connectivity. Let's check this by defining such a graph in NetworkX.
```python
G = nx.Graph()
```
```python
pos = dict()
for x in range(7):
for y in range(7):
G.add_node((x,y))
pos[(x,y)] = (x,y)
if x>0:
G.add_edge((x-1,y),(x,y))
if y>0:
G.add_edge((x,y),(x,y-1))
nx.draw(G, pos=pos, with_labels = True)
```
We can check that the two graphs are isomorphic as follows.
```python
nx.is_isomorphic(SCG.code_graph, G)
```
True
Now, we are going to use a NetworkX method to find the actual isomorphism.
```python
G1, G2 = SCG.code_graph, G
```
```python
GM = nx.isomorphism.GraphMatcher(G1,G2)
GM.is_isomorphic()
```
True
The following gives us the mapping of the actual nodes. The keys in the dictionary are the nodes of the graph `SCG.code_graph` (without the deleted face node), the values are the nodes of the graph we just constructed.
```python
GM.mapping
```
{(0, 1): (0, 0),
(1, 10): (0, 1),
(10, 11, 12): (0, 2),
(12, 24): (0, 3),
(24, 25, 26): (0, 4),
(26, 38): (0, 5),
(38, 39): (0, 6),
(0, 2): (1, 0),
(1, 2, 14, 11): (1, 1),
(11, 13): (1, 2),
(12, 13, 28, 25): (1, 3),
(25, 27): (1, 4),
(26, 27, 41, 39): (1, 5),
(39, 40): (1, 6),
(2, 3, 4): (2, 0),
(4, 14): (2, 1),
(13, 14, 15, 16): (2, 2),
(16, 28): (2, 3),
(27, 28, 29, 30): (2, 4),
(30, 41): (2, 5),
(40, 41, 42): (2, 6),
(3, 5): (3, 0),
(4, 5, 18, 15): (3, 1),
(15, 17): (3, 2),
(16, 17, 32, 29): (3, 3),
(29, 31): (3, 4),
(30, 31, 44, 42): (3, 5),
(42, 43): (3, 6),
(5, 6, 7): (4, 0),
(7, 18): (4, 1),
(17, 18, 19, 20): (4, 2),
(20, 32): (4, 3),
(31, 32, 33, 34): (4, 4),
(34, 44): (4, 5),
(43, 44, 45): (4, 6),
(6, 8): (5, 0),
(7, 8, 22, 19): (5, 1),
(19, 21): (5, 2),
(20, 21, 36, 33): (5, 3),
(33, 35): (5, 4),
(34, 35, 47, 45): (5, 5),
(45, 46): (5, 6),
(8, 9): (6, 0),
(9, 22): (6, 1),
(21, 22, 23): (6, 2),
(23, 36): (6, 3),
(35, 36, 37): (6, 4),
(37, 47): (6, 5),
(46, 47): (6, 6)}
So, we could represent the original graph as a grid of qubits on the sphere, with one face node outside of the grid on the opposite side of the sphere, connected to all of the edge nodes around the outside of the grid of qubits we just drew above. To see exactly which nodes correspond to vertices, edges, and faces, let's build a new coloring of the nodes of the graph `G`.
```python
node_color = {(x[0], x[1]): 1-((x[0]+x[1])%2)/2 for x in G.nodes()}
for y in range(1,7,2):
for z in range(1,7,2):
node_color[(y,z)] = .2
node_color = [node_color[x] for x in sorted(node_color.keys())]
```
```python
nx.draw(G, pos = pos, node_color = node_color)
```
Here, the yellow nodes are vertices (cycle in $\sigma$), the blue nodes are edges (cycles in $\alpha$), and the purple nodes are faces (cycles in $\phi$).
---
## References
---
### Quantum Computing and Surface Codes
- [Quantum Computation with Topological Codes: from qubit to topological fault-tolerance](https://arxiv.org/pdf/1504.01444.pdf)
- [A graph-based formalism for surface codes and twists](https://www.youtube.com/watch?v=Ca85qdptceQ)
- [Constructions and Noise Threshold of Hyperbolic Surface Codes](https://arxiv.org/pdf/1506.04029.pdf)
- [Homological Quantum Codes Beyond the Toric Code](https://arxiv.org/pdf/1802.01520.pdf)
- [Hyperbolic Lattices in Circuit Quantum Electrodynamics](https://arxiv.org/pdf/1802.09549.pdf)
- [Quantum Simulation of Hyperbolic Space with Circuit Quantum Electrodynamics: From Graphs to Geometry](https://arxiv.org/pdf/1910.12318.pdf)
- [Quantum Error Correction for Quantum Memories](https://arxiv.org/pdf/1302.3428.pdf)
- [Ribbon Graphs on nLab](https://ncatlab.org/nlab/show/ribbon+graph)
### Quantum Gravity and String Theory
- [Maxim Kontsevich, Intersection Theory on the Moduli Space of Curves and the Matrix Airy Function](http://pagesperso.ihes.fr/~maxim/TEXTS/Intersection%20theory%20and%20Airy%20function.pdf)
- [Ed Witten, On the Kontsevich Model and other Models of Two-Dimensional Gravity](https://lib-extopc.kek.jp/preprints/PDF/1992/9203/9203552.pdf)
- [Lando & Zvonkin, Graphs on Surfaces and Their Applications](https://link.springer.com/book/10.1007/978-3-540-38361-1)
### The Langlands Program and the Generalized Riemann Hypothesis
- [SURFACE ALGEBRAS I: DESSINS D’ENFANTS, SURFACE ALGEBRAS, AND DESSIN ORDERS](https://arxiv.org/pdf/1810.06750.pdf)
- [SURFACE ALGEBRAS AND SURFACE ORDERS II: AFFINE BUNDLES ON CURVES](https://arxiv.org/pdf/1812.00621.pdf)
### Hardware Related Papers
- [Hyperbolic Lattices in Circuit Quantum Electrodynamics](https://arxiv.org/pdf/1802.09549.pdf)
|
#### 2019-07-31, Huan asked me to create a folder for output, to contain the .pdf, .stat.txt, .nuc.txt files for used samples
#### 2019-07-29, Huan asked me to add sample filter on minimum read number
#### 2019-07-26, Huan asked me to re-implement Strata with .nuc.txt files as input
#### 2019-07-17, Huan asked me to also output stat.txt file of the averaged SHM and error bar
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# BiocManager::install("Biostrings", version = "3.8")
options(stringsAsFactors=FALSE)
#options(stringsAsFactors=FALSE, error=traceback)
### reference: yyx_echo_string_functions.20160420.r
`%.%` <- function(x,y) paste0(x,y)
join = function(sep, vec, ...) paste(collapse=sep, c(vec, ...))
cat0 = function(...) cat(sep="", ...)
echo_str <- function(x, sep=" =\t", collapse=", ") deparse(substitute(x)) %.% sep %.% join(collapse, x)
echo <- function(x, sep=" =\t", collapse=", ") cat0(deparse(substitute(x)), sep, join(collapse, x), "\n");
str_trim_left = function(str, trimLen){
strlen = nchar(str)
start = pmin(strlen, trimLen) + 1
end = strlen
substr(str, start, end)
}
str_left = function(str, len){
strlen = nchar(str)
start = 1
end = pmin(len, strlen)
substr(str, start, end)
}
### ref: yyx_rm_NA.yyx_convert_NA.20141114.r
yyx_NA2FALSE <- function(vec){
vec[is.na(vec)] <- FALSE
vec
}
yyx_convert_NA <- function(vec, default=FALSE){
vec[is.na(vec)] <- default
vec
}
### reference: SHMHelper.R
getBases <- function () {
bases <- c("A","C","G","T","N")
}
getAscii <- function () {
ascii <- c(65,67,71,84,78)
}
getBasecolors <- function () {
basecolors <- brewer.pal(7,"Set1")
}
### reference: Rsub.R
parseArgs <- function(scriptname, ARGS, OPTS=NULL) {
if (length(ARGS) %% 3 != 0)
stop("Error: ARGS must have length divisable by 3")
if (!is.null(OPTS) && length(OPTS) %% 4 != 0)
stop("Error: OPTS must have length divisable by 4")
ARGS <- matrix(ARGS, nrow=length(ARGS)/3, ncol=3, byrow=T, dimnames=list(c(),c("name","type","description")))
OPTS <- if (!is.null(OPTS)) matrix(OPTS, nrow=length(OPTS)/4, ncol=4, byrow=T, dimnames=list(c(),c("name","type","default","description")))
ARGV <- commandArgs(trailingOnly=TRUE)
catargs <- paste0("\t",ARGS[,"name"],"\t",ARGS[,"description"], collapse="\n")
catopts <- if (!is.null(OPTS)) paste0("\t-",OPTS[,"name"],"=",OPTS[,"default"],"\t", OPTS[,"description"], collapse="\n")
usage <- paste("\nUsage: Rscript ",scriptname," [OPTS] ",paste(ARGS[,"name"],collapse=" "),"\n\nOptions(=defaults):\n", catopts,"\n\nArguments:\n", catargs,"\n\n",sep="")
types <- c("character","numeric","integer","logical")
## initialize OPTS
if (!is.null(OPTS)) for (k in 1:nrow(OPTS)) {
if (!OPTS[k,"type"] %in% types)
stop("Unrecognized argument type ",OPTS[k,"type"]," for option ",OPTS[k,"name"],"\n",usage)
cmd <- paste(OPTS[k,"name"],"<<-as.",OPTS[k,"type"],"(\"",OPTS[k,"default"],"\")",sep="")
if(is.na(OPTS[k,"default"]) && OPTS[k,"type"]=="numeric"){
cmd <- OPTS[k,"name"] %.% "<<-NA"
}
# cat(paste0("[CMD] ", cmd, "\n"))
eval(parse(text=cmd))
}
if (length(ARGV) < nrow(ARGS)){
cat(usage)
stop("Not enough arguments.\n") # Yyx note 2019-07-29: stop() may has limit on the length of input string
}
## parse OPTS
k = 1
while(str_left(ARGV[k], 1)=="-"){
now_arg = str_trim_left(ARGV[k], 1)
now_regexec = regexec("(.*)=(.*)", now_arg)
# echo(now_arg)
# print(now_regexec)
if(now_regexec[[1]][1] >= 0){
now_matches = regmatches(now_arg, now_regexec)
now_name = now_matches[[1]][2]
now_value = now_matches[[1]][3]
if(now_name %in% OPTS[, "name"]){
idx <- match(now_name,OPTS[,"name"])
cmd <- paste(now_name,"<<-as.",OPTS[idx,"type"],"(\"",now_value,"\")",sep="")
# cat(paste0("[CMD] ", cmd, "\n"))
eval(parse(text=cmd))
}else{
stop("unrecognized option ",now_name," of ",ARGV[k],"\n",usage)
}
}else{
now_name = now_arg
if(now_name %in% OPTS[, "name"]){
idx <- match(now_name,OPTS[,"name"])
if(OPTS[idx,"type"]=="logical"){
cmd <- paste(OPTS[idx,"name"],"<<-!as.",OPTS[idx,"type"],"(\"",OPTS[idx,"default"],"\")",sep="")
# cat(paste0("[CMD] ", cmd, "\n"))
eval(parse(text=cmd))
}
}else{
# stop("option ",now_name," (",ARGV[k],") is not recognized\n",usage)
stop("unrecognized option or opt=value expression expected instead of ",ARGV[k],"\n",usage)
}
# cat(paste0("Warning: cannot recognize -", ARGV[k], " as -opt=value\n"))
# stop("opt=value expression expected instead of ",ARGV[k],"\n",usage)
}
k = k + 1
}
if (length(ARGV) - k + 1 < nrow(ARGS))
stop("Not enough arguments.\n",usage)
for (i in 1:nrow(ARGS)) {
if (! ARGS[i,"type"] %in% types)
stop("Unrecognized argument type ",ARGS[i,"type"]," for argument ",ARGS[i,"name"],"\n",usage)
cmd <- paste(ARGS[i,"name"],"<<-as.",ARGS[i,"type"],"(\"",ARGV[k],"\")",sep="")
# cat(paste0("[CMD] ", cmd, "\n"))
eval(parse(text=cmd))
k = k + 1
}
if (length(ARGV) - k + 1 > 0){
i = nrow(ARGS)
tail_ARGV_str = paste0(collapse="\",\"", ARGV[k:length(ARGV)])
cmd <- paste(ARGS[i,"name"],"<<-c(",ARGS[i,"name"],",as.",ARGS[i,"type"],"(c(\"",tail_ARGV_str,"\")))",sep="")
# cat(paste0("[CMD] ", cmd, "\n"))
eval(parse(text=cmd))
}
# if (length(ARGV) > i) for (j in (i+1):length(ARGV)) {
# opt <- unlist(strsplit(ARGV[j],"="))
# if (length(opt) != 2)
# stop("opt=value expression expected instead of ",ARGV[j],"\n",usage)
# if (is.null(OPTS) || ! opt[1] %in% OPTS[,"name"])
# stop("optional argument ",opt[1]," not recognized\n",usage)
# idx <- match(opt[1],OPTS[,"name"])
# cmd <- paste(opt[1],"<<-as.",OPTS[idx,"type"],"(\"",opt[2],"\")",sep="")
# eval(parse(text=cmd))
# }
}
### reference: SHMPlot2.noCov.R
ARGS <- c(
"output","character","file path prefix for output",
"statfile","character","file path of one guide (same V) .stat.txt file - 1~2th columns: Pos, Base",
"nucfile","character","file path of .nuc.txt file(s) - columns: Read_ID, base [ACGTN-.] on each position"
)
OPTS <- c(
"tstart","numeric",0,"Start of reference to view",
"tend","numeric",0,"End of reference to view",
"plotrows","numeric",1,"Rows on plot",
"ymax","numeric",0.75,"Maximum y-axis height",
"figureheight","numeric",2,"height in inches",
"showsequence","logical",FALSE,"display sequence on plots",
"regex1","character","AGCT","",
"regex2","character","[AGT](?=G[CT][AT])", "", # this is DGYW/WRCH motif
"cdr1_start","numeric",0,"Start of cdr1 region",
"cdr1_end","numeric",0,"End of cdr1 region",
"cdr2_start","numeric",0,"Start of cdr2 region",
"cdr2_end","numeric",0,"End of cdr2 region",
"cdr3_start","numeric",0,"Start of cdr3 region",
"cdr3_end","numeric",0,"End of cdr3 region",
"annotation","character","","V allele annotation (default: empty for no annotation)",
"mutMin","numeric",NA,"Minimum mutation number for stratification",
"mutMax","numeric",NA,"Maximum mutation number for stratification",
"mutMinProp","numeric",NA,"Minimum mutation number proportion for stratification",
"mutMaxProp","numeric",NA,"Maximum mutation number proportion for stratification",
"minReadNumB","numeric",0,"Minimum read number required for each sample before stratification",
"minReadNumA","numeric",0,"Minimum read number required for each sample after stratification",
"fo","logical",FALSE,"force output (default: stop if output file exists)"
)
#source_local <- function(fname){
# argv <- commandArgs(trailingOnly = FALSE)
# base_dir <- dirname(substring(argv[grep("--file=", argv)], 8))
# source(paste(base_dir, fname, sep="/"))
#}
#
#source_local("Rsub.R")
#source_local("SHMHelper.R")
parseArgs("yyx_SHMPlot2_Strata_ErrBar.20190731.r", ARGS, OPTS)
cat(paste0("Request to read ", length(nucfile), " nuc.txt files\n"))
for(k in 1:length(nucfile)){
cat(paste0("\t", k, "\t'", nucfile[k], "'\n"))
}
cat("\n")
output_pdf_filename = output %.% ".pdf"
output_stat_txt_filename = output %.% ".stat.txt"
output_used_nuc_list_filename = output %.% ".used_nuc_list.tsv"
if(!fo){
if(file.exists(output_pdf_filename)){
stop(paste0("Output pdf file '", output_pdf_filename, "' already exists.\n\tPlease remove it before run me.\n\tOr you can force output with the option '-fo=T'.\n"))
}
if(file.exists(output_stat_txt_filename)){
stop(paste0("Output stat.txt file '", output_stat_txt_filename, "' already exists.\n\tPlease remove it before run me.\n\tOr you can force output with the option '-fo=T'.\n"))
}
}
suppressPackageStartupMessages(library(RColorBrewer, quietly=TRUE))
suppressPackageStartupMessages(library(Biostrings, quietly=TRUE, verbose=FALSE))
bases <- getBases()
basecolors <- getBasecolors()
ascii <- getAscii()
cat("\n")
cat(paste0("Now reading guide '", statfile, "' ...\n"))
data <- read.delim(statfile, header=T, as.is=T)
data = data[, c("Pos", "Base")]
data$Pos = as.integer(data$Pos)
data = data[order(data$Pos),]
if (any(diff(data$Pos) != 1)) stop("Pos column must be sequential")
#echo(dim(data))
cat(" input gene number = " %.% nrow(data) %.% "\n")
refseq <- DNAString(paste(data$Base,collapse=""))
#cat(paste0(" initial nrow = ", nrow(data), "\n"))
each_total_Y_colidx = integer(0)
all_sample_prefixes = character(0)
used_nuc_files = character(0)
for(k in 1:length(nucfile)){
cat("Now reading in " %.% k %.% "-th nuc '" %.% nucfile[k] %.% "' ...\n")
now_sample_prefix = sub("^.*/", "", nucfile[k])
now_sample_prefix = sub("[.].*$", "", now_sample_prefix)
now_data <- read.delim(nucfile[k], header=T, as.is=T)
# echo(dim(now_data))
rownames(now_data) = now_data[,1]
now_data = now_data[,-1]
# echo(dim(now_data))
cat(" original dim = " %.% join(", ", dim(now_data)) %.% "\n")
if(ncol(now_data) != nrow(data)){
cat("Warning: .nuc.txt pos (ncol - 1) is not equal to .stat.txt pos (nrow), so I skip this .nuc.txt file\n")
next
}
# echo(dim(now_data))
## 2019-07-29, add sample filter on minReadNum
if(nrow(now_data) < minReadNumB){
cat("Warning: .nuc.txt read number (nrow=" %.% nrow(now_data) %.% ") is less than minReadNumB (" %.% minReadNumB %.% "), so I skip this .nuc.txt file\n")
next
}
## 2019-07-26, add stratification
### ref: intrinsicMutProfileStrata.20190726.py
eachReadMut = apply(now_data, 1, function(row) sum(row %in% c("A","C","G","T")))
eachReadWt = apply(now_data, 1, function(row) sum(row == "."))
eachReadTotal = eachReadMut + eachReadWt
eachReadMutProp = eachReadMut / eachReadTotal
# echo(length(eachReadTotal))
eachRead_boolIdx = rep(TRUE, nrow(now_data))
if(!is.na(mutMin)){
eachRead_boolIdx = eachRead_boolIdx & yyx_NA2FALSE(eachReadMut >= mutMin)
}
if(!is.na(mutMax)){
eachRead_boolIdx = eachRead_boolIdx & yyx_NA2FALSE(eachReadMut <= mutMax)
}
if(!is.na(mutMinProp)){
eachRead_boolIdx = eachRead_boolIdx & yyx_NA2FALSE(eachReadMutProp >= mutMinProp)
}
if(!is.na(mutMaxProp)){
eachRead_boolIdx = eachRead_boolIdx & yyx_NA2FALSE(eachReadMutProp <= mutMaxProp)
}
now_data = now_data[eachRead_boolIdx, ]
# echo(dim(now_data))
cat(" stratified dim = " %.% join(", ", dim(now_data)) %.% "\n")
## 2019-07-29, add sample filter on minReadNum
if(nrow(now_data) < minReadNumA){
cat("Warning: .nuc.txt read number (nrow=" %.% nrow(now_data) %.% ") is less than minReadNumA (" %.% minReadNumA %.% "), so I skip this .nuc.txt file\n")
next
}
eachPosMut = apply(now_data, 2, function(col) sum(col %in% c("A","C","G","T")))
eachPosWt = apply(now_data, 2, function(col) sum(col == "."))
eachPosTotal = eachPosMut + eachPosWt
# echo(length(eachPosTotal))
# data[["Mut." %.% k]] = eachPosMut
if(now_sample_prefix %in% all_sample_prefixes){
data[["Total." %.% now_sample_prefix %.% "." %.% (sum(all_sample_prefixes==now_sample_prefix)+1)]] = eachPosTotal
data[["Y." %.% now_sample_prefix %.% "." %.% (sum(all_sample_prefixes==now_sample_prefix)+1)]] = yyx_convert_NA(eachPosMut / eachPosTotal, 0)
}else{
data[["Total." %.% now_sample_prefix]] = eachPosTotal
data[["Y." %.% now_sample_prefix]] = yyx_convert_NA(eachPosMut / eachPosTotal, 0)
}
all_sample_prefixes = c(all_sample_prefixes, now_sample_prefix)
used_nuc_files = c(used_nuc_files, nucfile[k])
# cat(paste0(" updated nrow = ", nrow(data), "\n"))
each_total_Y_colidx = c(each_total_Y_colidx, ncol(data)-1, ncol(data))
}
#each_total_Y_colidx = c((1:length(nucfile))*3, (1:length(nucfile))*3-1)
#each_total_Y_colidx = c((1:length(nucfile))*2, (1:length(nucfile))*2-1)
#each_total_Y_colidx = sort(each_total_Y_colidx) + 2
cat("\nIn total, there are " %.% length(all_sample_prefixes) %.% " valid samples: " %.% join(", ", all_sample_prefixes) %.% "\n")
if(ncol(data) <= 2){ # no valid samples, maybe all filtered by minReadNum
cat("Note: because there seem to be no valid samples, so I just exit this R script\n")
q("no")
}
cat("\n")
cat(paste0("Now output used_nuc_list.tsv to '", output_used_nuc_list_filename, "' ...\n"))
write.table(data.frame(sample_name=all_sample_prefixes, nuc_path=used_nuc_files), file=output_used_nuc_list_filename, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
### reference: intrinsicMutProfileErrBar.20190131.py
data$Total = apply(data[,each_total_Y_colidx], 1, function(row){
total_vec = row[seq(1, length(row), by=2)]
sum(total_vec)
})
data$Y = apply(data[,each_total_Y_colidx], 1, function(row){
total_vec = row[seq(1, length(row), by=2)]
Y_vec = row[seq(2, length(row), by=2)]
Y_vec_2 = Y_vec[total_vec > 0]
mean(Y_vec_2)
})
data$Err = apply(data[,each_total_Y_colidx], 1, function(row){
total_vec = row[seq(1, length(row), by=2)]
Y_vec = row[seq(2, length(row), by=2)]
Y_vec_2 = Y_vec[total_vec > 0]
sd(Y_vec_2)/sqrt(length(Y_vec_2))
})
cat("\n")
cat(paste0("Now output stat.txt to '", output_stat_txt_filename, "' ...\n"))
write.table(data, file=output_stat_txt_filename, sep="\t", quote=FALSE, row.names=FALSE)
data$Y[is.na(data$Y)] = 0
data$Err[is.na(data$Err)] = 0
if (tstart < data$Pos[1]) {
tstart <- data$Pos[1]
}
if (tend == 0 || tend > tail(data$Pos,n=1)) {
tend <- tail(data$Pos,n=1)
}
data$Style <- match(data$Base,bases)
data$cov <- data$Total/max(data$Total)
rowwidth <- ceiling((tend-tstart+1)/plotrows)
tstarts <- tstart+rowwidth*0:(plotrows-1)
tends <- tstarts+rowwidth-1
plotline <- data.frame(x=c(data$Pos-0.49,data$Pos+0.49),y=c(data$Y,data$Y))
plotline <- plotline[order(plotline$x),]
if (ymax==0) { ymax <- 1.1*max(plotline$y[plotline$x >= tstart & plotline$x <= tend]) }
refy <- -ymax/20
refseq_rc <- reverseComplement(refseq)
regex1_plot <- data.frame(x=NA,y=NA)
regex2_plot <- data.frame(x=NA,y=NA)
if (regex1 != "") {
regex1_match <- gregexpr(regex1,as.character(refseq))[[1]]
regex1_match_rc <- gregexpr(regex1,as.character(refseq_rc))[[1]]
if (regex1_match[1] > 0) {
for (i in 1:length(regex1_match)) {
len <- attr(regex1_match,"match.length")[i]
pos <- data$Pos[regex1_match[i]]
regex1_plot <- rbind(regex1_plot,c(pos-0.5,2*refy))
regex1_plot <- rbind(regex1_plot,plotline[plotline$x >= pos - 0.5 & plotline$x <= pos + len - 0.5, ])
regex1_plot <- rbind(regex1_plot,c(pos + len - 0.5,2*refy))
regex1_plot <- rbind(regex1_plot,c(NA,NA))
}
}
if (regex1_match_rc[1] > 0) {
for (i in 1:length(regex1_match_rc)) {
len <- attr(regex1_match_rc,"match.length")[i]
pos <- data$Pos[nrow(data) - regex1_match_rc[i] - len + 2]
regex1_plot <- rbind(regex1_plot,c(pos-0.5,2*refy))
regex1_plot <- rbind(regex1_plot,plotline[plotline$x >= pos - 0.5 & plotline$x <= pos + len - 0.5, ])
regex1_plot <- rbind(regex1_plot,c(pos + len - 0.5,2*refy))
regex1_plot <- rbind(regex1_plot,c(NA,NA))
}
}
}
if (regex2 != "") {
regex2_match <- gregexpr(regex2,as.character(refseq),perl=T)[[1]]
regex2_match_rc <- gregexpr(regex2,as.character(refseq_rc),perl=T)[[1]]
if (regex2_match[1] > 0) {
for (i in 1:length(regex2_match)) {
len <- 4 # attr(regex2_match,"match.length")[i]
pos <- data$Pos[regex2_match[i]]
regex2_plot <- rbind(regex2_plot,c(pos-0.5,2*refy))
regex2_plot <- rbind(regex2_plot,plotline[plotline$x >= pos - 0.5 & plotline$x <= pos + len - 0.5, ])
regex2_plot <- rbind(regex2_plot,c(pos + len - 0.5,2*refy))
regex2_plot <- rbind(regex2_plot,c(NA,NA))
}
}
if (regex2_match_rc[1] > 0) {
for (i in 1:length(regex2_match_rc)) {
len <- 4 # attr(regex2_match_rc,"match.length")[i]
pos <- data$Pos[nrow(data) - regex2_match_rc[i] - len + 2]
regex2_plot <- rbind(regex2_plot,c(pos-0.5,2*refy))
regex2_plot <- rbind(regex2_plot,plotline[plotline$x >= pos - 0.5 & plotline$x <= pos + len - 0.5, ])
regex2_plot <- rbind(regex2_plot,c(pos + len - 0.5,2*refy))
regex2_plot <- rbind(regex2_plot,c(NA,NA))
}
}
}
cat("\n")
cat(paste0("Now output pdf to '", output_pdf_filename, "' ...\n"))
pdf(output_pdf_filename, height=figureheight, width=11)
par(mai=c(0.2,0.75,0.5,0.75),omi=c(0.5,0,0,0))
if(annotation == ""){
par(mai=c(0.2,0.75,0.2,0.75))
}
layout(as.matrix(1:plotrows,ncol=1,nrow=plotrows))
for (i in 1:plotrows) {
plot(c(),c(),ylab="",xaxt="n",xlab="",xlim=c(tstarts[i]-0.5,tends[i]+0.5),ylim=c(refy,ymax),xaxs="i",bty="o")
axis(1,lwd=0,lwd.ticks=1)
if (nrow(regex2_plot) > 1) polygon(regex2_plot, col=rgb(254,196,79,max=255),border=rgb(254,196,79,max=255))
if (nrow(regex1_plot) > 1) polygon(regex1_plot, col=rgb(217,95,14,max=255),border=rgb(217,95,14,max=255))
rect(cdr1_start, -0.5, cdr1_end, ymax+0.2, col = rgb(0,0,0,alpha=0.12), border = "NA")
rect(cdr2_start, -0.5, cdr2_end, ymax+0.2, col = rgb(0,0,0,alpha=0.12), border = "NA")
rect(cdr3_start, -0.5, cdr3_end, ymax+0.2, col = rgb(0,0,0,alpha=0.12), border = "NA")
if ("cov" %in% colnames(data)) {
covpos = c(rep(ymax+0.07, length(data$cov)))
topcovline <- data.frame(x=c(data$Pos-0.49,data$Pos+0.49),y=c(covpos+data$cov/5,covpos+data$cov/5))
topcovline <- topcovline[order(topcovline$x),]
botcovline <- data.frame(x=c(data$Pos-0.49,data$Pos+0.49),y=c(covpos,covpos))
botcovline <- botcovline[order(botcovline$x),]
botcovline$y <- unlist(lapply(botcovline$y,function(y) {max(0,y)}))
#polygon(c(topcovline$x,rev(botcovline$x)),c(topcovline$y,rev(botcovline$y)),col='black',border=T,xpd=TRUE,density=00)
}
if ("Err" %in% colnames(data)) {
toperrline <- data.frame(x=c(data$Pos-0.49,data$Pos+0.49),y=c(data$Y+data$Err,data$Y+data$Err))
toperrline <- toperrline[order(toperrline$x),]
boterrline <- data.frame(x=c(data$Pos-0.49,data$Pos+0.49),y=c(data$Y,data$Y))
#boterrline <- data.frame(x=c(data$Pos-0.49,data$Pos+0.49),y=c(data$Y-data$Err,data$Y-data$Err))
boterrline <- boterrline[order(boterrline$x),]
boterrline$y <- unlist(lapply(boterrline$y,function(y) {max(0,y)}))
polygon(c(toperrline$x,rev(boterrline$x)),c(toperrline$y,rev(boterrline$y)),col="green",border=F)
}
#col=grey(0.5,0.5) --> col="green" on 3/18/2015 --> col=rgb(128,255,0,180)
grid(col=grey(0.5))
if (showsequence) points(data$Pos[1]:tail(data$Pos,n=1),rep(refy,nrow(data)),col=basecolors[data$Style],pch=ascii[data$Style],cex=0.6)
lines(plotline$x,plotline$y)
# if(annotation==''){
# genename = tail(strsplit(datafile, "/")[[1]],1)
# genename = strsplit(genename, ".", fixed=TRUE)[[1]][1]
# }else{
# genename = annotation
# }
# legendwords = paste(genename, " TotalReads=", max(data$Total), sep="")
# legend(240, ymax+0.5, legend=legendwords , xpd=TRUE, cex=.6,bty="n")
if(annotation != ""){
legendwords = paste(annotation, " TotalReads=", max(data$Total), sep="")
legend(240, ymax+0.5, legend=legendwords , xpd=TRUE, cex=.6,bty="n")
}
}
graphics.off()
cat("\nAll done. Congratulations!\n")
|
Specially crafted for the lighter experienced paddler, looking for ultra-manoeuvrability in a board.
This is an ideal board for intermediate and young paddlers, and the narrow profile, short-board design allows it to perform very well in small surf or when just cruising along. It's our smallest 6" board, which gives it the rigidity of a larger board in a far smaller package. All this makes it a personal favourite for kids and teenagers, with its eye-catching design, ease of carrying and responsive performance.
Package Contents: integrated kick pad, inflation adapter, centre fin, high-pressure hand pump, SUP backpack, patch kit & manual, action camera mount kit (*action camera not included).
EVA deck footpad with 4mm honeycomb groove and an integrated EVA foam kick pad.
D-rings for optional kayak seat.
I'm a SUP novice, but this THRIVE SUP is very sturdy and very good quality. All is included as advertised; I just had to buy a paddle separately. Loot delivered my package in record-breaking time... Thanks!
module Day3 (solvePart1, solvePart2) where
import Data.Complex
import Data.Map (Map, (!))
import qualified Data.Map as Map
-- Build the infinite list of unit moves tracing the square spiral:
-- x steps in one direction, x steps after a 90-degree turn, then repeat with x+1.
createMovementList :: [Complex Double] -> [Int] -> [Complex Double]
createMovementList (f:s:rest) (x:xs) = replicate x f ++ replicate x s ++ createMovementList rest xs
createMovementList _ _ = []
movementList :: [Complex Double]
movementList = createMovementList (iterate (*(0 :+ 1)) (1 :+ 0)) [1..]
positions :: [Complex Double]
positions = scanl (+) (0 :+ 0) movementList
-- Manhattan distance of a grid point from the origin.
manhattanDistance :: Complex Double -> Int
manhattanDistance c = round (sum (fmap abs c))
solvePart1 :: String -> Int
solvePart1 input = let
n = read input
in manhattanDistance (positions !! (n-1))
solvePart2 :: String -> Int
solvePart2 input = let
n = read input
in populateMap n
populateMap :: Int -> Int
populateMap n = let
    -- Walk the spiral, storing each cell's value, until a value exceeds n.
    go (p:ps) m = let
        v = eval p m
        in if v <= n
            then go ps (Map.insert p v m)
            else v
    in go positions Map.empty
eval :: Complex Double -> Map (Complex Double) Int -> Int
eval c m
| Map.null m = 1
| otherwise = sum (map snd (neighbours c m))
neighbours :: Complex Double -> Map (Complex Double) Int -> [(Complex Double, Int)]
neighbours (a :+ b) m = let
member k = Map.member k m
entry k = (k, m ! k)
neighbourhood = [(a+x) :+ (b+y) | x <- [-1, 0, 1], y <- [-1, 0, 1], x /= 0 || y /= 0]
in map entry (filter member neighbourhood)
-- Orphan Ord instance so Complex values can serve as Map keys (lexicographic order).
instance (Ord a) => Ord (Complex a) where
(a :+ b) `compare` (c :+ d)
| a > c = GT
| a < c = LT
| b > d = GT
| b < d = LT
| b == d = EQ
|
[STATEMENT]
lemma "generalized_sfw (generalized_fw_join fw\<^sub>1 fw\<^sub>2) p = Some (u, d\<^sub>1,d\<^sub>2) \<longleftrightarrow> (\<exists>r\<^sub>1 r\<^sub>2. generalized_sfw fw\<^sub>1 p = Some (r\<^sub>1,d\<^sub>1) \<and> generalized_sfw fw\<^sub>2 p = Some (r\<^sub>2,d\<^sub>2) \<and> Some u = simple_match_and r\<^sub>1 r\<^sub>2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (generalized_sfw (generalized_fw_join fw\<^sub>1 fw\<^sub>2) p = Some (u, d\<^sub>1, d\<^sub>2)) = (\<exists>r\<^sub>1 r\<^sub>2. generalized_sfw fw\<^sub>1 p = Some (r\<^sub>1, d\<^sub>1) \<and> generalized_sfw fw\<^sub>2 p = Some (r\<^sub>2, d\<^sub>2) \<and> Some u = simple_match_and r\<^sub>1 r\<^sub>2)
[PROOF STEP]
by (auto dest: generalized_fw_joinD sym simp add: generalized_fw_joinI) |
module Contexts where
open import Prelude
open import T
open import SubstTheory
module Contexts where
infix 60 _e$_ _$e_
-- Has a hole of type (Γ ⊢ A), produces a term of type (Γ' ⊢ A')
data TCtx (Γ : Ctx) (A : TTp) : (Γ' : Ctx) → (A' : TTp) → Set where
∘ : TCtx Γ A Γ A
_e$_ : ∀{Γ' A' B} (e₁ : TExp Γ' (A' ⇒ B)) (C₂ : TCtx Γ A Γ' A') → TCtx Γ A Γ' B
_$e_ : ∀{Γ' A' B} (C₁ : TCtx Γ A Γ' (A' ⇒ B)) (e₂ : TExp Γ' A') → TCtx Γ A Γ' B
Λ : ∀{A₁ A₂ Γ'} (C : TCtx Γ A (A₁ :: Γ') A₂) → TCtx Γ A Γ' (A₁ ⇒ A₂)
suc : ∀{Γ'} (C : TCtx Γ A Γ' nat) → TCtx Γ A Γ' nat
rec1 : ∀{Γ' B} → (C : TCtx Γ A Γ' nat) → (e₀ : TExp Γ' B) → (es : TExp (B :: Γ') B) →
TCtx Γ A Γ' B
rec2 : ∀{Γ' B} → (e : TExp Γ' nat) → (C₀ : TCtx Γ A Γ' B) → (es : TExp (B :: Γ') B) →
TCtx Γ A Γ' B
rec3 : ∀{Γ' B} → (e : TExp Γ' nat) → (e₀ : TExp Γ' B) → (Cs : TCtx Γ A (B :: Γ') B) →
TCtx Γ A Γ' B
_<_> : ∀{Γ A Γ' A'} → TCtx Γ A Γ' A' → TExp Γ A → TExp Γ' A'
∘ < e' > = e'
(e₁ e$ C₂) < e' > = e₁ $ C₂ < e' >
(C₁ $e e₂) < e' > = C₁ < e' > $ e₂
Λ C < e' > = Λ (C < e' >)
suc C < e' > = suc (C < e' >)
rec1 C e₀ es < e' > = rec (C < e' >) e₀ es
rec2 e C₀ es < e' > = rec e (C₀ < e' >) es
rec3 e e₀ Cs < e' > = rec e e₀ (Cs < e' >)
infix 70 _<<_>> _<_>
_<<_>> : ∀{Γ A Γ' A' Γ'' A''} → TCtx Γ' A' Γ'' A'' → TCtx Γ A Γ' A' →
TCtx Γ A Γ'' A''
∘ << C' >> = C'
(e₁ e$ C₂) << C' >> = e₁ e$ C₂ << C' >>
(C₁ $e e₂) << C' >> = C₁ << C' >> $e e₂
Λ C << C' >> = Λ (C << C' >>)
suc C << C' >> = suc (C << C' >>)
rec1 C e₀ es << C' >> = rec1 (C << C' >>) e₀ es
rec2 e C₀ es << C' >> = rec2 e (C₀ << C' >>) es
rec3 e e₀ Cs << C' >> = rec3 e e₀ (Cs << C' >>)
-- I hate having to prove this sort of theorem.
composing-commutes : ∀{Γ A Γ' A' Γ'' A''} →
(C : TCtx Γ' A' Γ'' A'') →
(C' : TCtx Γ A Γ' A') →
(e : TExp Γ A) →
((C << C' >>) < e >) ≡ C < C' < e > >
composing-commutes ∘ C' e = Refl
composing-commutes (e₁ e$ C) C' e = resp (_$_ e₁) (composing-commutes C C' e)
composing-commutes (C $e e₂) C' e = resp (λ x → x $ e₂) (composing-commutes C C' e)
composing-commutes (Λ C) C' e = resp Λ (composing-commutes C C' e)
composing-commutes (suc C) C' e = resp suc (composing-commutes C C' e)
composing-commutes (rec1 C e₀ es) C' e = resp (λ x → rec x e₀ es) (composing-commutes C C' e)
composing-commutes (rec2 e' C es) C' e = resp (λ x → rec e' x es) (composing-commutes C C' e)
composing-commutes (rec3 e' e₀ C) C' e = resp (λ x → rec e' e₀ x) (composing-commutes C C' e)
-- If the hole is closed, then the rest needs to be too.
tctx-empty-thing : ∀{F : Set} {Γ A A' B} → TCtx [] A (B :: Γ) A' → F
tctx-empty-thing (e₁ e$ C) = tctx-empty-thing C
tctx-empty-thing (C $e e₂) = tctx-empty-thing C
tctx-empty-thing (Λ C) = tctx-empty-thing C
tctx-empty-thing (suc C) = tctx-empty-thing C
tctx-empty-thing (rec1 C e₀ es) = tctx-empty-thing C
tctx-empty-thing (rec2 e C es) = tctx-empty-thing C
tctx-empty-thing (rec3 e e₀ C) = tctx-empty-thing C
-- Very restricted function for weakening a program context where
-- the hole also occurs under no free variables.
weaken-closed-tctx : ∀{Γ A A'} → TCtx [] A [] A' → TCtx Γ A Γ A'
weaken-closed-tctx ∘ = ∘
weaken-closed-tctx (e₁ e$ C) = weaken-closed e₁ e$ weaken-closed-tctx C
weaken-closed-tctx (C $e e₂) = weaken-closed-tctx C $e weaken-closed e₂
weaken-closed-tctx (Λ C) = tctx-empty-thing C
weaken-closed-tctx (suc C) = suc (weaken-closed-tctx C)
weaken-closed-tctx (rec1 C e₀ es) =
rec1 (weaken-closed-tctx C) (weaken-closed e₀) (ren (wk closed-wkγ) es)
weaken-closed-tctx (rec2 e C es) =
rec2 (weaken-closed e) (weaken-closed-tctx C) (ren (wk closed-wkγ) es)
weaken-closed-tctx (rec3 e e₀ C) = tctx-empty-thing C
-- Substitution commutes with a closed context
subst-commutes-w-closed-tctx : ∀{Γ A A'} → (γ : TSubst Γ []) → (C : TCtx [] A [] A') → (e : TExp Γ A) →
C < ssubst γ e > ≡ ssubst γ ((weaken-closed-tctx C) < e >)
subst-commutes-w-closed-tctx γ ∘ e = Refl
subst-commutes-w-closed-tctx γ (e₁ e$ C) e =
resp2 _$_
(symm (closed-subst _ e₁) ≡≡ symm (subren γ closed-wkγ e₁))
(subst-commutes-w-closed-tctx γ C e)
subst-commutes-w-closed-tctx γ (C $e e₂) e =
resp2 _$_
(subst-commutes-w-closed-tctx γ C e)
(symm (closed-subst _ e₂) ≡≡ symm (subren γ closed-wkγ e₂))
subst-commutes-w-closed-tctx γ (Λ C) e = tctx-empty-thing C
subst-commutes-w-closed-tctx γ (suc C) e =
resp suc (subst-commutes-w-closed-tctx γ C e)
subst-commutes-w-closed-tctx γ (rec1 C e₀ es) e with subren (liftγ γ) (wk closed-wkγ) es
... | lol1 with subeq (liftwk γ closed-wkγ) es
... | lol2 with lift-closed-subst (γ o closed-wkγ) es
... | lol3 with symm lol3 ≡≡ symm lol2 ≡≡ symm lol1
... | lol = resp3 rec
(subst-commutes-w-closed-tctx γ C e)
(symm (closed-subst _ e₀) ≡≡ symm (subren γ closed-wkγ e₀))
lol
subst-commutes-w-closed-tctx γ (rec2 en C es) e with subren (liftγ γ) (wk closed-wkγ) es
... | lol1 with subeq (liftwk γ closed-wkγ) es
... | lol2 with lift-closed-subst (γ o closed-wkγ) es
... | lol3 with symm lol3 ≡≡ symm lol2 ≡≡ symm lol1
... | lol = resp3 rec
(symm (closed-subst _ en) ≡≡ symm (subren γ closed-wkγ en))
(subst-commutes-w-closed-tctx γ C e)
lol
subst-commutes-w-closed-tctx γ (rec3 e e₀ C) e₁ = tctx-empty-thing C
open Contexts public
|
open import Relation.Binary.PropositionalEquality using (_≡_; _≢_; refl)
open import Relation.Nullary.Decidable using (False; toWitnessFalse)
open import Data.Fin using (Fin; _≟_)
open import Data.Nat using (ℕ; suc)
open import Data.Product using (_×_; _,_)
open import Common
data Global (n : ℕ) : Set where
endG : Global n
msgSingle : (p q : Fin n) -> p ≢ q -> Label -> Global n -> Global n
private
variable
n : ℕ
p p′ q q′ r s : Fin n
l l′ : Label
g gSub gSub′ : Global n
msgSingle′ : (p q : Fin n) -> {False (p ≟ q)} -> Label -> Global n -> Global n
msgSingle′ p q {p≢q} l gSub = msgSingle p q (toWitnessFalse p≢q) l gSub
size-g : ∀ { n : ℕ } -> (g : Global n) -> ℕ
size-g endG = 0
size-g (msgSingle _ _ _ _ gSub) = suc (size-g gSub)
size-g-reduces :
∀ { p≢q }
-> g ≡ msgSingle {n} p q p≢q l gSub
-> size-g g ≡ suc (size-g gSub)
size-g-reduces {g = endG} ()
size-g-reduces {g = msgSingle _ _ _ _ gSub} refl = refl
msgSingle-subst-left :
∀ { p≢q }
-> g ≡ msgSingle {n} p q p≢q l gSub
-> (p≡p′ : p ≡ p′)
-> g ≡ msgSingle {n} p′ q (≢-subst-left p≢q p≡p′) l gSub
msgSingle-subst-left refl refl = refl
msgSingle-subst-right :
∀ { p≢q }
-> g ≡ msgSingle {n} p q p≢q l gSub
-> (q≡q′ : q ≡ q′)
-> g ≡ msgSingle {n} p q′ (≢-subst-right p≢q q≡q′) l gSub
msgSingle-subst-right refl refl = refl
msgSingle-injective :
∀ { p≢q p′≢q′ }
-> msgSingle {n} p q p≢q l gSub ≡ msgSingle p′ q′ p′≢q′ l′ gSub′
-> p ≡ p′ × q ≡ q′ × l ≡ l′ × gSub ≡ gSub′
msgSingle-injective refl = refl , refl , refl , refl
data _-_→g_ {n : ℕ} : Global n -> Action n -> Global n -> Set where
→g-prefix :
∀ { p≢q p≢q′ }
-> (msgSingle p q p≢q l gSub) - (action p q p≢q′ l) →g gSub
→g-cont :
∀ { p≢q r≢s }
-> gSub - (action p q p≢q l) →g gSub′
-> p ≢ r
-> q ≢ r
-> p ≢ s
-> q ≢ s
-> (msgSingle r s r≢s l′ gSub) - (action p q p≢q l) →g (msgSingle r s r≢s l′ gSub′)
|
lemma mono_restrict_space: "sets M \<le> sets N \<Longrightarrow> sets (restrict_space M X) \<subseteq> sets (restrict_space N X)" |
[STATEMENT]
lemma lfp_the_while_option:
assumes "mono f" and "!!X. X \<subseteq> C \<Longrightarrow> f X \<subseteq> C" and "finite C"
shows "lfp f = the(while_option (\<lambda>A. f A \<noteq> A) f {})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lfp f = the (while_option (\<lambda>A. f A \<noteq> A) f {})
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. lfp f = the (while_option (\<lambda>A. f A \<noteq> A) f {})
[PROOF STEP]
obtain P where "while_option (\<lambda>A. f A \<noteq> A) f {} = Some P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>P. while_option (\<lambda>A. f A \<noteq> A) f {} = Some P \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using while_option_finite_subset_Some[OF assms]
[PROOF STATE]
proof (prove)
using this:
(\<And>X. X \<subseteq> C \<Longrightarrow> X \<subseteq> C) \<Longrightarrow> \<exists>P. while_option (\<lambda>A. f A \<noteq> A) f {} = Some P
goal (1 subgoal):
1. (\<And>P. while_option (\<lambda>A. f A \<noteq> A) f {} = Some P \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
while_option (\<lambda>A. f A \<noteq> A) f {} = Some P
goal (1 subgoal):
1. lfp f = the (while_option (\<lambda>A. f A \<noteq> A) f {})
[PROOF STEP]
with while_option_stop2[OF this] lfp_Kleene_iter[OF assms(1)]
[PROOF STATE]
proof (chain)
picking this:
\<exists>k. P = (f ^^ k) {} \<and> \<not> f P \<noteq> P
(f ^^ Suc ?k) {} = (f ^^ ?k) {} \<Longrightarrow> lfp f = (f ^^ ?k) {}
while_option (\<lambda>A. f A \<noteq> A) f {} = Some P
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<exists>k. P = (f ^^ k) {} \<and> \<not> f P \<noteq> P
(f ^^ Suc ?k) {} = (f ^^ ?k) {} \<Longrightarrow> lfp f = (f ^^ ?k) {}
while_option (\<lambda>A. f A \<noteq> A) f {} = Some P
goal (1 subgoal):
1. lfp f = the (while_option (\<lambda>A. f A \<noteq> A) f {})
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
lfp f = the (while_option (\<lambda>A. f A \<noteq> A) f {})
goal:
No subgoals!
[PROOF STEP]
qed |
# ==============================================================================
#module NaquadahCore
include("Graphics/GraphTypes.jl")
include("NaquadahTypes.jl")
include("Graphics/Graphics.jl")
include("Events/Events.jl")
include("DOM/Dom.jl")
include("Layout/Layout.jl")
include("Layout/LayoutBuild.jl")
include("Layout/LayoutBegin.jl")
#end
# ==============================================================================
|
# COMP90051 Workshop 5
## Support Vector Machines
***
In this section, we'll explore how the SVM hyperparameters (i.e. the penalty parameter, the kernel, and any kernel parameters) affect the decision surface.
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from timeit import default_timer as timer
sns.set_style('darkgrid')
plt.rcParams['figure.dpi'] = 108
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, GridSearchCV
```
### 1. Data set
To make visualisation and training easy, we'll consider a small binary classification data set called `cats.csv` (available from the LMS).
It contains observations for 150 cats.
There are two features: heart and body weight measured in kilograms.
The target variable is the sex of the cat (we encode 'male' as `-1` and 'female' as `+1`).
\[Note: the data set originates from the following paper: R. A. Fisher (1947) _The analysis of covariance method for the relation between a part and the whole_, Biometrics **3**, 65–68\]
Ensure that `cats.csv` is located in the same directory as this notebook, then run the following code block to read the CSV file using `pandas`.
```python
full_df = pd.read_csv('cats.csv')
full_df.SEX = full_df.SEX.map({'M': -1, 'F': 1})
full_df.head()
```
Let's split the data into train/test sets so that we can evaluate our trained SVM.
(Note that this is likely to be unreliable for such a small data set.)
```python
train_df, test_df = train_test_split(full_df, test_size=0.2, random_state=1)
```
Since SVMs incorporate a penalty term for the weights (proportional to $\|\mathbf{w}\|_2^2$), it's usually beneficial to _standardise_ the features so that they vary on roughly the same scale.
***
**Exercise:** Complete the code block below to standardise the features, so that each feature has zero mean/unit variance.
_Hint: use `StandardScaler` imported above._
***
```python
scaler = StandardScaler()
X_train = ... # fill in
y_train = ... # fill in
X_test = ... # fill in
y_test = ... # fill in
```
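A minimal sketch of one possible solution (not a definitive answer key — the feature column layout is assumed here, so adjust it to match your copy of `cats.csv`):

```python
# Fit the scaler on the training features only, then apply it to both splits,
# so no information from the test set leaks into the preprocessing.
feature_cols = [c for c in train_df.columns if c != 'SEX']  # assumed layout
scaler = StandardScaler().fit(train_df[feature_cols])
X_train = scaler.transform(train_df[feature_cols])
y_train = train_df['SEX'].values
X_test = scaler.transform(test_df[feature_cols])
y_test = test_df['SEX'].values
```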
Let's plot the training data. Notice that it's not linearly separable.
```python
plt.scatter(X_train[y_train==1,0], X_train[y_train==1,1], label="Female ($y=1$)", c='r')
plt.scatter(X_train[y_train==-1,0], X_train[y_train==-1,1], label="Male ($y=-1$)", c='b')
plt.xlabel("Heart weight")
plt.ylabel("Body weight")
plt.legend()
plt.show()
```
### 2. Parameter grid search
Since the data is clearly not linearly separable, we're going to fit a kernelised SVM.
To do this, we'll use the `sklearn.svm.SVC` class, which is a wrapper for the popular [LIBSVM](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library.
\[Aside: LIBSVM solves the dual problem using a variant of the [sequential minimal optimisation (SMO) algorithm](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf).\]
The corresponding primal problem is as follows:
$$
\begin{align}
\min_{\mathbf{w}, b, \xi} \phantom{=} & \frac{1}{2} \mathbf{w}^T \mathbf{w} + C \sum_{i = 1}^{n} \xi_i \\
\mathrm{subject~to} \phantom{=} & y_{i}(\mathbf{w}^T \cdot \phi(\mathbf{x_i}) + b) \geq 1 - \xi_i \\
\phantom{=} & \xi_i \geq 0 \ \forall i
\end{align}
$$
Here $C$ is the penalty parameter, $\mathbf{w}$ are the weights, $b$ is the bias and $\phi$ is a mapping to a higher dimensional space---related to the kernel through $K(\mathbf{x}_i, \mathbf{x}_j) = \langle \phi(\mathbf{x}_i), \phi(\mathbf{x}_j) \rangle$.
For now, we'll use the radial basis function (RBF) kernel, which is parameterised in terms of $\gamma$ as follows:
$$
K(\mathbf{x}_i, \mathbf{x}_j) = \exp(-\gamma \|\mathbf{x}_i - \mathbf{x}_j\|^2)
$$
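As a quick illustration of what this kernel computes (a hand-rolled sketch for intuition only — `SVC` evaluates the kernel internally, and the helper name `rbf_gram` is our own):

```python
# Sketch: RBF Gram matrix K[i, j] = exp(-gamma * ||x_i - x_j||^2).
def rbf_gram(X, Z, gamma):
    sq_dists = ((X[:, None, :] - Z[None, :, :]) ** 2).sum(axis=-1)
    return np.exp(-gamma * sq_dists)

K_train = rbf_gram(X_train, X_train, gamma=0.1)  # shape (n_train, n_train)
```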
Returning to our classification problem: it's unclear how to set appropriate values for $C$ and $\gamma$ (named `C` and `gamma` in `sklearn`).
A simple way around this is to do an exhaustive cross validation grid search.
Below we define an evenly-spaced grid in log-space.
```python
C_range = np.logspace(-2, 5, 8)
gamma_range = np.logspace(-6, 1, 16)
# Visualise the grid
xx, yy = np.meshgrid(C_range, gamma_range)
plt.plot(xx, yy, 'ko')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$C$')
plt.ylabel(r'$\gamma$')
plt.show()
```
To do the grid search, we'll use the built-in `sklearn.model_selection.GridSearchCV` class.
It evaluates the model for each combination of parameter values using cross validation, and selects the combination with the best score.
We'll use `StratifiedShuffleSplit` for cross validation (it effectively generates bootstrap samples from the training data, while preserving the class ratio).
```python
cv = StratifiedShuffleSplit(n_splits=30, test_size=0.1, random_state=1)
grid = GridSearchCV(SVC(kernel='rbf'), param_grid={'gamma': gamma_range, 'C': C_range}, cv=cv)
grid.fit(X_train, y_train)
print("The best parameters are {0.best_params_} with an accuracy of {0.best_score_:.3g}".format(grid))
```
***
**Question:** Why aren't we using k-fold cross validation?
***
Below we visualise the cross validation accuracy over the grid of parameters.
```python
scores = grid.cv_results_['mean_test_score'].reshape(C_range.size, gamma_range.size)
plt.figure(figsize=(8, 6))
plt.imshow(scores, cmap='viridis')
plt.colorbar(shrink=0.7)
plt.xticks(np.arange(len(gamma_range)), ["%.2e" % gamma for gamma in gamma_range], rotation=90)
plt.yticks(np.arange(len(C_range)), ["%1.e" % C for C in C_range])
plt.title('Cross validation accuracy')
plt.xlabel(r'$\gamma$')
plt.ylabel('$C$')
plt.show()
```
***
**Question:** Interpret this plot. Is there a clear winning combination of parameters?
***
Now that we've found the "best" parameters, let's fit the SVM on the entire training set (without cross-validation).
(Note: we actually fit all parameter combinations, as they're needed for a plot generated below.)
```python
classifiers = {(C, gamma) : SVC(C=C, gamma=gamma, kernel='rbf').fit(X_train, y_train)
for C in C_range
for gamma in gamma_range}
```
Below we evaluate the "best" classifier on the test set.
```python
best_params = (grid.best_params_["C"], grid.best_params_["gamma"])
best_svm = classifiers[best_params]
best_train_acc = best_svm.score(X_train, y_train)
best_test_acc = best_svm.score(X_test, y_test)
print("The SVM with parameters C={0[0]:.3g}, gamma={0[1]:.3g} has training accuracy {1:.3g} and test accuracy {2:.3g}.".format(best_params, best_train_acc, best_test_acc))
```
***
**Question:** How does this compare to the training accuracy?
***
Below we visualise the decision functions for all parameter combinations (double-click output to expand to 100%)
```python
fig, ax = plt.subplots(C_range.size, gamma_range.size, figsize=(50,20))
border = 0.2
# Build meshgrid over the feature space
X_min = np.amin(X_train, axis=0)
X_max = np.amax(X_train, axis=0)
xx, yy = np.meshgrid(np.linspace(X_min[0] - border, X_max[0] + border, 100),
np.linspace(X_min[1] - border, X_max[1] + border, 100))
# Plot training data + decision function for all feature combinations
for (i, C) in enumerate(C_range):
for (j, gamma) in enumerate(gamma_range):
clf = classifiers[(C, gamma)]
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax[i,j].set_title("gamma={0.gamma:.3g}; C={0.C:.3g}".format(clf),
size='medium')
ax[i,j].pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
ax[i,j].scatter(X_train[y_train==1,0], X_train[y_train==1,1], c='r', edgecolors='k')
ax[i,j].scatter(X_train[y_train==-1,0], X_train[y_train==-1,1], c='b', edgecolors='k')
ax[i,j].set_xticks([])
ax[i,j].set_yticks([])
ax[i,j].axis('tight')
plt.show()
```
***
**Question:** Explain how `gamma` and `C` affect the decision surface qualitatively.
**Extension activity:** Re-run this section using a different kernel (e.g. the built-in polynomial kernel or a custom kernel).
***
|
library(methods)
{{rimport}}('__init__.r')
indir = {{i.indir | R}}
outdir = {{o.outdir | R}}
plink = {{args.plink | R}}
bedfile = Sys.glob(file.path(indir, '*.bed'))
input = tools::file_path_sans_ext(bedfile)
output = file.path(outdir, basename(input))
params = list(
bfile = input,
`check-sex` = T,
out = output
)
cmd = sprintf("%s %s 1>&2", plink, cmdargs(params, equal = ' '))
runcmd(cmd)
sexcheck = read.table(paste0(output, '.sexcheck'), header = T, row.names = NULL, check.names = F)
sex.sample.fail = sexcheck[which(sexcheck$STATUS == 'PROBLEM'), c('FID', 'IID'), drop=F]
write.table(sex.sample.fail, paste0(output, '.sex.fail'), col.names = F, row.names = F, sep = "\t", quote = F)
|
subroutine splev(t,n,c,k,x,y,m,e,ier)
c subroutine splev evaluates in a number of points x(i),i=1,2,...,m
c a spline s(x) of degree k, given in its b-spline representation.
c
c calling sequence:
c call splev(t,n,c,k,x,y,m,e,ier)
c
c input parameters:
c t : array,length n, which contains the position of the knots.
c n : integer, giving the total number of knots of s(x).
c c : array,length n, which contains the b-spline coefficients.
c k : integer, giving the degree of s(x).
c x : array,length m, which contains the points where s(x) must
c be evaluated.
c m : integer, giving the number of points where s(x) must be
c evaluated.
c e : integer, if 0 the spline is extrapolated from the end
c spans for points not in the support, if 1 the spline
c evaluates to zero for those points, and if 2 ier is set to
c 1 and the subroutine returns.
c
c output parameter:
c y : array,length m, giving the value of s(x) at the different
c points.
c ier : error flag
c ier = 0 : normal return
c ier = 1 : argument out of bounds and e == 2
c ier =10 : invalid input data (see restrictions)
c
c restrictions:
c m >= 1
c-- t(k+1) <= x(i) <= x(i+1) <= t(n-k) , i=1,2,...,m-1.
c
c other subroutines required: fpbspl.
c
c references :
c de boor c : on calculating with b-splines, j. approximation theory
c 6 (1972) 50-62.
c cox m.g. : the numerical evaluation of b-splines, j. inst. maths
c applics 10 (1972) 134-149.
c dierckx p. : curve and surface fitting with splines, monographs on
c numerical analysis, oxford university press, 1993.
c
c author :
c p.dierckx
c dept. computer science, k.u.leuven
c celestijnenlaan 200a, b-3001 heverlee, belgium.
c e-mail : [email protected]
c
c latest update : march 1987
c
c++ pearu: 11 aug 2003
c++ - disabled cliping x values to interval [min(t),max(t)]
c++ - removed the restriction of the orderness of x values
c++ - fixed initialization of sp to double precision value
c
c ..scalar arguments..
integer n, k, m, e, ier
c ..array arguments..
real*8 t(n), c(n), x(m), y(m)
c ..local scalars..
integer i, j, k1, l, ll, l1, nk1
c++..
integer k2
c..++
real*8 arg, sp, tb, te
c ..local array..
real*8 h(20)
c ..
c before starting computations a data check is made. if the input data
c are invalid control is immediately repassed to the calling program.
ier = 10
c-- if(m-1) 100,30,10
c++..
if (m .lt. 1) go to 100
c..++
c-- 10 do 20 i=2,m
c-- if(x(i).lt.x(i-1)) go to 100
c-- 20 continue
30 ier = 0
c fetch tb and te, the boundaries of the approximation interval.
k1 = k + 1
c++..
k2 = k1 + 1
c..++
nk1 = n - k1
tb = t(k1)
te = t(nk1 + 1)
l = k1
l1 = l + 1
c main loop for the different points.
do 80 i = 1, m
c fetch a new x-value arg.
arg = x(i)
c check if arg is in the support
if (arg .lt. tb .or. arg .gt. te) then
if (e .eq. 0) then
goto 35
else if (e .eq. 1) then
y(i) = 0
goto 80
else if (e .eq. 2) then
ier = 1
goto 100
endif
endif
c search for knot interval t(l) <= arg < t(l+1)
c++..
35 if (arg .ge. t(l) .or. l1 .eq. k2) go to 40
l1 = l
l = l - 1
go to 35
c..++
40 if(arg .lt. t(l1) .or. l .eq. nk1) go to 50
l = l1
l1 = l + 1
go to 40
c evaluate the non-zero b-splines at arg.
50 call fpbspl(t, n, k, arg, l, h)
c find the value of s(x) at x=arg.
sp = 0.0d0
ll = l - k1
do 60 j = 1, k1
ll = ll + 1
sp = sp + c(ll)*h(j)
60 continue
y(i) = sp
80 continue
100 return
end
|
import inspect
import tempfile
import unittest
import numpy as np
import transformers
from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow
from .test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.clip.modeling_flax_clip import FlaxCLIPModel, FlaxCLIPTextModel, FlaxCLIPVisionModel
if is_torch_available():
import torch
class FlaxCLIPVisionModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = CLIPVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxCLIPVisionModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (FlaxCLIPVisionModel,) if is_flax_available() else ()
def setUp(self):
self.model_tester = FlaxCLIPVisionModelTester(self)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def model_jitted(pixel_values, **kwargs):
return model(pixel_values=pixel_values, **kwargs).to_tuple()
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict)
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict)
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)
# CLIP has a different seq_length
image_size = (self.model_tester.image_size, self.model_tester.image_size)
patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_length = num_patches + 1
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
image_size = (self.model_tester.image_size, self.model_tester.image_size)
patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_length = num_patches + 1
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
outputs = model(np.ones((1, 3, 224, 224)))
self.assertIsNotNone(outputs)
class FlaxCLIPTextModelTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = CLIPTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
return config, input_ids, input_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_flax
class FlaxCLIPTextModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (FlaxCLIPTextModel,) if is_flax_available() else ()
def setUp(self):
self.model_tester = FlaxCLIPTextModelTester(self)
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
outputs = model(np.ones((1, 1)))
self.assertIsNotNone(outputs)
class FlaxCLIPModelTester:
def __init__(self, parent, is_training=True):
self.parent = parent
self.text_model_tester = FlaxCLIPTextModelTester(parent)
self.vision_model_tester = FlaxCLIPVisionModelTester(parent)
self.is_training = is_training
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=64)
return config, input_ids, attention_mask, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
}
return config, inputs_dict
@require_flax
class FlaxCLIPModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (FlaxCLIPModel,) if is_flax_available() else ()
test_attention_outputs = False
def setUp(self):
self.model_tester = FlaxCLIPModelTester(self)
# hidden_states are tested in individual model tests
def test_hidden_states_output(self):
pass
@slow
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def model_jitted(input_ids, pixel_values, **kwargs):
return model(input_ids=input_ids, pixel_values=pixel_values, **kwargs).to_tuple()
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict)
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict)
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs[:4], outputs[:4]):
self.assertEqual(jitted_output.shape, output.shape)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_ids", "pixel_values", "attention_mask", "position_ids"]
self.assertListEqual(arg_names[:4], expected_arg_names)
def test_get_image_features(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = FlaxCLIPModel(config)
@jax.jit
def model_jitted(pixel_values):
return model.get_image_features(pixel_values=pixel_values)
with self.subTest("JIT Enabled"):
jitted_output = model_jitted(inputs_dict["pixel_values"])
with self.subTest("JIT Disabled"):
with jax.disable_jit():
output = model_jitted(inputs_dict["pixel_values"])
self.assertEqual(jitted_output.shape, output.shape)
self.assertTrue(np.allclose(jitted_output, output, atol=1e-3))
def test_get_text_features(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = FlaxCLIPModel(config)
@jax.jit
def model_jitted(input_ids, attention_mask, **kwargs):
return model.get_text_features(input_ids=input_ids, attention_mask=attention_mask)
with self.subTest("JIT Enabled"):
jitted_output = model_jitted(**inputs_dict)
with self.subTest("JIT Disabled"):
with jax.disable_jit():
output = model_jitted(**inputs_dict)
self.assertEqual(jitted_output.shape, output.shape)
self.assertTrue(np.allclose(jitted_output, output, atol=1e-3))
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
outputs = model(input_ids=np.ones((1, 1)), pixel_values=np.ones((1, 3, 224, 224)))
self.assertIsNotNone(outputs)
# overwrite from common since FlaxCLIPModel returns nested output
# which is not supported in the common test
@is_pt_flax_cross_test
def test_equivalence_pt_to_flax(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
fx_model = model_class(config, dtype=jnp.float32)
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
fx_model.params = fx_state
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
# PyTorch CLIPModel returns loss, we skip it here as we don't return loss in JAX/Flax models
pt_outputs = pt_outputs[1:]
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
self.assertEqual(
len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
)
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
# overwrite from common since FlaxCLIPModel returns nested output
# which is not supported in the common test
@is_pt_flax_cross_test
def test_equivalence_flax_to_pt(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
fx_model = model_class(config, dtype=jnp.float32)
pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
# PyTorch CLIPModel returns loss, we skip it here as we don't return loss in JAX/Flax models
pt_outputs = pt_outputs[1:]
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
pt_outputs_loaded = pt_outputs_loaded[1:]
self.assertEqual(
len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
)
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
|
\documentclass{article}
\usepackage[margin=0.2cm]{geometry}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{ulem}
\usepackage{hyperref}
\usepackage{minted}
\usepackage{multicol}
\usepackage{graphicx}
\usepackage{float}
\usepackage{subfigure}
\begin{document}
\tableofcontents
\newpage
\section{Hall's marriage theorem}
Let $G=(V,E)=(X+Y,E)$ be a bipartite graph. Then $G$ has a complete matching from $X$ to $Y$ iff $\forall S\subseteq X\ |S|\leq |N(S)|$,\\
where $N_G(S)=\{y\in Y\mid \exists x\in S\ (x,y)\in E\}$.
\subsubsection{sufficiency}
($\Rightarrow$)\quad
Let $M=\{(x,f(x))\in E\mid x\in X\}\subseteq E$ be a complete matching.\\
$N_G(S)=\{y\in Y\mid \exists x\in S\ (x,y)\in E\}\supseteq \{f(x)\mid x\in S\}$.
\subsubsection{necessity}
($\Leftarrow$)\quad
Apply induction on $|X|$.
\begin{enumerate}
\item When $|X|=1$, the necessity is hold.
\item Suppose that the necessity is hold, for $|X|\leq k$
\item For $|X|=k+1$, $\forall S\subseteq X\ |S|\leq |N_G(X)|$
\begin{itemize}
\item $\forall \varnothing\subsetneq S\subsetneq X\ |N_G(S)|>|S|$\\
Select any edge $e=(x,y)$. Remove the vertices $x,y$ and every edge $(u,v)$ with $x\in\{u,v\}\lor y\in\{u,v\}$ to get $G'$.\\
In $G'$, $\forall \varnothing\subsetneq S\subseteq (X\setminus \{x\})\quad |N_{G'}(S)|\geq |N_{G}(S)|-1\geq |S|$.\\
So we can find a complete matching $M':X'\to Y'$ in $G'$.\\
Thus a complete matching $M:X\to Y$ can be generated as $M'\cup \{(x,y)\}$.
\item $\exists \varnothing\subsetneq S\subsetneq X\ |N_G(S)|=|S|$\\
Let $G_0=(V_0,E_0),G_1=(V_1,E_1)$ be two induced subgraph of $G$, where $V_0=S+N_G(S),V_1=(X\setminus S)+ (Y\setminus N_G(S))$.
\begin{itemize}
\item For $G_0$: $\forall P\subseteq S\ N_{G_0}(P)=N_{G}(P)$ so $\forall P\subseteq S\ |N_{G_0}(P)|=|N_{G}(P)|\geq |P|$.
\item For $G_1$: We claim that $\forall Q\subseteq (X\setminus S)\ |N_{G_1}(Q)|\geq |Q|$.\\
Otherwise, let $Q_0$ be a subset s.t. $|N_{G_1}(Q_0)|<|Q_0|$.\\
Then $N_{G}(S\cup Q_0)=N_{G}(S)\cup N_{G}(Q_0)$, where $N_{G}(S)=N_{G_0}(S)$, $N_{G}(Q_0)\subseteq (N_{G}(S)\cup N_{G_1}(Q_0))$ and $N_{G_1}(Q_0)\cap N_G(S)=\varnothing$,\\
which leads to $|N_{G}(S\cup Q_0)|\leq |N_G(S)|+|N_{G_1}(Q_0)| < |S|+|Q_0|$; however $|N_{G}(S\cup Q_0)|\geq |S\cup Q_0|=|S|+|Q_0|$, a contradiction.\\
Therefore in $G_1$, $\forall Q\subseteq (X\setminus S)\ |N_{G_1}(Q)|\geq |Q|$.\\
\end{itemize}
We can find a complete matching of $G$ by merging the complete matchings of $G_0$ and $G_1$.
\end{itemize}
\end{enumerate}
\subsubsection{generalization}
In a bipartite graph $G=(X+Y,E)$,
let $\mathrm{def}_G(S)=|S|-|N_G(S)|$;
then the size of a maximum matching in $G$ is $|X|-\max_{\varnothing\subseteq S\subseteq X}\mathrm{def}_G(S)$.
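As a sanity check, Hall's condition can also be tested directly by brute force. The sketch below, assuming the bipartite graph is given as a dict mapping each $x\in X$ to its neighbor set in $Y$, enumerates every non-empty $S\subseteq X$; it is exponential in $|X|$, so it is only usable on tiny instances.
\begin{minted}{python}
from itertools import combinations

def hall_condition(X, adj):
    for r in range(1, len(X) + 1):
        for S in combinations(X, r):
            N = set().union(*(adj[x] for x in S))
            if len(N) < len(S):
                return False  # deficient set: no complete matching
    return True

# two vertices of X competing for a single y: condition fails
print(hall_condition([1, 2], {1: {"y"}, 2: {"y"}}))  # False
\end{minted}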
\newpage
\section{Havel-Hakimi algorithm}
\subsubsection{the theorem}
Given a list of non-negative integers $(d_1,d_2,\ldots,d_n)$ where $d_1\geq d_2\geq \cdots\geq d_n\geq 0$:\\
is there an undirected simple\footnote{no loops $(v,v)$, no duplicate edges $k\times (u,v)$} graph $G=(V,E)$ s.t.\ the degree sequence of $G$ is $(d_1,d_2,\ldots,d_n)$?
\begin{minted}{python}
from typing import List
def check(deg_seq: List[int]) -> bool:
    if len(deg_seq) == 0:
        return True
    head, seq = deg_seq[0], deg_seq[1:]
    if len(seq) >= head:
        # connect the top-degree vertex to the next `head` vertices
        for i in range(head):
            seq[i] = seq[i] - 1
        seq = sorted(seq, reverse=True)
        if seq and seq[-1] < 0:  # a degree went negative: not graphical
            return False
        return check(seq)
    return False
\end{minted}
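For example (the argument must already be sorted in non-increasing order, which the recursion maintains):
\begin{minted}{python}
print(check([3, 3, 2, 2, 2]))  # True: the sequence is graphical
print(check([3, 1, 1]))        # False: 3 neighbors needed, only 2 left
print(check([0]))              # True: a single isolated vertex
\end{minted}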
\subsubsection{proof}
\emph{TODO}
\newpage
\section{graph isomorphism invariant}
\subsection{degree sequence}
$G\cong H\implies f(G)=f(H)$, where $f(G)=\mathrm{multiset}\{\deg_G(v)\mid v\in V(G)\}$.\\
This is a necessary but not sufficient condition, see the following example, where $H,G$ have the same degree sequence but are not isomorphic to each other.
\begin{minted}{C}
//!/usr/bin/dot
// in graphviz dot
graph G{
1 -- 2
2 -- 3
3 -- 4
4 -- 5
2 -- x
}
graph H{
1 -- 2
2 -- 3
3 -- 4
4 -- 5
3 -- x
}
\end{minted}
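A quick sanity check of this example: the sketch below, assuming the edge lists transcribed from the dot code above, confirms that $G$ and $H$ have the same degree multiset.
\begin{minted}{python}
from collections import Counter

G = [(1, 2), (2, 3), (3, 4), (4, 5), (2, "x")]
H = [(1, 2), (2, 3), (3, 4), (4, 5), (3, "x")]

def degree_multiset(edges):
    deg = Counter()
    for u, v in edges:
        deg[u] += 1
        deg[v] += 1
    return Counter(deg.values())  # multiset of vertex degrees

# True, although G and H are not isomorphic
print(degree_multiset(G) == degree_multiset(H))
\end{minted}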
\newpage
\section{Lower bound of $\max(\omega(G),\alpha(G))$}
\subsection{further reading}
\href{https://en.wikipedia.org/wiki/Ramsey_theory}{wikipedia: Ramsey theory}
\subsection{statement}
(Ramsey's Theorem) Every graph with $n$ vertices contains either a clique or an independent set with at least $\frac{1}{2}\log_2 n$ vertices.\\
$\omega(G)$: the clique number.
$\alpha(G)$: the independence number.
\subsection{proof}
\begin{enumerate}
\item $n=1$, hold
\item $n=2$, hold
\item Suppose that the statement is true for $n=1,2,3,\ldots,k$.\\
For $n=k+1$, select an arbitrary vertex $u$.
Let $A=\{v\mid \{u,v\}\in E,v\neq u\},\, B=\{v\mid \{u,v\}\not\in E,v\neq u\}$
\begin{itemize}
\item In $G[A]$: by induction there is either a clique or an independent set with at least $\frac{1}{2}\log_2 |A|$ vertices. An independent set of $G[A]$ is already an independent set of $G$, and a clique $C$ extends to the clique $C+\{u\}$, since $u$ is adjacent to every vertex of $A$.
\item In $G[B]$: symmetrically, a clique of $G[B]$ is already a clique of $G$, and an independent set $I$ extends to the independent set $I+\{u\}$, since $u$ is adjacent to no vertex of $B$.
\end{itemize}
Therefore, we can find either a clique or an independent set consisting of at least $1+\frac{1}{2}\max\left(\log_2 |A|,\log_2 |B|\right)$ vertices.\\
$|A|+|B|=n-1=k\implies \max(|A|,|B|)\geq \frac{1}{2}k\implies \max\left(\log_2 |A|,\log_2 |B|\right)\geq -1+\log_2 k$\\
Thus $\max(\omega(G),\alpha(G))\geq 1+\frac{1}{2}\left(-1+\log_2 k\right)=\frac{1}{2}\log_2 (2k)\geq \frac{1}{2}\log_2 n$, since $2k\geq k+1=n$.
\end{enumerate}
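The induction above is effectively constructive. A greedy sketch, assuming the graph is given as an adjacency-set dict: pick any vertex, keep whichever of its neighbor/non-neighbor sides is larger, and repeat. Every ``clique'' pick is adjacent to all later picks, every ``independent'' pick to none of them, and since each step at worst halves the remaining pool, one of the two lists reaches roughly $\frac{1}{2}\log_2 n$ vertices.
\begin{minted}{python}
def clique_or_independent(adj):
    pool = set(adj)
    clique, indep = [], []
    while pool:
        u = pool.pop()
        nbrs = pool & adj[u]
        non = pool - adj[u]
        if len(nbrs) >= len(non):
            clique.append(u)   # u is adjacent to everything kept
            pool = nbrs
        else:
            indep.append(u)    # u is adjacent to nothing kept
            pool = non
    return max(clique, indep, key=len)

# 4-cycle: returns two adjacent or two non-adjacent vertices
print(clique_or_independent(
    {0: {1, 3}, 1: {0, 2}, 2: {1, 3}, 3: {0, 2}}))
\end{minted}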
\newpage
\section{Sufficient conditions for the existence of a Hamilton circuit}
\begin{itemize}
\item Dirac's Theorem:
A simple graph $G=(V,E)$ s.t. $|V|\geq 3$ and $\forall v\in V\ \deg(v)\geq \frac{|V|}{2}$ has a Hamilton circuit.
\item Ore's Theorem:
A simple graph $G=(V,E)$ s.t. $|V|\geq 3$ and $\forall \{u,v\}\left( \{u,v\}\not\in E\rightarrow \deg(u)+\deg(v)\geq |V|\right)$ has a Hamilton circuit.
\end{itemize}
\subsection{proof}
\newpage
\section{Kuratowski's Theorem: the equivalent condition of planar graph}
\subsection{statement}
\begin{itemize}
\item \emph{elementary subdivision (expansion)}: Let $G=(V,E)$ be an undirected graph.\\
Delete an edge $\{u,v\}$ and add a vertex $w$, two edges $\{u,w\},\{w,v\}$.\\
(adding a new vertex in the middle of an edge)
\item \emph{subdivision (expansion)}: subdivision of $G$: graphs that can be obtained by performing a series of elementary subdivision on $G$.
\item \emph{smoothing}: the reverse process of subdivision (or expansion).
\item \emph{homeomorphic}: Two graphs $G,H$ are called homeomorphic to each other
if a subdivision of $G$ is isomorphic to $H$
or a subdivision of $H$ is isomorphic to $G$.
\item \emph{theorem}: if $H,G$ are homeomorphic graphs, then $H$ is planar iff $G$ is planar.
\item \emph{Kuratowski's Theorem}: A graph $G$ is planar iff it does not have a subgraph that is homeomorphic to $K_{3,3}$ or $K_5$.
\end{itemize}
\newpage
\section{coloring planar graphs}
\emph{the graphs discussed in this section should be simple graphs (no self-loops, no multi-edges)}
\subsection{lemmas}
\begin{itemize}
\item A graph is planar iff its dual graph is planar.
\item \emph{Euler's formula for (connected) planar graph}: $V-E+R=2$,
where $V,E,R$ be the number of vertices, the number of edges, the number of regions.
\item \emph{degree} Define the degree of a vertex and a region,
$\deg(v)=\left|\{e\in E\mid v\in e\}\right|$
and
$\deg(r)=\left|\{e\in E\mid \text{$e$ is on the boundary of $r$}\}\right|$
\item \emph{hand-shaking lemma}: For any undirected graph $G=(V,E)$, we have $2|E|=\sum_{v\in V}\deg(v)$\\
Apply it on the dual graph of a planar graph to get the practical property: $2|E|=\sum_{r\in \text{regions}}\deg(r)$
\item \emph{upper bound of edges in planar graph}: For a connected planar graph, if every region $r_i$ has $\deg(r_i)\geq d$,\\
then $2E=\sum_{r\in \text{regions}}\deg(r)\geq dR=d(2+E-V)$,
thus $E\leq \frac{d}{d-2}(V-2)$\\
Useful corollaries (a quick non-planarity check built from these bounds is sketched after this list):
\begin{itemize}
\item $V\geq 3$, planar, connected $\Rightarrow$ $E\leq 3V-6$
\item $V\geq 3$, planar, connected, no 3-cycle $\Rightarrow$ $E\leq 2V-4$
\item \emph{5 is an upper bound of $\delta$}: Every planar graph has a vertex of degree at most $5$
\end{itemize}
\end{itemize}
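The promised sketch: a necessary-condition test built directly from the two corollaries (failing the bound proves non-planarity; passing it proves nothing), assuming a connected simple graph described by its vertex/edge counts.
\begin{minted}{python}
def violates_planar_bound(V, E, triangle_free=False):
    if V < 3:
        return False
    limit = 2 * V - 4 if triangle_free else 3 * V - 6
    return E > limit  # True => the graph cannot be planar

print(violates_planar_bound(5, 10))                     # K5: True
print(violates_planar_bound(6, 9, triangle_free=True))  # K33: True
\end{minted}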
\subsection{The Six coloring theorem}
Induction: remove a vertex $u$ of least degree (by the previous lemma $\deg(u)\leq 5$), $6$-color the rest, then assign to $u$ a color that differs from the colors of all of its at most five neighbors.
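The induction is itself an algorithm. A minimal sketch, assuming a simple planar graph given as an adjacency-set dict:
\begin{minted}{python}
def six_color(adj):
    if not adj:
        return {}
    # a planar graph always has a vertex of degree <= 5
    u = min(adj, key=lambda v: len(adj[v]))
    rest = {v: adj[v] - {u} for v in adj if v != u}
    coloring = six_color(rest)
    used = {coloring[v] for v in adj[u]}  # at most 5 colors blocked
    coloring[u] = next(c for c in range(6) if c not in used)
    return coloring
\end{minted}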
\subsection{The Four coloring theorem}
\subsection{The Five coloring theorem}
\begin{enumerate}
\item $V\leq 5$: holds trivially.
\item Suppose that the property holds for $V=1,2,3,\ldots,k$.
\item For $V=k+1$, find a vertex of least degree, denoted $u$, and find a $5$-coloring of $G-u$.\\
WLOG let $\deg(u)=5$, with the five neighbors colored $A,B,C,D,E$.\\
Denote by $v_1,v_2,v_3,v_4,v_5$ the neighbors of $u$ \emph{in clockwise order}\footnote{we partition the plane with the edges $\{u,v_1\},\{u,v_2\},\{u,v_3\},\{u,v_4\},\{u,v_5\}$}\\
\begin{itemize}
\item If there is a path $P$ from $v_1$ to $v_3$ whose vertices all have color $A$ or color $C$:\\
consider the circuit $u\to v_1\to P\to v_3\to u$; it splits the plane into two regions (namely, an inner and an outer part), where $v_2$ is inside and $v_4,v_5$ are outside.\\
Perform the re-coloring operation $(A,B,C,D,E)\to (A,D,C,B,E)$ inside the circuit.\\
This is still a valid $5$-coloring of $G-u$, and we can now assign color $B$ to $u$, obtaining a $5$-coloring of $G$.
\item Otherwise $v_1,v_3$ are not connected using only vertices of color $A$ or $C$.\\
Take the subgraph $H$ induced by all vertices of color $A$ and $C$; $v_1,v_3$ lie in different connected components of $H$. Perform the re-coloring operation $(A,C)\to (C,A)$ in the connected component of $H$ that contains $v_1$.\\
This is still a valid $5$-coloring of $G-u$, and we can now assign color $A$ to $u$, obtaining a $5$-coloring of $G$.
\end{itemize}
\end{enumerate}
\newpage
\subsection*{illustration}
\includegraphics{images/ch5-five-coloring-p1.png}
\includegraphics{images/ch5-five-coloring-p2.png}
\newpage
\section{Number of centroids/centers in an (unrooted) tree}
\newcommand{\ECC}{\mathrm{ECC}}
\newcommand{\V}{\mathrm{Vertex}}
\newcommand{\N}{\mathrm{Neighbor}}
\newcommand{\dis}{\mathrm{dis}}
\newcommand{\subtree}{\mathrm{subtree}}
\newcommand{\height}{\mathrm{height}}
\subsection{center of a tree}
The eccentricity of a vertex in an unrooted tree is the length of the longest simple path beginning at this vertex.
A vertex is called a center if no vertex in the tree has smaller eccentricity than this vertex.\\
Let $\ECC(u)=\max_{v\in \V(T)}\dis(u,v)$; the vertices with minimum $\ECC$ are called centers.
\subsection{counting centers}
There are only two cases:
\begin{itemize}
\item a unique center.
\item two adjacent centers.
\end{itemize}
Suppose that $c\in \V(T)$ is a center of $T$.
Make $c$ the root of $T$ to obtain a rooted tree $T'$.\\
Let $\N(c)=\{u_1,u_2,\ldots,u_k\}$
s.t. $\height (u_1)=H_1\geq \height (u_2)=H_2\geq\cdots\geq \height (u_k)=H_k$;
then $\ECC_T(c)=H_1+1$.\\
Denote by $L_i$ a leaf of maximum depth in $\subtree(u_i)$.
\begin{enumerate}
\item $H_1=H_2\geq H_3\geq H_4\geq\cdots\geq H_k$\\
$c$ is the unique center of $T$.
\begin{itemize}
\item There is no center in $\subtree(u_1)$ or $\subtree(u_2)$:\\
for $c'$ in either subtree, consider the path $c'\to c\to u_2\to L_2$ (or symmetrically via $u_1$);
we have $\ECC(c')\geq 1+1+H_2=2+H_1>\ECC(c)$.
\item There is no center in $\subtree(u_3),\ldots,\subtree(u_k)$:\\
for $c'\in\subtree(u_i)$, the path $c'\to\cdots\to u_i\to c\to u_1\to L_1$ gives
$\ECC(c')\geq 1+1+H_1>\ECC(c)$.
\end{itemize}
\item $H_1-1=H_2\geq H_3\geq H_4\geq\cdots\geq H_k$\\
$c,u_1$ are the two adjacent centers of $T$.
The longest simple path from $u_1$ is $u_1\to c\to u_2\to L_2$, so $\ECC(u_1)=2+H_2=H_1+1=\ECC(c)$.
\begin{itemize}
\item There is no center in $\subtree(u_1)-\{u_1\}$:\\
the path $c'\to\cdots\to u_1\to c\to u_2\to L_2$ gives
$\ECC(c')\geq 2+1+H_2=H_1+2>\ECC(c)$.
\item There is no center in $\subtree(u_2),\ldots,\subtree(u_k)$:\\
for $c'\in\subtree(u_i)$ ($i\geq 2$), the path $c'\to\cdots\to c\to u_1\to L_1$ gives
$\ECC(c')\geq 1+1+H_1=H_1+2>\ECC(c)$.
\end{itemize}
\item $H_1-2\geq H_2\geq H_3\geq\cdots\geq H_k$\\
This is impossible.
Otherwise $\ECC(u_1)=\max(H_1,2+H_2)=H_1<H_1+1 =\ECC(c)$,
so $c$ could not be a center of $T$.
Contradiction.
\end{enumerate}
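The case analysis also justifies the classic algorithm for computing the centers: repeatedly strip all current leaves until at most two vertices remain. A sketch, assuming the tree is given as an adjacency-set dict:
\begin{minted}{python}
def centers(adj):
    adj = {v: set(ns) for v, ns in adj.items()}  # defensive copy
    while len(adj) > 2:
        leaves = [v for v, ns in adj.items() if len(ns) <= 1]
        for v in leaves:
            for w in adj[v]:
                adj[w].discard(v)
            del adj[v]
    return set(adj)

# path 1-2-3-4-5 has the unique center 3
print(centers({1: {2}, 2: {1, 3}, 3: {2, 4}, 4: {3, 5}, 5: {4}}))
\end{minted}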
\end{document}
|
{-# OPTIONS --cubical #-}
module Cubical.Categories.Functor where
open import Cubical.Foundations.Prelude
open import Cubical.HITs.PropositionalTruncation
open import Cubical.Categories.Category
private
variable
ℓ𝒞 ℓ𝒞' ℓ𝒟 ℓ𝒟' : Level
record Functor (𝒞 : Precategory ℓ𝒞 ℓ𝒞') (𝒟 : Precategory ℓ𝒟 ℓ𝒟') : Type (ℓ-max (ℓ-max ℓ𝒞 ℓ𝒞') (ℓ-max ℓ𝒟 ℓ𝒟')) where
no-eta-equality
open Precategory
field
F-ob : 𝒞 .ob → 𝒟 .ob
F-hom : {x y : 𝒞 .ob} → 𝒞 .hom x y → 𝒟 .hom (F-ob x) (F-ob y)
F-idn : {x : 𝒞 .ob} → F-hom (𝒞 .idn x) ≡ 𝒟 .idn (F-ob x)
F-seq : {x y z : 𝒞 .ob} (f : 𝒞 .hom x y) (g : 𝒞 .hom y z) → F-hom (𝒞 .seq f g) ≡ 𝒟 .seq (F-hom f) (F-hom g)
is-full = (x y : _) (F[f] : 𝒟 .hom (F-ob x) (F-ob y)) → ∥ Σ (𝒞 .hom x y) (λ f → F-hom f ≡ F[f]) ∥
is-faithful = (x y : _) (f g : 𝒞 .hom x y) → F-hom f ≡ F-hom g → f ≡ g
|
\documentclass[journal]{IEEEtran}
\usepackage{graphicx}
\graphicspath{ {../report/} }
\usepackage{hyperref}
\ifCLASSINFOpdf
\else
\fi
\hyphenation{op-tical net-works semi-conduc-tor}
\begin{document}
\title{GPU Optimized Pedestrian Detection}
\author{Sam Kreter}
\markboth{High Performance Computing}
{Kreter: GPU Optimized Pedestrian Detection}
\maketitle
\begin{abstract}
The OpenCV library has good CPU and GPU implementations of a Histogram of Oriented Gradients as well as of a Support Vector Machine. This paper goes through a comparison of the two implementations for finding and identifying objects in images of different sizes. It also looks into the effect that the number of threads per block has on the final computation time of the GPU implementation.
\end{abstract}
\begin{IEEEkeywords}
HOG (Histogram of Oriented Gradients), SVM (Support Vector Machine), OpenCV, GPU, Cuda
\end{IEEEkeywords}
\IEEEpeerreviewmaketitle
\section{Introduction}
\IEEEPARstart{P}{edestrian} detection is a main component in many applications such as crowd control systems and people tracking. One common way of implementing a pedestrian detection system is to use a Histogram of Oriented Gradients to extract the features from an image and a Support Vector Machine to classify each element of the image as a person or not. HOG works by segmenting the image into many windows and then creating a histogram of the gradient orientations of each cell of the segment. The histograms are then concatenated together to create a feature vector of floating-point values. The SVM then takes the feature vector to classify the objects found in the image. \\
\section{Implementation}
In order to implement the pedestrian detection system, it is necessary to have an implementation of a Histogram of Oriented Gradients and an SVM that is trained on the HOG features of a person in different walking positions. The OpenCV library comes with both a standard CPU implementation and an accelerated GPU implementation of the HOG feature extraction as well as a pre-trained SVM. \\
\subsection{CPU Implementation}
For OpenCV’s CPU implementation, the HOG feature extractor is abstracted into an object where simple function calls are used to set the different properties and load the image. The object comes with a built-in function “getDefaultPeopleDetector()” that returns OpenCV’s pre-trained SVM, which is then passed into a simple “setSVMDetector()” call to set the SVM for the object. Then, with one call to the detectMultiScale function, OpenCV computes the HOG for the image and returns the specifications, such as the location, width, and height, of the detected objects.
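As an illustration of this interface, a minimal sketch in Python is shown below; the input file name and the detection parameters are placeholders, and the C++ API used in this project exposes the same calls.
\begin{verbatim}
import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(
    cv2.HOGDescriptor_getDefaultPeopleDetector())

img = cv2.imread("people.png")  # placeholder image
rects, weights = hog.detectMultiScale(
    img, winStride=(8, 8))
for (x, y, w, h) in rects:
    cv2.rectangle(img, (x, y), (x + w, y + h),
                  (0, 255, 0), 2)
\end{verbatim}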
From an interface perspective, the GPU implementation works almost the same as the CPU implementation. It is abstracted into an object, but instead of instantiating the object directly, a function call returns a pointer to it. The rest of the implementation follows the CPU version, except that before detection a call to “upload” copies the image to the GPU, an abstraction over cudaMemcpy. \\
\subsection{GPU Implementation}
The GPU implementation under the hood makes use of three kernel functions that run on the GPU: one to compute the gradients, one to compute the histograms, and one to classify the histograms. Due to the abstraction that OpenCV uses around the Cuda code for the GPU kernels, it was necessary to build the OpenCV library from source and recompile the code base with changes to the number of threads per block passed to the kernels on the GPU.
The library’s original code had 256 hard-coded as the number of threads per block in the compute-gradients and classify-histogram kernels. The compute-histogram kernel uses the number of cells in the histogram as the basis for its number of threads per block. \\
After compiling and installing the modified library, I put together both the CPU and GPU implementations in order to build the pedestrian detector.
\section{Experiments}
In order to get quantitative results for the speedup that comes with the GPU, I used a simple image of 4 people walking. I then timed both the CPU and GPU implementations’ computation times. I ran this 5 times for each of the image sizes 550x366, 1100x732, 2200x1464, 2750x1830, and 5500x3660, as well as recompiling the OpenCV library for each of the threads-per-block sizes 1, 32, 128, 256, 512, and 1024. I then took the average of the 5 runs for each combination to get more stable final results. I found that the GPU had its greatest speedup, 4 times faster than the CPU, at 128 threads per block and an image size of 2750x1830 or 5500x3660. \\
\begin{figure}[h]
\caption{Size of the Image vs the Computation Time. All GPU kernels are being called with 128 threads per block.}
\includegraphics[width=9cm]{SizeTime}
\label{fig:SizeTime}
\end{figure}
Figure \ref{fig:SizeTime} shows the increasing speedup of the GPU implementation as the size of the image increases. The figure also shows that as the image size increases, the CPU implementation’s computation time grows at a much faster rate than the GPU implementation’s does. \\
\begin{figure}[h]
\caption{Number of threads per block vs the Computation Time. It is hard to tell at this scale, but the GPU times continually decrease until 128 threads per block and then slowly increase. An image size of 2750x1830 is used for all tests.}
\includegraphics[width=9cm]{ThreadTime}
\label{fig:ThreadTime}
\end{figure}
Figure \ref{fig:ThreadTime} shows that at 1 thread per block the GPU performs much worse than the CPU implementation. This is due to the slowdown of having to move the data from main memory to the GPU’s memory and then retrieve the results. Beyond 1 thread per block the GPU takes the advantage, hitting its maximum speedup at 128 threads per block and then very slowly losing speedup as more threads per block are added. This is due to the overhead of idle threads in each block that have no work to do on the image.
\section{Challenges}
One of the biggest challenges I faced was the compilation of the OpenCV library with Cuda capabilities from source. This was not the first time I had built and installed libraries from source, but it turned out to be very challenging to set the correct build flags to compile for the specific GPU software on the AWS Nvidia AMI that I used for deployment. It took many iterations of rebuilding with different flags and installing dependencies to finally get a working version that could be modified, rebuilt, and reinstalled. \\
Another challenge I faced was dealing with the AWS Nvidia AMI. None of the Cuda examples were installed with the SDK. One of the biggest problems I faced with the AMI was that I would repeatedly get the error “(-217) all CUDA-capable devices are busy or unavailable in function allocate” and would have to completely stop the machine and then start it again in order to restore a proper connection with the GPUs. \\
A final challenge was dealing with the differences between OpenCV 2’s and 3’s implementations of the Cuda abstraction. In order to make the final program, I went through a lot of the source code to find out how to use the interfaces for the GPU. \\
\section{Conclusion}
OpenCV shows that a GPU pedestrian detection system using HOG and the default pre-trained SVM should be around 8 times faster than the CPU implementation, which can be seen in figure \ref{fig:opencvSpeed}. My final results show that at the fastest point, which is using 128 threads per block on an image of 2750x1830 or larger, the GPU implementation is only 4 times faster than the CPU implementation. \\
\begin{figure}[h]
\caption{OpenCV's chart for GPU speed up compared to CPU.}
\includegraphics[width=9cm]{opencvSpeed}
\label{fig:opencvSpeed}
\end{figure}
I also found it interesting that, beyond 1 thread per block, the number of threads per block had a very small impact on the processing speed of the GPU. One reason for this is that calculating the grid size and the number of blocks based on the threads per block helps eliminate idle threads.
\ifCLASSOPTIONcaptionsoff
\newpage
\fi
\begin{thebibliography}{1}
\bibitem{OpenCVCuda}
\emph{OpenCV Cuda Comparison} \url{http://opencv.org/platforms/cuda.html}
\bibitem{OpenCVHog}
Tulloch, Andrew. \emph{OpenCV Cuda HOG Docs} \url{http://docs.opencv.org/3.0-beta/modules/cuda/doc/object_detection.html}
\end{thebibliography}
\end{document} |
OFFICIAL WEBSITE®. Vista Alegre Hostel in Alcudia, Majorca.
The Vista Alegre Hostel By Eurotels is an affordable option for enjoying a pleasant holiday in Alcudia. It is situated on the beachfront at the Alcudia Port, with views of the bay and the Alcudiamar Nautical Club, ocean-view rooms with balconies, and free Wi-Fi Internet access throughout the hotel. Discover our Vista Alegre Hostel at the Alcudia Port now!
REBOL [
Title: "Red/System compilation error test"
File: %comp-err-test.r
License: "BSD-3 - https://github.com/dockimbel/Red/blob/master/BSD-3-License.txt"
]
change-dir %../ ;; revert to tests/ dir (from runnable)
~~~start-file~~~ "comp-err"
--test-- "sample compilation error test"
--compile-this {
Red/System []
i := 1;
}
--assert parse qt/comp-output [
thru "*** Compilation Error: undefined symbol"
thru "at line: 3"
thru "near: [" thru "i := 1" thru "]"
to end
]
--clean
--test-- "error line reporting test"
--compile-this {
Red/System []
foo: func [][
if true [
either true [
a
][
123
]
]
]
}
--assert parse qt/comp-output [
thru "*** Compilation Error: undefined symbol: a"
thru "at line: 6"
to end
]
--clean
~~~end-file~~~
|
-- Andreas, 2014-09-23
-- Syntax declaration for overloaded constructor.
-- {-# OPTIONS -v scope.operators:50 #-}
syntax c x = ⟦ x ⟧
data D1 : Set where
c : D1
data D2 : Set where
c : D1 → D2
test : D2
test = ⟦ c ⟧
-- Should work.
|
module JS.Nullable
import Data.List.Elem
import JS.Inheritance
import JS.Marshall
import JS.Util
export
data Nullable : Type -> Type where [external]
%foreign "javascript:lambda:()=>null"
prim__null : AnyPtr
export
null : Nullable a
null = believe_me prim__null
export
nonNull : a -> Nullable a
nonNull = believe_me
||| Tests whether a value of questionable origin is null
export
isNull : a -> Bool
isNull = eqv prim__null
export
maybeToNullable : Maybe a -> Nullable a
maybeToNullable = maybe null nonNull
export
mayUp : (0 _ : JSType a) => Maybe a -> {auto 0 _ : Elem b (Types a)} -> Nullable b
mayUp x = maybe null (\v => nonNull $ up v) x
export
nullableToMaybe : Nullable a -> Maybe a
nullableToMaybe v = if isNull v then Nothing else Just (believe_me v)
export
ToFFI a b => ToFFI (Maybe a) (Nullable b) where
toFFI = maybeToNullable . map toFFI
export
FromFFI a b => FromFFI (Maybe a) (Nullable b) where
fromFFI v = case nullableToMaybe v of
Nothing => Just Nothing
Just x => map Just $ fromFFI x
export
SafeCast a => SafeCast (Nullable a) where
safeCast ptr = if isNull ptr
then Just null
else map nonNull $ safeCast ptr
|
lemma space_empty: "space M = {} \<Longrightarrow> M = count_space {}" |
Suppose $S$ is an open set in the complex plane, and $\mathcal{F}$ is a sequence of holomorphic functions on $S$ such that the range of $\mathcal{F}$ is a bounded set. Then there exists a subsequence $\mathcal{F}'$ of $\mathcal{F}$ and a holomorphic function $g$ on $S$ such that $\mathcal{F}'$ converges uniformly to $g$ on every compact subset of $S$. |
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory Ipc_R
imports Finalise_R
begin
context begin interpretation Arch . (*FIXME: arch_split*)
lemmas lookup_slot_wrapper_defs'[simp] =
lookupSourceSlot_def lookupTargetSlot_def lookupPivotSlot_def
lemma getMessageInfo_corres: "corres ((=) \<circ> message_info_map)
(tcb_at t) (tcb_at' t)
(get_message_info t) (getMessageInfo t)"
apply (rule corres_guard_imp)
apply (unfold get_message_info_def getMessageInfo_def fun_app_def)
apply (simp add: ARM_H.msgInfoRegister_def
ARM.msgInfoRegister_def ARM_A.msg_info_register_def)
apply (rule corres_split_eqr[OF asUser_getRegister_corres])
apply (rule corres_trivial, simp add: message_info_from_data_eqv)
apply (wp | simp)+
done
lemma get_mi_inv'[wp]: "\<lbrace>I\<rbrace> getMessageInfo a \<lbrace>\<lambda>x. I\<rbrace>"
by (simp add: getMessageInfo_def, wp)
definition
"get_send_cap_relation rv rv' \<equiv>
(case rv of Some (c, cptr) \<Rightarrow> (\<exists>c' cptr'. rv' = Some (c', cptr') \<and>
cte_map cptr = cptr' \<and>
cap_relation c c')
| None \<Rightarrow> rv' = None)"
lemma cap_relation_mask:
"\<lbrakk> cap_relation c c'; msk' = rights_mask_map msk \<rbrakk> \<Longrightarrow>
cap_relation (mask_cap msk c) (maskCapRights msk' c')"
by simp
lemma lsfco_cte_at':
"\<lbrace>valid_objs' and valid_cap' cap\<rbrace>
lookupSlotForCNodeOp f cap idx depth
\<lbrace>\<lambda>rv. cte_at' rv\<rbrace>, -"
apply (simp add: lookupSlotForCNodeOp_def)
apply (rule conjI)
prefer 2
apply clarsimp
apply (wp)
apply (clarsimp simp: split_def unlessE_def
split del: if_split)
apply (wp hoare_drop_imps throwE_R)
done
declare unifyFailure_wp [wp]
(* FIXME: move *)
lemma unifyFailure_wp_E [wp]:
"\<lbrace>P\<rbrace> f -, \<lbrace>\<lambda>_. E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> unifyFailure f -, \<lbrace>\<lambda>_. E\<rbrace>"
unfolding validE_E_def
by (erule unifyFailure_wp)+
(* FIXME: move *)
lemma unifyFailure_wp2 [wp]:
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>\<lambda>_. Q\<rbrace>"
shows "\<lbrace>P\<rbrace> unifyFailure f \<lbrace>\<lambda>_. Q\<rbrace>"
by (wp x, simp)
definition
ct_relation :: "captransfer \<Rightarrow> cap_transfer \<Rightarrow> bool"
where
"ct_relation ct ct' \<equiv>
ct_receive_root ct = to_bl (ctReceiveRoot ct')
\<and> ct_receive_index ct = to_bl (ctReceiveIndex ct')
\<and> ctReceiveDepth ct' = unat (ct_receive_depth ct)"
(* MOVE *)
lemma valid_ipc_buffer_ptr_aligned_2:
"\<lbrakk>valid_ipc_buffer_ptr' a s; is_aligned y 2 \<rbrakk> \<Longrightarrow> is_aligned (a + y) 2"
unfolding valid_ipc_buffer_ptr'_def
apply clarsimp
apply (erule (1) aligned_add_aligned)
apply (simp add: msg_align_bits)
done
(* MOVE *)
lemma valid_ipc_buffer_ptr'D2:
"\<lbrakk>valid_ipc_buffer_ptr' a s; y < max_ipc_words * 4; is_aligned y 2\<rbrakk> \<Longrightarrow> typ_at' UserDataT (a + y && ~~ mask pageBits) s"
unfolding valid_ipc_buffer_ptr'_def
apply clarsimp
apply (subgoal_tac "(a + y) && ~~ mask pageBits = a && ~~ mask pageBits")
apply simp
apply (rule mask_out_first_mask_some [where n = msg_align_bits])
apply (erule is_aligned_add_helper [THEN conjunct2])
apply (erule order_less_le_trans)
apply (simp add: msg_align_bits max_ipc_words )
apply simp
done
lemma loadCapTransfer_corres:
"corres ct_relation \<top> (valid_ipc_buffer_ptr' buffer) (load_cap_transfer buffer) (loadCapTransfer buffer)"
apply (simp add: load_cap_transfer_def loadCapTransfer_def
captransfer_from_words_def
capTransferDataSize_def capTransferFromWords_def
msgExtraCapBits_def word_size add.commute add.left_commute
msg_max_length_def msg_max_extra_caps_def word_size_def
msgMaxLength_def msgMaxExtraCaps_def msgLengthBits_def wordSize_def wordBits_def
del: upt.simps)
apply (rule corres_guard_imp)
apply (rule corres_split[OF load_word_corres])
apply (rule corres_split[OF load_word_corres])
apply (rule corres_split[OF load_word_corres])
apply (rule_tac P=\<top> and P'=\<top> in corres_inst)
apply (clarsimp simp: ct_relation_def)
apply (wp no_irq_loadWord)+
apply simp
apply (simp add: conj_comms)
apply safe
apply (erule valid_ipc_buffer_ptr_aligned_2, simp add: is_aligned_def)+
apply (erule valid_ipc_buffer_ptr'D2, simp add: max_ipc_words, simp add: is_aligned_def)+
done
lemma getReceiveSlots_corres:
"corres (\<lambda>xs ys. ys = map cte_map xs)
(tcb_at receiver and valid_objs and pspace_aligned)
(tcb_at' receiver and valid_objs' and pspace_aligned' and pspace_distinct' and
case_option \<top> valid_ipc_buffer_ptr' recv_buf)
(get_receive_slots receiver recv_buf)
(getReceiveSlots receiver recv_buf)"
apply (cases recv_buf)
apply (simp add: getReceiveSlots_def)
apply (simp add: getReceiveSlots_def split_def)
apply (rule corres_guard_imp)
apply (rule corres_split[OF loadCapTransfer_corres])
apply (rule corres_empty_on_failure)
apply (rule corres_splitEE)
apply (rule corres_unify_failure)
apply (rule lookup_cap_corres)
apply (simp add: ct_relation_def)
apply simp
apply (rule corres_splitEE)
apply (rule corres_unify_failure)
apply (simp add: ct_relation_def)
apply (erule lookupSlotForCNodeOp_corres [OF _ refl])
apply simp
apply (simp add: split_def liftE_bindE unlessE_whenE)
apply (rule corres_split[OF get_cap_corres])
apply (rule corres_split_norE)
apply (rule corres_whenE)
apply (case_tac cap, auto)[1]
apply (rule corres_trivial, simp)
apply simp
apply (rule corres_trivial, simp add: returnOk_def)
apply (wp lookup_cap_valid lookup_cap_valid' lsfco_cte_at | simp)+
done
lemma get_recv_slot_inv'[wp]:
"\<lbrace> P \<rbrace> getReceiveSlots receiver buf \<lbrace>\<lambda>rv'. P \<rbrace>"
apply (case_tac buf)
apply (simp add: getReceiveSlots_def)
apply (simp add: getReceiveSlots_def
split_def unlessE_def)
apply (wp | simp)+
done
lemma get_rs_cte_at'[wp]:
"\<lbrace>\<top>\<rbrace>
getReceiveSlots receiver recv_buf
\<lbrace>\<lambda>rv s. \<forall>x \<in> set rv. cte_wp_at' (\<lambda>c. cteCap c = capability.NullCap) x s\<rbrace>"
apply (cases recv_buf)
apply (simp add: getReceiveSlots_def)
apply (wp,simp)
apply (clarsimp simp add: getReceiveSlots_def
split_def whenE_def unlessE_whenE)
apply wp
apply simp
apply (rule getCTE_wp)
apply (simp add: cte_wp_at_ctes_of cong: conj_cong)
apply wp+
apply simp
done
lemma get_rs_real_cte_at'[wp]:
"\<lbrace>valid_objs'\<rbrace>
getReceiveSlots receiver recv_buf
\<lbrace>\<lambda>rv s. \<forall>x \<in> set rv. real_cte_at' x s\<rbrace>"
apply (cases recv_buf)
apply (simp add: getReceiveSlots_def)
apply (wp,simp)
apply (clarsimp simp add: getReceiveSlots_def
split_def whenE_def unlessE_whenE)
apply wp
apply simp
apply (wp hoare_drop_imps)[1]
apply simp
apply (wp lookup_cap_valid')+
apply simp
done
declare word_div_1 [simp]
declare word_minus_one_le [simp]
declare word32_minus_one_le [simp]
lemma loadWordUser_corres':
"\<lbrakk> y < unat max_ipc_words; y' = of_nat y * 4 \<rbrakk> \<Longrightarrow>
corres (=) \<top> (valid_ipc_buffer_ptr' a) (load_word_offs a y) (loadWordUser (a + y'))"
apply simp
apply (erule loadWordUser_corres)
done
declare loadWordUser_inv [wp]
lemma getExtraCptrs_inv[wp]:
"\<lbrace>P\<rbrace> getExtraCPtrs buf mi \<lbrace>\<lambda>rv. P\<rbrace>"
apply (cases mi, cases buf, simp_all add: getExtraCPtrs_def)
apply (wp dmo_inv' mapM_wp' loadWord_inv)
done
lemma badge_derived_mask [simp]:
"badge_derived' (maskCapRights R c) c' = badge_derived' c c'"
by (simp add: badge_derived'_def)
declare derived'_not_Null [simp]
lemma maskCapRights_vsCapRef[simp]:
"vsCapRef (maskCapRights msk cap) = vsCapRef cap"
unfolding vsCapRef_def
apply (cases cap, simp_all add: maskCapRights_def isCap_simps Let_def)
apply (rename_tac arch_capability)
apply (case_tac arch_capability;
simp add: maskCapRights_def ARM_H.maskCapRights_def isCap_simps Let_def)
done
lemma corres_set_extra_badge:
"b' = b \<Longrightarrow>
corres dc (in_user_frame buffer)
(valid_ipc_buffer_ptr' buffer and
(\<lambda>_. msg_max_length + 2 + n < unat max_ipc_words))
(set_extra_badge buffer b n) (setExtraBadge buffer b' n)"
apply (rule corres_gen_asm2)
apply (drule storeWordUser_corres [where a=buffer and w=b])
apply (simp add: set_extra_badge_def setExtraBadge_def buffer_cptr_index_def
bufferCPtrOffset_def Let_def)
apply (simp add: word_size word_size_def wordSize_def wordBits_def
bufferCPtrOffset_def buffer_cptr_index_def msgMaxLength_def
msg_max_length_def msgLengthBits_def store_word_offs_def
add.commute add.left_commute)
done
crunch typ_at': setExtraBadge "\<lambda>s. P (typ_at' T p s)"
lemmas setExtraBadge_typ_ats' [wp] = typ_at_lifts [OF setExtraBadge_typ_at']
crunch valid_pspace' [wp]: setExtraBadge valid_pspace'
crunch cte_wp_at' [wp]: setExtraBadge "cte_wp_at' P p"
crunch ipc_buffer' [wp]: setExtraBadge "valid_ipc_buffer_ptr' buffer"
crunch inv'[wp]: getExtraCPtr P (wp: dmo_inv' loadWord_inv)
lemmas unifyFailure_discard2
= corres_injection[OF id_injection unifyFailure_injection, simplified]
lemma deriveCap_not_null:
"\<lbrace>\<top>\<rbrace> deriveCap slot cap \<lbrace>\<lambda>rv. K (rv \<noteq> NullCap \<longrightarrow> cap \<noteq> NullCap)\<rbrace>,-"
apply (simp add: deriveCap_def split del: if_split)
apply (case_tac cap)
apply (simp_all add: Let_def isCap_simps)
apply wp
apply simp
done
lemma deriveCap_derived_foo:
"\<lbrace>\<lambda>s. \<forall>cap'. (cte_wp_at' (\<lambda>cte. badge_derived' cap (cteCap cte)
\<and> capASID cap = capASID (cteCap cte) \<and> cap_asid_base' cap = cap_asid_base' (cteCap cte)
\<and> cap_vptr' cap = cap_vptr' (cteCap cte)) slot s
\<and> valid_objs' s \<and> cap' \<noteq> NullCap \<longrightarrow> cte_wp_at' (is_derived' (ctes_of s) slot cap' \<circ> cteCap) slot s)
\<and> (cte_wp_at' (untyped_derived_eq cap \<circ> cteCap) slot s
\<longrightarrow> cte_wp_at' (untyped_derived_eq cap' \<circ> cteCap) slot s)
\<and> (s \<turnstile>' cap \<longrightarrow> s \<turnstile>' cap') \<and> (cap' \<noteq> NullCap \<longrightarrow> cap \<noteq> NullCap) \<longrightarrow> Q cap' s\<rbrace>
deriveCap slot cap \<lbrace>Q\<rbrace>,-"
using deriveCap_derived[where slot=slot and c'=cap] deriveCap_valid[where slot=slot and c=cap]
deriveCap_untyped_derived[where slot=slot and c'=cap] deriveCap_not_null[where slot=slot and cap=cap]
apply (clarsimp simp: validE_R_def validE_def valid_def split: sum.split)
apply (frule in_inv_by_hoareD[OF deriveCap_inv])
apply (clarsimp simp: o_def)
apply (drule spec, erule mp)
apply safe
apply fastforce
apply (drule spec, drule(1) mp)
apply fastforce
apply (drule spec, drule(1) mp)
apply fastforce
apply (drule spec, drule(1) bspec, simp)
done
lemma valid_mdb_untyped_incD':
"valid_mdb' s \<Longrightarrow> untyped_inc' (ctes_of s)"
by (simp add: valid_mdb'_def valid_mdb_ctes_def)
lemma cteInsert_cte_wp_at:
"\<lbrace>\<lambda>s. cte_wp_at' (\<lambda>c. is_derived' (ctes_of s) src cap (cteCap c)) src s
\<and> valid_mdb' s \<and> valid_objs' s
\<and> (if p = dest then P cap
else cte_wp_at' (\<lambda>c. P (maskedAsFull (cteCap c) cap)) p s)\<rbrace>
cteInsert cap src dest
\<lbrace>\<lambda>uu. cte_wp_at' (\<lambda>c. P (cteCap c)) p\<rbrace>"
apply (simp add: cteInsert_def)
apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp static_imp_wp
| clarsimp simp: comp_def
| unfold setUntypedCapAsFull_def)+
apply (drule cte_at_cte_wp_atD)
apply (elim exE)
apply (rule_tac x=cte in exI)
apply clarsimp
apply (drule cte_at_cte_wp_atD)
apply (elim exE)
apply (rule_tac x=ctea in exI)
apply clarsimp
apply (cases "p=dest")
apply (clarsimp simp: cte_wp_at'_def)
apply (cases "p=src")
apply clarsimp
apply (intro conjI impI)
apply ((clarsimp simp: cte_wp_at'_def maskedAsFull_def split: if_split_asm)+)[2]
apply clarsimp
apply (rule conjI)
apply (clarsimp simp: maskedAsFull_def cte_wp_at_ctes_of split:if_split_asm)
apply (erule disjE) prefer 2 apply simp
apply (clarsimp simp: is_derived'_def isCap_simps)
apply (drule valid_mdb_untyped_incD')
apply (case_tac cte, case_tac cteb, clarsimp)
apply (drule untyped_incD', (simp add: isCap_simps)+)
apply (frule(1) ctes_of_valid'[where p = p])
apply (clarsimp simp:valid_cap'_def capAligned_def split:if_splits)
apply (drule_tac y ="of_nat fb" in word_plus_mono_right[OF _ is_aligned_no_overflow',rotated])
apply simp+
apply (rule word_of_nat_less)
apply simp
apply (simp add:p_assoc_help)
apply (simp add: max_free_index_def)
apply (clarsimp simp: maskedAsFull_def is_derived'_def badge_derived'_def
isCap_simps capMasterCap_def cte_wp_at_ctes_of
split: if_split_asm capability.splits)
done
lemma cteInsert_weak_cte_wp_at3:
assumes imp:"\<And>c. P c \<Longrightarrow> \<not> isUntypedCap c"
shows " \<lbrace>\<lambda>s. if p = dest then P cap
else cte_wp_at' (\<lambda>c. P (cteCap c)) p s\<rbrace>
cteInsert cap src dest
\<lbrace>\<lambda>uu. cte_wp_at' (\<lambda>c. P (cteCap c)) p\<rbrace>"
by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp
| clarsimp simp: comp_def cteInsert_def
| unfold setUntypedCapAsFull_def
| auto simp: cte_wp_at'_def dest!: imp)+
lemma maskedAsFull_null_cap[simp]:
"(maskedAsFull x y = capability.NullCap) = (x = capability.NullCap)"
"(capability.NullCap = maskedAsFull x y) = (x = capability.NullCap)"
by (case_tac x, auto simp:maskedAsFull_def isCap_simps )
lemma maskCapRights_eq_null:
"(RetypeDecls_H.maskCapRights r xa = capability.NullCap) =
(xa = capability.NullCap)"
apply (cases xa; simp add: maskCapRights_def isCap_simps)
apply (rename_tac arch_capability)
apply (case_tac arch_capability)
apply (simp_all add: ARM_H.maskCapRights_def isCap_simps)
done
lemma cte_refs'_maskedAsFull[simp]:
"cte_refs' (maskedAsFull a b) = cte_refs' a"
apply (rule ext)+
apply (case_tac a)
apply (clarsimp simp:maskedAsFull_def isCap_simps)+
done
lemma transferCapsToSlots_corres:
"\<lbrakk> list_all2 (\<lambda>(cap, slot) (cap', slot'). cap_relation cap cap'
\<and> slot' = cte_map slot) caps caps';
mi' = message_info_map mi \<rbrakk> \<Longrightarrow>
corres ((=) \<circ> message_info_map)
(\<lambda>s. valid_objs s \<and> pspace_aligned s \<and> pspace_distinct s \<and> valid_mdb s
\<and> valid_list s
\<and> (case ep of Some x \<Rightarrow> ep_at x s | _ \<Rightarrow> True)
\<and> (\<forall>x \<in> set slots. cte_wp_at (\<lambda>cap. cap = cap.NullCap) x s \<and>
real_cte_at x s)
\<and> (\<forall>(cap, slot) \<in> set caps. valid_cap cap s \<and>
cte_wp_at (\<lambda>cp'. (cap \<noteq> cap.NullCap \<longrightarrow> cp'\<noteq>cap \<longrightarrow> cp' = masked_as_full cap cap )) slot s )
\<and> distinct slots
\<and> in_user_frame buffer s)
(\<lambda>s. valid_pspace' s
\<and> (case ep of Some x \<Rightarrow> ep_at' x s | _ \<Rightarrow> True)
\<and> (\<forall>x \<in> set (map cte_map slots).
cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) x s
\<and> real_cte_at' x s)
\<and> distinct (map cte_map slots)
\<and> valid_ipc_buffer_ptr' buffer s
\<and> (\<forall>(cap, slot) \<in> set caps'. valid_cap' cap s \<and>
cte_wp_at' (\<lambda>cte. cap \<noteq> NullCap \<longrightarrow> cteCap cte \<noteq> cap \<longrightarrow> cteCap cte = maskedAsFull cap cap) slot s)
\<and> 2 + msg_max_length + n + length caps' < unat max_ipc_words)
(transfer_caps_loop ep buffer n caps slots mi)
(transferCapsToSlots ep buffer n caps'
(map cte_map slots) mi')"
(is "\<lbrakk> list_all2 ?P caps caps'; ?v \<rbrakk> \<Longrightarrow> ?corres")
proof (induct caps caps' arbitrary: slots n mi mi' rule: list_all2_induct)
case Nil
show ?case using Nil.prems by (case_tac mi, simp)
next
case (Cons x xs y ys slots n mi mi')
note if_weak_cong[cong] if_cong [cong del]
assume P: "?P x y"
show ?case using Cons.prems P
apply (clarsimp split del: if_split)
apply (simp add: Let_def split_def word_size liftE_bindE
word_bits_conv[symmetric] split del: if_split)
apply (rule corres_const_on_failure)
apply (simp add: dc_def[symmetric] split del: if_split)
apply (rule corres_guard_imp)
apply (rule corres_if2)
apply (case_tac "fst x", auto simp add: isCap_simps)[1]
apply (rule corres_split[OF corres_set_extra_badge])
apply (clarsimp simp: is_cap_simps)
apply (drule conjunct1)
apply simp
apply (rule corres_rel_imp, rule Cons.hyps, simp_all)[1]
apply (case_tac mi, simp)
apply (simp add: split_def)
apply (wp hoare_vcg_const_Ball_lift)
apply (subgoal_tac "obj_ref_of (fst x) = capEPPtr (fst y)")
prefer 2
apply (clarsimp simp: is_cap_simps)
apply (simp add: split_def)
apply (wp hoare_vcg_const_Ball_lift)
apply (rule_tac P="slots = []" and Q="slots \<noteq> []" in corres_disj_division)
apply simp
apply (rule corres_trivial, simp add: returnOk_def)
apply (case_tac mi, simp)
apply (simp add: list_case_If2 split del: if_split)
apply (rule corres_splitEE)
apply (rule unifyFailure_discard2)
apply (case_tac mi, clarsimp)
apply (rule deriveCap_corres)
apply (simp add: remove_rights_def)
apply clarsimp
apply (rule corres_split_norE)
apply (rule corres_whenE)
apply (case_tac cap', auto)[1]
apply (rule corres_trivial, simp)
apply (case_tac mi, simp)
apply simp
apply (simp add: liftE_bindE)
apply (rule corres_split_nor)
apply (rule cteInsert_corres, simp_all add: hd_map)[1]
apply (simp add: tl_map)
apply (rule corres_rel_imp, rule Cons.hyps, simp_all)[1]
apply (wp valid_case_option_post_wp hoare_vcg_const_Ball_lift
hoare_vcg_const_Ball_lift cap_insert_weak_cte_wp_at)
apply (wp hoare_vcg_const_Ball_lift | simp add:split_def del: imp_disj1)+
apply (wp cap_insert_cte_wp_at)
apply (wp valid_case_option_post_wp hoare_vcg_const_Ball_lift
cteInsert_valid_pspace
| simp add: split_def)+
apply (wp cteInsert_weak_cte_wp_at hoare_valid_ipc_buffer_ptr_typ_at')+
apply (wpsimp wp: hoare_vcg_const_Ball_lift cteInsert_cte_wp_at valid_case_option_post_wp
simp: split_def)
apply (unfold whenE_def)
apply wp+
apply (clarsimp simp: conj_comms ball_conj_distrib split del: if_split)
apply (rule_tac Q' ="\<lambda>cap' s. (cap'\<noteq> cap.NullCap \<longrightarrow>
cte_wp_at (is_derived (cdt s) (a, b) cap') (a, b) s
\<and> QM s cap')" for QM
in hoare_post_imp_R)
prefer 2
apply clarsimp
apply assumption
apply (subst imp_conjR)
apply (rule hoare_vcg_conj_liftE_R)
apply (rule derive_cap_is_derived)
apply (wp derive_cap_is_derived_foo)+
apply (simp split del: if_split)
apply (rule_tac Q' ="\<lambda>cap' s. (cap'\<noteq> capability.NullCap \<longrightarrow>
cte_wp_at' (\<lambda>c. is_derived' (ctes_of s) (cte_map (a, b)) cap' (cteCap c)) (cte_map (a, b)) s
\<and> QM s cap')" for QM
in hoare_post_imp_R)
prefer 2
apply clarsimp
apply assumption
apply (subst imp_conjR)
apply (rule hoare_vcg_conj_liftE_R)
apply (rule hoare_post_imp_R[OF deriveCap_derived])
apply (clarsimp simp:cte_wp_at_ctes_of)
apply (wp deriveCap_derived_foo)
apply (clarsimp simp: cte_wp_at_caps_of_state remove_rights_def
real_cte_tcb_valid if_apply_def2
split del: if_split)
apply (rule conjI, (clarsimp split del: if_split)+)
apply (clarsimp simp:conj_comms split del:if_split)
apply (intro conjI allI)
apply (clarsimp split:if_splits)
apply (case_tac "cap = fst x",simp+)
apply (clarsimp simp:masked_as_full_def is_cap_simps cap_master_cap_simps)
apply (clarsimp split del: if_split)
apply (intro conjI)
apply (clarsimp simp:neq_Nil_conv)
apply (drule hd_in_set)
apply (drule(1) bspec)
apply (clarsimp split:if_split_asm)
apply (fastforce simp:neq_Nil_conv)
apply (intro ballI conjI)
apply (clarsimp simp:neq_Nil_conv)
apply (intro impI)
apply (drule(1) bspec[OF _ subsetD[rotated]])
apply (clarsimp simp:neq_Nil_conv)
apply (clarsimp split:if_splits)
apply clarsimp
apply (intro conjI)
apply (drule(1) bspec,clarsimp)+
subgoal for \<dots> aa _ _ capa
by (case_tac "capa = aa"; clarsimp split:if_splits simp:masked_as_full_def is_cap_simps)
apply (case_tac "isEndpointCap (fst y) \<and> capEPPtr (fst y) = the ep \<and> (\<exists>y. ep = Some y)")
apply (clarsimp simp:conj_comms split del:if_split)
apply (subst if_not_P)
apply clarsimp
apply (clarsimp simp:valid_pspace'_def cte_wp_at_ctes_of split del:if_split)
apply (intro conjI)
apply (case_tac "cteCap cte = fst y",clarsimp simp: badge_derived'_def)
apply (clarsimp simp: maskCapRights_eq_null maskedAsFull_def badge_derived'_def isCap_simps
split: if_split_asm)
apply (clarsimp split del: if_split)
apply (case_tac "fst y = capability.NullCap")
apply (clarsimp simp: neq_Nil_conv split del: if_split)+
apply (intro allI impI conjI)
apply (clarsimp split:if_splits)
apply (clarsimp simp:image_def)+
apply (thin_tac "\<forall>x\<in>set ys. Q x" for Q)
apply (drule(1) bspec)+
apply clarsimp+
apply (drule(1) bspec)
apply (rule conjI)
apply clarsimp+
apply (case_tac "cteCap cteb = ab")
by (clarsimp simp: isCap_simps maskedAsFull_def split:if_splits)+
qed
declare constOnFailure_wp [wp]
lemma transferCapsToSlots_pres1[crunch_rules]:
assumes x: "\<And>cap src dest. \<lbrace>P\<rbrace> cteInsert cap src dest \<lbrace>\<lambda>rv. P\<rbrace>"
assumes eb: "\<And>b n. \<lbrace>P\<rbrace> setExtraBadge buffer b n \<lbrace>\<lambda>_. P\<rbrace>"
shows "\<lbrace>P\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>rv. P\<rbrace>"
apply (induct caps arbitrary: slots n mi)
apply simp
apply (simp add: Let_def split_def whenE_def
cong: if_cong list.case_cong
split del: if_split)
apply (rule hoare_pre)
apply (wp x eb | assumption | simp split del: if_split | wpc
| wp (once) hoare_drop_imps)+
done
lemma cteInsert_cte_cap_to':
"\<lbrace>ex_cte_cap_to' p and cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) dest\<rbrace>
cteInsert cap src dest
\<lbrace>\<lambda>rv. ex_cte_cap_to' p\<rbrace>"
apply (simp add: ex_cte_cap_to'_def)
apply (rule hoare_pre)
apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState])
apply (clarsimp simp:cteInsert_def)
apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases
setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp)
apply (clarsimp simp:cte_wp_at_ctes_of)
apply (rule_tac x = "cref" in exI)
apply (rule conjI)
apply clarsimp+
done
declare maskCapRights_eq_null[simp]
crunch ex_cte_cap_wp_to' [wp]: setExtraBadge "ex_cte_cap_wp_to' P p"
(rule: ex_cte_cap_to'_pres)
crunch valid_objs' [wp]: setExtraBadge valid_objs'
crunch aligned' [wp]: setExtraBadge pspace_aligned'
crunch distinct' [wp]: setExtraBadge pspace_distinct'
lemma cteInsert_assume_Null:
"\<lbrace>P\<rbrace> cteInsert cap src dest \<lbrace>Q\<rbrace> \<Longrightarrow>
\<lbrace>\<lambda>s. cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) dest s \<longrightarrow> P s\<rbrace>
cteInsert cap src dest
\<lbrace>Q\<rbrace>"
apply (rule hoare_name_pre_state)
apply (erule impCE)
apply (simp add: cteInsert_def)
apply (rule hoare_seq_ext[OF _ getCTE_sp])+
apply (rule hoare_name_pre_state)
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (erule hoare_pre(1))
apply simp
done
crunch mdb'[wp]: setExtraBadge valid_mdb'
lemma cteInsert_weak_cte_wp_at2:
assumes weak:"\<And>c cap. P (maskedAsFull c cap) = P c"
shows
"\<lbrace>\<lambda>s. if p = dest then P cap else cte_wp_at' (\<lambda>c. P (cteCap c)) p s\<rbrace>
cteInsert cap src dest
\<lbrace>\<lambda>uu. cte_wp_at' (\<lambda>c. P (cteCap c)) p\<rbrace>"
apply (rule hoare_pre)
apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState])
apply (clarsimp simp:cteInsert_def)
apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases
setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp)
apply (clarsimp simp:cte_wp_at_ctes_of weak)
apply auto
done
lemma transferCapsToSlots_presM:
assumes x: "\<And>cap src dest. \<lbrace>\<lambda>s. P s \<and> (emx \<longrightarrow> cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) dest s \<and> ex_cte_cap_to' dest s)
\<and> (vo \<longrightarrow> valid_objs' s \<and> valid_cap' cap s \<and> real_cte_at' dest s)
\<and> (drv \<longrightarrow> cte_wp_at' (is_derived' (ctes_of s) src cap \<circ> cteCap) src s
\<and> cte_wp_at' (untyped_derived_eq cap o cteCap) src s
\<and> valid_mdb' s)
\<and> (pad \<longrightarrow> pspace_aligned' s \<and> pspace_distinct' s)\<rbrace>
cteInsert cap src dest \<lbrace>\<lambda>rv. P\<rbrace>"
assumes eb: "\<And>b n. \<lbrace>P\<rbrace> setExtraBadge buffer b n \<lbrace>\<lambda>_. P\<rbrace>"
shows "\<lbrace>\<lambda>s. P s
\<and> (emx \<longrightarrow> (\<forall>x \<in> set slots. ex_cte_cap_to' x s \<and> cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) x s) \<and> distinct slots)
\<and> (vo \<longrightarrow> valid_objs' s \<and> (\<forall>x \<in> set slots. real_cte_at' x s \<and> cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) x s)
\<and> (\<forall>x \<in> set caps. s \<turnstile>' fst x ) \<and> distinct slots)
\<and> (pad \<longrightarrow> pspace_aligned' s \<and> pspace_distinct' s)
\<and> (drv \<longrightarrow> vo \<and> pspace_aligned' s \<and> pspace_distinct' s \<and> valid_mdb' s
\<and> length slots \<le> 1
\<and> (\<forall>x \<in> set caps. s \<turnstile>' fst x \<and> (slots \<noteq> []
\<longrightarrow> cte_wp_at' (\<lambda>cte. fst x \<noteq> NullCap \<longrightarrow> cteCap cte = fst x) (snd x) s)))\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. P\<rbrace>"
apply (induct caps arbitrary: slots n mi)
apply (simp, wp, simp)
apply (simp add: Let_def split_def whenE_def
cong: if_cong list.case_cong split del: if_split)
apply (rule hoare_pre)
apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift
| assumption | wpc)+
apply (rule cteInsert_assume_Null)
apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' static_imp_wp)
apply (rule cteInsert_weak_cte_wp_at2,clarsimp)
apply (wp hoare_vcg_const_Ball_lift static_imp_wp)+
apply (rule cteInsert_weak_cte_wp_at2,clarsimp)
apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at static_imp_wp
deriveCap_derived_foo)+
apply (thin_tac "\<And>slots. PROP P slots" for P)
apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def
real_cte_tcb_valid if_apply_def2
split del: if_split)
apply (rule conjI)
apply (clarsimp simp:cte_wp_at_ctes_of untyped_derived_eq_def)
apply (intro conjI allI)
apply (clarsimp simp:Fun.comp_def cte_wp_at_ctes_of)+
apply (clarsimp simp:valid_capAligned)
done
lemmas transferCapsToSlots_pres2
= transferCapsToSlots_presM[where vo=False and emx=True
and drv=False and pad=False, simplified]
lemma transferCapsToSlots_aligned'[wp]:
"\<lbrace>pspace_aligned'\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. pspace_aligned'\<rbrace>"
by (wp transferCapsToSlots_pres1)
lemma transferCapsToSlots_distinct'[wp]:
"\<lbrace>pspace_distinct'\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. pspace_distinct'\<rbrace>"
by (wp transferCapsToSlots_pres1)
lemma transferCapsToSlots_typ_at'[wp]:
"\<lbrace>\<lambda>s. P (typ_at' T p s)\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv s. P (typ_at' T p s)\<rbrace>"
by (wp transferCapsToSlots_pres1 setExtraBadge_typ_at')
lemma transferCapsToSlots_valid_objs[wp]:
"\<lbrace>valid_objs' and valid_mdb' and (\<lambda>s. \<forall>x \<in> set slots. real_cte_at' x s \<and> cte_wp_at' (\<lambda>cte. cteCap cte = capability.NullCap) x s)
and (\<lambda>s. \<forall>x \<in> set caps. s \<turnstile>' fst x) and K(distinct slots)\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. valid_objs'\<rbrace>"
apply (rule hoare_pre)
apply (rule transferCapsToSlots_presM[where vo=True and emx=False and drv=False and pad=False])
apply (wp | simp)+
done
abbreviation(input)
"transferCaps_srcs caps s \<equiv> \<forall>x\<in>set caps. cte_wp_at' (\<lambda>cte. fst x \<noteq> NullCap \<longrightarrow> cteCap cte = fst x) (snd x) s"
lemma transferCapsToSlots_mdb[wp]:
"\<lbrace>\<lambda>s. valid_pspace' s \<and> distinct slots
\<and> length slots \<le> 1
\<and> (\<forall>x \<in> set slots. ex_cte_cap_to' x s \<and> cte_wp_at' (\<lambda>cte. cteCap cte = capability.NullCap) x s)
\<and> (\<forall>x \<in> set slots. real_cte_at' x s)
\<and> transferCaps_srcs caps s\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. valid_mdb'\<rbrace>"
apply (wp transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True])
apply clarsimp
apply (frule valid_capAligned)
apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def badge_derived'_def)
apply wp
apply (clarsimp simp: valid_pspace'_def)
apply (clarsimp simp:cte_wp_at_ctes_of)
apply (drule(1) bspec,clarify)
apply (case_tac cte)
apply (clarsimp dest!:ctes_of_valid_cap' split:if_splits)
apply (fastforce simp:valid_cap'_def)
done
crunch no_0' [wp]: setExtraBadge no_0_obj'
lemma transferCapsToSlots_no_0_obj' [wp]:
"\<lbrace>no_0_obj'\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>rv. no_0_obj'\<rbrace>"
by (wp transferCapsToSlots_pres1)
lemma transferCapsToSlots_vp[wp]:
"\<lbrace>\<lambda>s. valid_pspace' s \<and> distinct slots
\<and> length slots \<le> 1
\<and> (\<forall>x \<in> set slots. ex_cte_cap_to' x s \<and> cte_wp_at' (\<lambda>cte. cteCap cte = capability.NullCap) x s)
\<and> (\<forall>x \<in> set slots. real_cte_at' x s)
\<and> transferCaps_srcs caps s\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. valid_pspace'\<rbrace>"
apply (rule hoare_pre)
apply (simp add: valid_pspace'_def | wp)+
apply (fastforce simp: cte_wp_at_ctes_of dest: ctes_of_valid')
done
crunches setExtraBadge, doIPCTransfer
for sch_act [wp]: "\<lambda>s. P (ksSchedulerAction s)"
(wp: crunch_wps mapME_wp' simp: zipWithM_x_mapM)
crunches setExtraBadge
for pred_tcb_at' [wp]: "\<lambda>s. pred_tcb_at' proj P p s"
and ksCurThread[wp]: "\<lambda>s. P (ksCurThread s)"
and ksCurDomain[wp]: "\<lambda>s. P (ksCurDomain s)"
and obj_at' [wp]: "\<lambda>s. P' (obj_at' P p s)"
and queues [wp]: "\<lambda>s. P (ksReadyQueues s)"
and queuesL1 [wp]: "\<lambda>s. P (ksReadyQueuesL1Bitmap s)"
and queuesL2 [wp]: "\<lambda>s. P (ksReadyQueuesL2Bitmap s)"
lemma tcts_sch_act[wp]:
"\<lbrace>\<lambda>s. sch_act_wf (ksSchedulerAction s) s\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv s. sch_act_wf (ksSchedulerAction s) s\<rbrace>"
by (wp sch_act_wf_lift tcb_in_cur_domain'_lift transferCapsToSlots_pres1)
lemma tcts_vq[wp]:
"\<lbrace>Invariants_H.valid_queues\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>rv. Invariants_H.valid_queues\<rbrace>"
by (wp valid_queues_lift transferCapsToSlots_pres1)
lemma tcts_vq'[wp]:
"\<lbrace>valid_queues'\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>rv. valid_queues'\<rbrace>"
by (wp valid_queues_lift' transferCapsToSlots_pres1)
crunch state_refs_of' [wp]: setExtraBadge "\<lambda>s. P (state_refs_of' s)"
lemma tcts_state_refs_of'[wp]:
"\<lbrace>\<lambda>s. P (state_refs_of' s)\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv s. P (state_refs_of' s)\<rbrace>"
by (wp transferCapsToSlots_pres1)
crunch if_live' [wp]: setExtraBadge if_live_then_nonz_cap'
lemma tcts_iflive[wp]:
"\<lbrace>\<lambda>s. if_live_then_nonz_cap' s \<and> distinct slots \<and>
(\<forall>x\<in>set slots.
ex_cte_cap_to' x s \<and> cte_wp_at' (\<lambda>cte. cteCap cte = capability.NullCap) x s)\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. if_live_then_nonz_cap'\<rbrace>"
by (wp transferCapsToSlots_pres2 | simp)+
crunch if_unsafe' [wp]: setExtraBadge if_unsafe_then_cap'
lemma tcts_ifunsafe[wp]:
"\<lbrace>\<lambda>s. if_unsafe_then_cap' s \<and> distinct slots \<and>
(\<forall>x\<in>set slots. cte_wp_at' (\<lambda>cte. cteCap cte = capability.NullCap) x s \<and>
ex_cte_cap_to' x s)\<rbrace> transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. if_unsafe_then_cap'\<rbrace>"
by (wp transferCapsToSlots_pres2 | simp)+
crunch it[wp]: ensureNoChildren "\<lambda>s. P (ksIdleThread s)"
crunch idle'[wp]: deriveCap "valid_idle'"
crunch valid_idle' [wp]: setExtraBadge valid_idle'
lemma tcts_idle'[wp]:
"\<lbrace>\<lambda>s. valid_idle' s\<rbrace> transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. valid_idle'\<rbrace>"
apply (rule hoare_pre)
apply (wp transferCapsToSlots_pres1)
apply simp
done
lemma tcts_ct[wp]:
"\<lbrace>cur_tcb'\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>rv. cur_tcb'\<rbrace>"
by (wp transferCapsToSlots_pres1 cur_tcb_lift)
crunch valid_arch_state' [wp]: setExtraBadge valid_arch_state'
lemma transferCapsToSlots_valid_arch [wp]:
"\<lbrace>valid_arch_state'\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>rv. valid_arch_state'\<rbrace>"
by (rule transferCapsToSlots_pres1; wp)
crunch valid_global_refs' [wp]: setExtraBadge valid_global_refs'
lemma transferCapsToSlots_valid_globals [wp]:
"\<lbrace>valid_global_refs' and valid_objs' and valid_mdb' and pspace_distinct' and pspace_aligned' and K (distinct slots)
and K (length slots \<le> 1)
and (\<lambda>s. \<forall>x \<in> set slots. real_cte_at' x s \<and> cte_wp_at' (\<lambda>cte. cteCap cte = capability.NullCap) x s)
and transferCaps_srcs caps\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. valid_global_refs'\<rbrace>"
apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=True] | clarsimp)+
apply (clarsimp simp:cte_wp_at_ctes_of)
apply (drule(1) bspec,clarsimp)
apply (case_tac cte,clarsimp)
apply (frule(1) CSpace_I.ctes_of_valid_cap')
apply (fastforce simp:valid_cap'_def)
done
crunch irq_node' [wp]: setExtraBadge "\<lambda>s. P (irq_node' s)"
lemma transferCapsToSlots_irq_node'[wp]:
"\<lbrace>\<lambda>s. P (irq_node' s)\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>rv s. P (irq_node' s)\<rbrace>"
by (wp transferCapsToSlots_pres1)
lemma valid_irq_handlers_ctes_ofD:
"\<lbrakk> ctes_of s p = Some cte; cteCap cte = IRQHandlerCap irq; valid_irq_handlers' s \<rbrakk>
\<Longrightarrow> irq_issued' irq s"
by (auto simp: valid_irq_handlers'_def cteCaps_of_def ran_def)
crunch valid_irq_handlers' [wp]: setExtraBadge valid_irq_handlers'
lemma transferCapsToSlots_irq_handlers[wp]:
"\<lbrace>valid_irq_handlers' and valid_objs' and valid_mdb' and pspace_distinct' and pspace_aligned'
and K(distinct slots \<and> length slots \<le> 1)
and (\<lambda>s. \<forall>x \<in> set slots. real_cte_at' x s \<and> cte_wp_at' (\<lambda>cte. cteCap cte = capability.NullCap) x s)
and transferCaps_srcs caps\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. valid_irq_handlers'\<rbrace>"
apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False])
apply (clarsimp simp: is_derived'_def cte_wp_at_ctes_of badge_derived'_def)
apply (erule(2) valid_irq_handlers_ctes_ofD)
apply wp
apply (clarsimp simp:cte_wp_at_ctes_of | intro ballI conjI)+
apply (drule(1) bspec,clarsimp)
apply (case_tac cte,clarsimp)
apply (frule(1) CSpace_I.ctes_of_valid_cap')
apply (fastforce simp:valid_cap'_def)
done
crunch irq_state' [wp]: setExtraBadge "\<lambda>s. P (ksInterruptState s)"
lemma setExtraBadge_irq_states'[wp]:
"\<lbrace>valid_irq_states'\<rbrace> setExtraBadge buffer b n \<lbrace>\<lambda>_. valid_irq_states'\<rbrace>"
apply (wp valid_irq_states_lift')
apply (simp add: setExtraBadge_def storeWordUser_def)
apply (wpsimp wp: no_irq dmo_lift' no_irq_storeWord)
apply assumption
done
lemma transferCapsToSlots_irq_states' [wp]:
"\<lbrace>valid_irq_states'\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>_. valid_irq_states'\<rbrace>"
by (wp transferCapsToSlots_pres1)
crunch valid_pde_mappings' [wp]: setExtraBadge valid_pde_mappings'
lemma transferCapsToSlots_pde_mappings'[wp]:
"\<lbrace>valid_pde_mappings'\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>rv. valid_pde_mappings'\<rbrace>"
by (wp transferCapsToSlots_pres1)
lemma transferCapsToSlots_irqs_masked'[wp]:
"\<lbrace>irqs_masked'\<rbrace> transferCapsToSlots ep buffer n caps slots mi \<lbrace>\<lambda>rv. irqs_masked'\<rbrace>"
by (wp transferCapsToSlots_pres1 irqs_masked_lift)
lemma storeWordUser_vms'[wp]:
"\<lbrace>valid_machine_state'\<rbrace> storeWordUser a w \<lbrace>\<lambda>_. valid_machine_state'\<rbrace>"
proof -
have aligned_offset_ignore:
"\<And>(l::word32) (p::word32) sz. l<4 \<Longrightarrow> p && mask 2 = 0 \<Longrightarrow>
p+l && ~~ mask pageBits = p && ~~ mask pageBits"
proof -
fix l p sz
assume al: "(p::word32) && mask 2 = 0"
assume "(l::word32) < 4" hence less: "l<2^2" by simp
have le: "2 \<le> pageBits" by (simp add: pageBits_def)
show "?thesis l p sz"
by (rule is_aligned_add_helper[simplified is_aligned_mask,
THEN conjunct2, THEN mask_out_first_mask_some,
where n=2, OF al less le])
qed
show ?thesis
apply (simp add: valid_machine_state'_def storeWordUser_def
doMachineOp_def split_def)
apply wp
apply clarsimp
apply (drule use_valid)
apply (rule_tac x=p in storeWord_um_inv, simp+)
apply (drule_tac x=p in spec)
apply (erule disjE, simp_all)
apply (erule conjE)
apply (erule disjE, simp)
apply (simp add: pointerInUserData_def word_size)
apply (subgoal_tac "a && ~~ mask pageBits = p && ~~ mask pageBits", simp)
apply (simp only: is_aligned_mask[of _ 2])
apply (elim disjE, simp_all)
apply (rule aligned_offset_ignore[symmetric], simp+)+
done
qed
lemma setExtraBadge_vms'[wp]:
"\<lbrace>valid_machine_state'\<rbrace> setExtraBadge buffer b n \<lbrace>\<lambda>_. valid_machine_state'\<rbrace>"
by (simp add: setExtraBadge_def) wp
lemma transferCapsToSlots_vms[wp]:
"\<lbrace>\<lambda>s. valid_machine_state' s\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>_ s. valid_machine_state' s\<rbrace>"
by (wp transferCapsToSlots_pres1)
crunches setExtraBadge, transferCapsToSlots
for pspace_domain_valid[wp]: "pspace_domain_valid"
crunch ct_not_inQ[wp]: setExtraBadge "ct_not_inQ"
lemma tcts_ct_not_inQ[wp]:
"\<lbrace>ct_not_inQ\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>_. ct_not_inQ\<rbrace>"
by (wp transferCapsToSlots_pres1)
crunch gsUntypedZeroRanges[wp]: setExtraBadge "\<lambda>s. P (gsUntypedZeroRanges s)"
crunch ctes_of[wp]: setExtraBadge "\<lambda>s. P (ctes_of s)"
lemma tcts_zero_ranges[wp]:
"\<lbrace>\<lambda>s. untyped_ranges_zero' s \<and> valid_pspace' s \<and> distinct slots
\<and> (\<forall>x \<in> set slots. ex_cte_cap_to' x s \<and> cte_wp_at' (\<lambda>cte. cteCap cte = capability.NullCap) x s)
\<and> (\<forall>x \<in> set slots. real_cte_at' x s)
\<and> length slots \<le> 1
\<and> transferCaps_srcs caps s\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. untyped_ranges_zero'\<rbrace>"
apply (wp transferCapsToSlots_presM[where emx=True and vo=True
and drv=True and pad=True])
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (simp add: cteCaps_of_def)
apply (rule hoare_pre, wp untyped_ranges_zero_lift)
apply (simp add: o_def)
apply (clarsimp simp: valid_pspace'_def ball_conj_distrib[symmetric])
apply (drule(1) bspec)
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (case_tac cte, clarsimp)
apply (frule(1) ctes_of_valid_cap')
apply auto[1]
done
crunch ct_idle_or_in_cur_domain'[wp]: setExtraBadge ct_idle_or_in_cur_domain'
crunch ct_idle_or_in_cur_domain'[wp]: transferCapsToSlots ct_idle_or_in_cur_domain'
crunch ksCurDomain[wp]: transferCapsToSlots "\<lambda>s. P (ksCurDomain s)"
crunch ksDomSchedule[wp]: setExtraBadge "\<lambda>s. P (ksDomSchedule s)"
crunch ksDomScheduleIdx[wp]: setExtraBadge "\<lambda>s. P (ksDomScheduleIdx s)"
crunch ksDomSchedule[wp]: transferCapsToSlots "\<lambda>s. P (ksDomSchedule s)"
crunch ksDomScheduleIdx[wp]: transferCapsToSlots "\<lambda>s. P (ksDomScheduleIdx s)"
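(* The preservation lemmas above combine into invariant preservation for
transferCapsToSlots, given distinct, empty, reachable real destination slots
and well-formed source caps. *)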
lemma transferCapsToSlots_invs[wp]:
"\<lbrace>\<lambda>s. invs' s \<and> distinct slots
\<and> (\<forall>x \<in> set slots. cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) x s)
\<and> (\<forall>x \<in> set slots. ex_cte_cap_to' x s)
\<and> (\<forall>x \<in> set slots. real_cte_at' x s)
\<and> length slots \<le> 1
\<and> transferCaps_srcs caps s\<rbrace>
transferCapsToSlots ep buffer n caps slots mi
\<lbrace>\<lambda>rv. invs'\<rbrace>"
apply (simp add: invs'_def valid_state'_def)
apply (wp valid_irq_node_lift)
apply fastforce
done
lemma grs_distinct'[wp]:
"\<lbrace>\<top>\<rbrace> getReceiveSlots t buf \<lbrace>\<lambda>rv s. distinct rv\<rbrace>"
apply (cases buf, simp_all add: getReceiveSlots_def
split_def unlessE_def)
apply (wp, simp)
apply (wp | simp only: distinct.simps list.simps empty_iff)+
apply simp
done
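(* Correspondence between the abstract transfer_caps and the executable
transferCaps: the returned message infos are related by message_info_map,
assuming cap_relation on the transferred caps and cte_map on their slots. *)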
lemma transferCaps_corres:
"\<lbrakk> info' = message_info_map info;
list_all2 (\<lambda>x y. cap_relation (fst x) (fst y) \<and> snd y = cte_map (snd x))
caps caps' \<rbrakk>
\<Longrightarrow>
corres ((=) \<circ> message_info_map)
(tcb_at receiver and valid_objs and
pspace_aligned and pspace_distinct and valid_mdb
and valid_list
and (\<lambda>s. case ep of Some x \<Rightarrow> ep_at x s | _ \<Rightarrow> True)
and case_option \<top> in_user_frame recv_buf
and (\<lambda>s. valid_message_info info)
and transfer_caps_srcs caps)
(tcb_at' receiver and valid_objs' and
pspace_aligned' and pspace_distinct' and no_0_obj' and valid_mdb'
and (\<lambda>s. case ep of Some x \<Rightarrow> ep_at' x s | _ \<Rightarrow> True)
and case_option \<top> valid_ipc_buffer_ptr' recv_buf
and transferCaps_srcs caps'
and (\<lambda>s. length caps' \<le> msgMaxExtraCaps))
(transfer_caps info caps ep receiver recv_buf)
(transferCaps info' caps' ep receiver recv_buf)"
apply (simp add: transfer_caps_def transferCaps_def
getThreadCSpaceRoot)
apply (rule corres_assume_pre)
apply (rule corres_guard_imp)
apply (rule corres_split[OF getReceiveSlots_corres])
apply (rule_tac x=recv_buf in option_corres)
apply (rule_tac P=\<top> and P'=\<top> in corres_inst)
apply (case_tac info, simp)
apply simp
apply (rule corres_rel_imp, rule transferCapsToSlots_corres,
simp_all add: split_def)[1]
apply (case_tac info, simp)
apply (wp hoare_vcg_all_lift get_rs_cte_at static_imp_wp
| simp only: ball_conj_distrib)+
apply (simp add: cte_map_def tcb_cnode_index_def split_def)
apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2
split_def
cong: option.case_cong)
apply (drule(1) bspec)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (frule(1) Invariants_AI.caps_of_state_valid)
apply (fastforce simp:valid_cap_def)
apply (cases info)
apply (clarsimp simp: msg_max_extra_caps_def valid_message_info_def
max_ipc_words msg_max_length_def
msgMaxExtraCaps_def msgExtraCapBits_def
shiftL_nat valid_pspace'_def)
apply (drule(1) bspec)
apply (clarsimp simp:cte_wp_at_ctes_of)
apply (case_tac cte,clarsimp)
apply (frule(1) ctes_of_valid_cap')
apply (fastforce simp:valid_cap'_def)
done
crunch typ_at'[wp]: transferCaps "\<lambda>s. P (typ_at' T p s)"
lemmas transferCaps_typ_ats[wp] = typ_at_lifts [OF transferCaps_typ_at']
lemma isIRQControlCap_mask [simp]:
"isIRQControlCap (maskCapRights R c) = isIRQControlCap c"
apply (case_tac c)
apply (clarsimp simp: isCap_simps maskCapRights_def Let_def)+
apply (rename_tac arch_capability)
apply (case_tac arch_capability)
apply (clarsimp simp: isCap_simps ARM_H.maskCapRights_def
maskCapRights_def Let_def)+
done
lemma isPageCap_maskCapRights[simp]:
" isArchCap isPageCap (RetypeDecls_H.maskCapRights R c) = isArchCap isPageCap c"
apply (case_tac c; simp add: isCap_simps isArchCap_def maskCapRights_def)
apply (rename_tac arch_capability)
apply (case_tac arch_capability; simp add: isCap_simps ARM_H.maskCapRights_def)
done
lemma capReplyMaster_mask[simp]:
"isReplyCap c \<Longrightarrow> capReplyMaster (maskCapRights R c) = capReplyMaster c"
by (clarsimp simp: isCap_simps maskCapRights_def)
lemma is_derived_mask' [simp]:
"is_derived' m p (maskCapRights R c) = is_derived' m p c"
apply (rule ext)
apply (simp add: is_derived'_def badge_derived'_def)
done
lemma updateCapData_ordering:
"\<lbrakk> (x, capBadge cap) \<in> capBadge_ordering P; updateCapData p d cap \<noteq> NullCap \<rbrakk>
\<Longrightarrow> (x, capBadge (updateCapData p d cap)) \<in> capBadge_ordering P"
apply (cases cap, simp_all add: updateCapData_def isCap_simps Let_def
capBadge_def ARM_H.updateCapData_def
split: if_split_asm)
apply fastforce+
done
lemma lookup_cap_to'[wp]:
"\<lbrace>\<top>\<rbrace> lookupCap t cref \<lbrace>\<lambda>rv s. \<forall>r\<in>cte_refs' rv (irq_node' s). ex_cte_cap_to' r s\<rbrace>,-"
by (simp add: lookupCap_def lookupCapAndSlot_def | wp)+
lemma grs_cap_to'[wp]:
"\<lbrace>\<top>\<rbrace> getReceiveSlots t buf \<lbrace>\<lambda>rv s. \<forall>x \<in> set rv. ex_cte_cap_to' x s\<rbrace>"
apply (cases buf; simp add: getReceiveSlots_def split_def unlessE_def)
apply (wp, simp)
apply (wp | simp | rule hoare_drop_imps)+
done
lemma grs_length'[wp]:
"\<lbrace>\<lambda>s. 1 \<le> n\<rbrace> getReceiveSlots receiver recv_buf \<lbrace>\<lambda>rv s. length rv \<le> n\<rbrace>"
apply (simp add: getReceiveSlots_def split_def unlessE_def)
apply (rule hoare_pre)
apply (wp | wpc | simp)+
done
lemma transferCaps_invs' [wp]:
"\<lbrace>invs' and transferCaps_srcs caps\<rbrace>
transferCaps mi caps ep receiver recv_buf
\<lbrace>\<lambda>rv. invs'\<rbrace>"
apply (simp add: transferCaps_def Let_def split_def)
apply (wp get_rs_cte_at' hoare_vcg_const_Ball_lift
| wpcw | clarsimp)+
done
lemma get_mrs_inv'[wp]:
"\<lbrace>P\<rbrace> getMRs t buf info \<lbrace>\<lambda>rv. P\<rbrace>"
by (simp add: getMRs_def load_word_offs_def getRegister_def
| wp dmo_inv' loadWord_inv mapM_wp'
asUser_inv det_mapM[where S=UNIV] | wpc)+
lemma copyMRs_typ_at':
"\<lbrace>\<lambda>s. P (typ_at' T p s)\<rbrace> copyMRs s sb r rb n \<lbrace>\<lambda>rv s. P (typ_at' T p s)\<rbrace>"
by (simp add: copyMRs_def | wp mapM_wp [where S=UNIV, simplified] | wpc)+
lemmas copyMRs_typ_at_lifts[wp] = typ_at_lifts [OF copyMRs_typ_at']
lemma copy_mrs_invs'[wp]:
"\<lbrace> invs' and tcb_at' s and tcb_at' r \<rbrace> copyMRs s sb r rb n \<lbrace>\<lambda>rv. invs' \<rbrace>"
including no_pre
apply (simp add: copyMRs_def)
apply (wp dmo_invs' no_irq_mapM no_irq_storeWord|
simp add: split_def)
apply (case_tac sb, simp_all)[1]
apply wp+
apply (case_tac rb, simp_all)[1]
apply (wp mapM_wp dmo_invs' no_irq_mapM no_irq_storeWord no_irq_loadWord)
apply blast
apply (rule hoare_strengthen_post)
apply (rule mapM_wp)
apply (wp | simp | blast)+
done
crunch aligned'[wp]: transferCaps pspace_aligned'
(wp: crunch_wps simp: zipWithM_x_mapM)
crunch distinct'[wp]: transferCaps pspace_distinct'
(wp: crunch_wps simp: zipWithM_x_mapM)
crunch aligned'[wp]: setMRs pspace_aligned'
(wp: crunch_wps simp: crunch_simps)
crunch distinct'[wp]: setMRs pspace_distinct'
(wp: crunch_wps simp: crunch_simps)
crunch aligned'[wp]: copyMRs pspace_aligned'
(wp: crunch_wps simp: crunch_simps)
crunch distinct'[wp]: copyMRs pspace_distinct'
(wp: crunch_wps simp: crunch_simps)
crunch aligned'[wp]: setMessageInfo pspace_aligned'
(wp: crunch_wps simp: crunch_simps)
crunch distinct'[wp]: setMessageInfo pspace_distinct'
(wp: crunch_wps simp: crunch_simps)
crunch valid_objs'[wp]: storeWordUser valid_objs'
crunch valid_pspace'[wp]: storeWordUser valid_pspace'
lemma set_mrs_valid_objs' [wp]:
"\<lbrace>valid_objs'\<rbrace> setMRs t a msgs \<lbrace>\<lambda>rv. valid_objs'\<rbrace>"
apply (simp add: setMRs_def zipWithM_x_mapM split_def)
apply (wp asUser_valid_objs crunch_wps)
done
crunch valid_objs'[wp]: copyMRs valid_objs'
(wp: crunch_wps simp: crunch_simps)
crunch valid_queues'[wp]: asUser "Invariants_H.valid_queues'"
(simp: crunch_simps wp: hoare_drop_imps)
lemma setMRs_invs_bits[wp]:
"\<lbrace>valid_pspace'\<rbrace> setMRs t buf mrs \<lbrace>\<lambda>rv. valid_pspace'\<rbrace>"
"\<lbrace>\<lambda>s. sch_act_wf (ksSchedulerAction s) s\<rbrace>
setMRs t buf mrs \<lbrace>\<lambda>rv s. sch_act_wf (ksSchedulerAction s) s\<rbrace>"
"\<lbrace>\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>
setMRs t buf mrs \<lbrace>\<lambda>rv s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>"
"\<lbrace>Invariants_H.valid_queues\<rbrace> setMRs t buf mrs \<lbrace>\<lambda>rv. Invariants_H.valid_queues\<rbrace>"
"\<lbrace>valid_queues'\<rbrace> setMRs t buf mrs \<lbrace>\<lambda>rv. valid_queues'\<rbrace>"
"\<lbrace>\<lambda>s. P (state_refs_of' s)\<rbrace>
setMRs t buf mrs
\<lbrace>\<lambda>rv s. P (state_refs_of' s)\<rbrace>"
"\<lbrace>if_live_then_nonz_cap'\<rbrace> setMRs t buf mrs \<lbrace>\<lambda>rv. if_live_then_nonz_cap'\<rbrace>"
"\<lbrace>ex_nonz_cap_to' p\<rbrace> setMRs t buf mrs \<lbrace>\<lambda>rv. ex_nonz_cap_to' p\<rbrace>"
"\<lbrace>cur_tcb'\<rbrace> setMRs t buf mrs \<lbrace>\<lambda>rv. cur_tcb'\<rbrace>"
"\<lbrace>if_unsafe_then_cap'\<rbrace> setMRs t buf mrs \<lbrace>\<lambda>rv. if_unsafe_then_cap'\<rbrace>"
by (simp add: setMRs_def zipWithM_x_mapM split_def storeWordUser_def | wp crunch_wps)+
crunch no_0_obj'[wp]: setMRs no_0_obj'
(wp: crunch_wps simp: crunch_simps)
lemma copyMRs_invs_bits[wp]:
"\<lbrace>valid_pspace'\<rbrace> copyMRs s sb r rb n \<lbrace>\<lambda>rv. valid_pspace'\<rbrace>"
"\<lbrace>\<lambda>s. sch_act_wf (ksSchedulerAction s) s\<rbrace> copyMRs s sb r rb n
\<lbrace>\<lambda>rv s. sch_act_wf (ksSchedulerAction s) s\<rbrace>"
"\<lbrace>Invariants_H.valid_queues\<rbrace> copyMRs s sb r rb n \<lbrace>\<lambda>rv. Invariants_H.valid_queues\<rbrace>"
"\<lbrace>valid_queues'\<rbrace> copyMRs s sb r rb n \<lbrace>\<lambda>rv. valid_queues'\<rbrace>"
"\<lbrace>\<lambda>s. P (state_refs_of' s)\<rbrace>
copyMRs s sb r rb n
\<lbrace>\<lambda>rv s. P (state_refs_of' s)\<rbrace>"
"\<lbrace>if_live_then_nonz_cap'\<rbrace> copyMRs s sb r rb n \<lbrace>\<lambda>rv. if_live_then_nonz_cap'\<rbrace>"
"\<lbrace>ex_nonz_cap_to' p\<rbrace> copyMRs s sb r rb n \<lbrace>\<lambda>rv. ex_nonz_cap_to' p\<rbrace>"
"\<lbrace>cur_tcb'\<rbrace> copyMRs s sb r rb n \<lbrace>\<lambda>rv. cur_tcb'\<rbrace>"
"\<lbrace>if_unsafe_then_cap'\<rbrace> copyMRs s sb r rb n \<lbrace>\<lambda>rv. if_unsafe_then_cap'\<rbrace>"
by (simp add: copyMRs_def storeWordUser_def | wp mapM_wp' | wpc)+
crunch no_0_obj'[wp]: copyMRs no_0_obj'
(wp: crunch_wps simp: crunch_simps)
lemma mi_map_length[simp]: "msgLength (message_info_map mi) = mi_length mi"
by (cases mi, simp)
crunch cte_wp_at'[wp]: copyMRs "cte_wp_at' P p"
(wp: crunch_wps)
lemma lookupExtraCaps_srcs[wp]:
"\<lbrace>\<top>\<rbrace> lookupExtraCaps thread buf info \<lbrace>transferCaps_srcs\<rbrace>,-"
apply (simp add: lookupExtraCaps_def lookupCapAndSlot_def
split_def lookupSlotForThread_def
getSlotCap_def)
apply (wp mapME_set[where R=\<top>] getCTE_wp')
apply (rule_tac P=\<top> in hoare_trivE_R)
apply (simp add: cte_wp_at_ctes_of)
apply (wp | simp)+
done
crunch inv[wp]: lookupExtraCaps "P"
(wp: crunch_wps mapME_wp' simp: crunch_simps)
lemma invs_mdb_strengthen':
"invs' s \<longrightarrow> valid_mdb' s" by auto
lemma lookupExtraCaps_length:
"\<lbrace>\<lambda>s. unat (msgExtraCaps mi) \<le> n\<rbrace> lookupExtraCaps thread send_buf mi \<lbrace>\<lambda>rv s. length rv \<le> n\<rbrace>,-"
apply (simp add: lookupExtraCaps_def getExtraCPtrs_def)
apply (rule hoare_pre)
apply (wp mapME_length | wpc)+
apply (clarsimp simp: upto_enum_step_def Suc_unat_diff_1 word_le_sub1)
done
lemma getMessageInfo_msgExtraCaps[wp]:
"\<lbrace>\<top>\<rbrace> getMessageInfo t \<lbrace>\<lambda>rv s. unat (msgExtraCaps rv) \<le> msgMaxExtraCaps\<rbrace>"
apply (simp add: getMessageInfo_def)
apply wp
apply (simp add: messageInfoFromWord_def Let_def msgMaxExtraCaps_def
shiftL_nat)
apply (subst nat_le_Suc_less_imp)
apply (rule unat_less_power)
apply (simp add: word_bits_def msgExtraCapBits_def)
apply (rule and_mask_less'[unfolded mask_2pm1])
apply (simp add: msgExtraCapBits_def)
apply wpsimp+
done
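(* Looking up a cap together with its slot corresponds across the two
specification levels: caps are related by cap_relation, slots by cte_map,
and lookup failures by lfr. *)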
lemma lookupCapAndSlot_corres:
"cptr = to_bl cptr' \<Longrightarrow>
corres (lfr \<oplus> (\<lambda>a b. cap_relation (fst a) (fst b) \<and> snd b = cte_map (snd a)))
(valid_objs and pspace_aligned and tcb_at thread)
(valid_objs' and pspace_distinct' and pspace_aligned' and tcb_at' thread)
(lookup_cap_and_slot thread cptr) (lookupCapAndSlot thread cptr')"
unfolding lookup_cap_and_slot_def lookupCapAndSlot_def
apply (simp add: liftE_bindE split_def)
apply (rule corres_guard_imp)
apply (rule_tac r'="\<lambda>rv rv'. rv' = cte_map (fst rv)"
in corres_splitEE)
apply (rule corres_rel_imp, rule lookupSlotForThread_corres)
apply (simp add: split_def)
apply (rule corres_split[OF getSlotCap_corres])
apply simp
apply (rule corres_returnOkTT, simp)
apply wp+
apply (wp | simp add: liftE_bindE[symmetric])+
done
lemma lookupExtraCaps_corres:
"\<lbrakk> info' = message_info_map info; buffer = buffer'\<rbrakk> \<Longrightarrow>
corres (fr \<oplus> list_all2 (\<lambda>x y. cap_relation (fst x) (fst y) \<and> snd y = cte_map (snd x)))
(valid_objs and pspace_aligned and tcb_at thread and (\<lambda>_. valid_message_info info))
(valid_objs' and pspace_distinct' and pspace_aligned' and tcb_at' thread
and case_option \<top> valid_ipc_buffer_ptr' buffer')
(lookup_extra_caps thread buffer info) (lookupExtraCaps thread buffer' info')"
unfolding lookupExtraCaps_def lookup_extra_caps_def
apply (rule corres_gen_asm)
apply (cases "mi_extra_caps info = 0")
apply (cases info)
apply (simp add: Let_def returnOk_def getExtraCPtrs_def
liftE_bindE upto_enum_step_def mapM_def
sequence_def doMachineOp_return mapME_Nil
split: option.split)
apply (cases info)
apply (rename_tac w1 w2 w3 w4)
apply (simp add: Let_def liftE_bindE)
apply (cases buffer')
apply (simp add: getExtraCPtrs_def mapME_Nil)
apply (rule corres_returnOk)
apply simp
apply (simp add: msgLengthBits_def msgMaxLength_def word_size field_simps
getExtraCPtrs_def upto_enum_step_def upto_enum_word
word_size_def msg_max_length_def liftM_def
Suc_unat_diff_1 word_le_sub1 mapM_map_simp
upt_lhs_sub_map[where x=buffer_cptr_index]
wordSize_def wordBits_def
del: upt.simps)
apply (rule corres_guard_imp)
apply (rule corres_underlying_split)
apply (rule_tac S = "\<lambda>x y. x = y \<and> x < unat w2"
in corres_mapM_list_all2
[where Q = "\<lambda>_. valid_objs and pspace_aligned and tcb_at thread" and r = "(=)"
and Q' = "\<lambda>_. valid_objs' and pspace_aligned' and pspace_distinct' and tcb_at' thread
and case_option \<top> valid_ipc_buffer_ptr' buffer'" and r'="(=)" ])
apply simp
apply simp
apply simp
apply (rule corres_guard_imp)
apply (rule loadWordUser_corres')
apply (clarsimp simp: buffer_cptr_index_def msg_max_length_def
max_ipc_words valid_message_info_def
msg_max_extra_caps_def word_le_nat_alt)
apply (simp add: buffer_cptr_index_def msg_max_length_def)
apply simp
apply simp
apply (simp add: load_word_offs_word_def)
apply (wp | simp)+
apply (subst list_all2_same)
apply (clarsimp simp: max_ipc_words field_simps)
apply (simp add: mapME_def, fold mapME_def)[1]
apply (rule corres_mapME [where S = Id and r'="(\<lambda>x y. cap_relation (fst x) (fst y) \<and> snd y = cte_map (snd x))"])
apply simp
apply simp
apply simp
apply (rule corres_cap_fault [OF lookupCapAndSlot_corres])
apply simp
apply simp
apply (wp | simp)+
apply (simp add: set_zip_same Int_lower1)
apply (wp mapM_wp [OF _ subset_refl] | simp)+
done
crunch ctes_of[wp]: copyMRs "\<lambda>s. P (ctes_of s)"
(wp: threadSet_ctes_of crunch_wps)
lemma copyMRs_valid_mdb[wp]:
"\<lbrace>valid_mdb'\<rbrace> copyMRs t buf t' buf' n \<lbrace>\<lambda>rv. valid_mdb'\<rbrace>"
by (simp add: valid_mdb'_def copyMRs_ctes_of)
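(* Main correspondence for normal (non-fault) IPC transfers: message
registers, extra caps and the badge register are transferred in lock-step
at both levels. *)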
lemma doNormalTransfer_corres:
"corres dc
(tcb_at sender and tcb_at receiver and (pspace_aligned:: det_state \<Rightarrow> bool)
and valid_objs and cur_tcb and valid_mdb and valid_list and pspace_distinct
and (\<lambda>s. case ep of Some x \<Rightarrow> ep_at x s | _ \<Rightarrow> True)
and case_option \<top> in_user_frame send_buf
and case_option \<top> in_user_frame recv_buf)
(tcb_at' sender and tcb_at' receiver and valid_objs'
and pspace_aligned' and pspace_distinct' and cur_tcb'
and valid_mdb' and no_0_obj'
and (\<lambda>s. case ep of Some x \<Rightarrow> ep_at' x s | _ \<Rightarrow> True)
and case_option \<top> valid_ipc_buffer_ptr' send_buf
and case_option \<top> valid_ipc_buffer_ptr' recv_buf)
(do_normal_transfer sender send_buf ep badge can_grant receiver recv_buf)
(doNormalTransfer sender send_buf ep badge can_grant receiver recv_buf)"
apply (simp add: do_normal_transfer_def doNormalTransfer_def)
apply (rule corres_guard_imp)
apply (rule corres_split_mapr[OF getMessageInfo_corres])
apply (rule_tac F="valid_message_info mi" in corres_gen_asm)
apply (rule_tac r'="list_all2 (\<lambda>x y. cap_relation (fst x) (fst y) \<and> snd y = cte_map (snd x))"
in corres_split)
apply (rule corres_if[OF refl])
apply (rule corres_split_catch)
apply (rule lookupExtraCaps_corres; simp)
apply (rule corres_trivial, simp)
apply wp+
apply (rule corres_trivial, simp)
apply simp
apply (rule corres_split_eqr[OF copyMRs_corres])
apply (rule corres_split)
apply (rule transferCaps_corres; simp)
apply (rename_tac mi' mi'')
apply (rule_tac F="mi_label mi' = mi_label mi"
in corres_gen_asm)
apply (rule corres_split_nor[OF setMessageInfo_corres])
apply (case_tac mi', clarsimp)
apply (simp add: badge_register_def badgeRegister_def)
apply (fold dc_def)
apply (rule asUser_setRegister_corres)
apply wp
apply simp+
apply ((wp valid_case_option_post_wp hoare_vcg_const_Ball_lift
hoare_case_option_wp
hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at'
hoare_vcg_const_Ball_lift lookupExtraCaps_length
| simp add: if_apply_def2)+)
apply (wp static_imp_wp | strengthen valid_msg_length_strengthen)+
apply clarsimp
apply auto
done
lemma corres_liftE_lift:
"corres r1 P P' m m' \<Longrightarrow>
corres (f1 \<oplus> r1) P P' (liftE m) (withoutFailure m')"
by simp
lemmas corres_ipc_thread_helper =
corres_split_eqrE[OF corres_liftE_lift [OF getCurThread_corres]]
lemmas corres_ipc_info_helper =
corres_split_maprE [where f = message_info_map, OF _
corres_liftE_lift [OF getMessageInfo_corres]]
crunch typ_at'[wp]: doNormalTransfer "\<lambda>s. P (typ_at' T p s)"
lemmas doNormal_lifts[wp] = typ_at_lifts [OF doNormalTransfer_typ_at']
lemma doNormal_invs'[wp]:
"\<lbrace>tcb_at' sender and tcb_at' receiver and invs'\<rbrace>
doNormalTransfer sender send_buf ep badge
can_grant receiver recv_buf \<lbrace>\<lambda>r. invs'\<rbrace>"
apply (simp add: doNormalTransfer_def)
apply (wp hoare_vcg_const_Ball_lift | simp)+
done
crunch aligned'[wp]: doNormalTransfer pspace_aligned'
(wp: crunch_wps)
crunch distinct'[wp]: doNormalTransfer pspace_distinct'
(wp: crunch_wps)
lemma transferCaps_urz[wp]:
"\<lbrace>untyped_ranges_zero' and valid_pspace'
and (\<lambda>s. (\<forall>x\<in>set caps. cte_wp_at' (\<lambda>cte. fst x \<noteq> capability.NullCap \<longrightarrow> cteCap cte = fst x) (snd x) s))\<rbrace>
transferCaps tag caps ep receiver recv_buf
\<lbrace>\<lambda>r. untyped_ranges_zero'\<rbrace>"
apply (simp add: transferCaps_def)
apply (rule hoare_pre)
apply (wp hoare_vcg_all_lift hoare_vcg_const_imp_lift
| wpc
| simp add: ball_conj_distrib)+
apply clarsimp
done
crunch gsUntypedZeroRanges[wp]: doNormalTransfer "\<lambda>s. P (gsUntypedZeroRanges s)"
(wp: crunch_wps transferCapsToSlots_pres1 ignore: constOnFailure)
lemmas asUser_urz = untyped_ranges_zero_lift[OF asUser_gsUntypedZeroRanges]
crunch urz[wp]: doNormalTransfer "untyped_ranges_zero'"
(ignore: asUser wp: crunch_wps asUser_urz hoare_vcg_const_Ball_lift)
lemma msgFromLookupFailure_map[simp]:
"msgFromLookupFailure (lookup_failure_map lf)
= msg_from_lookup_failure lf"
by (cases lf, simp_all add: lookup_failure_map_def msgFromLookupFailure_def)
lemma asUser_getRestartPC_corres:
"corres (=) (tcb_at t) (tcb_at' t)
(as_user t getRestartPC) (asUser t getRestartPC)"
apply (rule asUser_corres')
apply (rule corres_Id, simp, simp)
apply (rule no_fail_getRestartPC)
done
lemma asUser_mapM_getRegister_corres:
"corres (=) (tcb_at t) (tcb_at' t)
(as_user t (mapM getRegister regs))
(asUser t (mapM getRegister regs))"
apply (rule asUser_corres')
apply (rule corres_Id [OF refl refl])
apply (rule no_fail_mapM)
apply (simp add: getRegister_def)
done
lemma makeArchFaultMessage_corres:
"corres (=) (tcb_at t) (tcb_at' t)
(make_arch_fault_msg f t)
(makeArchFaultMessage (arch_fault_map f) t)"
apply (cases f, clarsimp simp: makeArchFaultMessage_def split: arch_fault.split)
apply (rule corres_guard_imp)
apply (rule corres_split_eqr[OF asUser_getRestartPC_corres])
apply (rule corres_trivial, simp add: arch_fault_map_def)
apply (wp+, auto)
done
lemma makeFaultMessage_corres:
"corres (=) (tcb_at t) (tcb_at' t)
(make_fault_msg ft t)
(makeFaultMessage (fault_map ft) t)"
apply (cases ft, simp_all add: makeFaultMessage_def split del: if_split)
apply (rule corres_guard_imp)
apply (rule corres_split_eqr[OF asUser_getRestartPC_corres])
apply (rule corres_trivial, simp add: fromEnum_def enum_bool)
apply (wp | simp)+
apply (simp add: ARM_H.syscallMessage_def)
apply (rule corres_guard_imp)
apply (rule corres_split_eqr[OF asUser_mapM_getRegister_corres])
apply (rule corres_trivial, simp)
apply (wp | simp)+
apply (simp add: ARM_H.exceptionMessage_def)
apply (rule corres_guard_imp)
apply (rule corres_split_eqr[OF asUser_mapM_getRegister_corres])
apply (rule corres_trivial, simp)
apply (wp | simp)+
apply (rule makeArchFaultMessage_corres)
done
lemma makeFaultMessage_inv[wp]:
"\<lbrace>P\<rbrace> makeFaultMessage ft t \<lbrace>\<lambda>rv. P\<rbrace>"
apply (cases ft, simp_all add: makeFaultMessage_def)
apply (wp asUser_inv mapM_wp' det_mapM[where S=UNIV]
det_getRestartPC getRestartPC_inv
| clarsimp simp: getRegister_def makeArchFaultMessage_def
split: arch_fault.split)+
done
lemmas threadget_fault_corres =
threadGet_corres [where r = fault_rel_optionation
and f = tcb_fault and f' = tcbFault,
simplified tcb_relation_def, simplified]
lemma doFaultTransfer_corres:
"corres dc
(obj_at (\<lambda>ko. \<exists>tcb ft. ko = TCB tcb \<and> tcb_fault tcb = Some ft) sender
and tcb_at receiver and case_option \<top> in_user_frame recv_buf)
(tcb_at' sender and tcb_at' receiver and
case_option \<top> valid_ipc_buffer_ptr' recv_buf)
(do_fault_transfer badge sender receiver recv_buf)
(doFaultTransfer badge sender receiver recv_buf)"
apply (clarsimp simp: do_fault_transfer_def doFaultTransfer_def split_def
ARM_H.badgeRegister_def badge_register_def)
apply (rule_tac Q="\<lambda>fault. K (\<exists>f. fault = Some f) and
tcb_at sender and tcb_at receiver and
case_option \<top> in_user_frame recv_buf"
and Q'="\<lambda>fault'. tcb_at' sender and tcb_at' receiver and
case_option \<top> valid_ipc_buffer_ptr' recv_buf"
in corres_underlying_split)
apply (rule corres_guard_imp)
apply (rule threadget_fault_corres)
apply (clarsimp simp: obj_at_def is_tcb)+
apply (rule corres_assume_pre)
apply (fold assert_opt_def | unfold haskell_fail_def)+
apply (rule corres_assert_opt_assume)
apply (clarsimp split: option.splits
simp: fault_rel_optionation_def assert_opt_def
map_option_case)
defer
defer
apply (clarsimp simp: fault_rel_optionation_def)
apply (wp thread_get_wp)
apply (clarsimp simp: obj_at_def is_tcb)
apply wp
apply (rule corres_guard_imp)
apply (rule corres_split_eqr[OF makeFaultMessage_corres])
apply (rule corres_split_eqr[OF setMRs_corres [OF refl]])
apply (rule corres_split_nor[OF setMessageInfo_corres])
apply simp
apply (rule asUser_setRegister_corres)
apply (wp | simp)+
apply (rule corres_guard_imp)
apply (rule corres_split_eqr[OF makeFaultMessage_corres])
apply (rule corres_split_eqr[OF setMRs_corres [OF refl]])
apply (rule corres_split_nor[OF setMessageInfo_corres])
apply simp
apply (rule asUser_setRegister_corres)
apply (wp | simp)+
done
lemma doFaultTransfer_invs[wp]:
"\<lbrace>invs' and tcb_at' receiver\<rbrace>
doFaultTransfer badge sender receiver recv_buf
\<lbrace>\<lambda>rv. invs'\<rbrace>"
by (simp add: doFaultTransfer_def split_def | wp
| clarsimp split: option.split)+
lemma lookupIPCBuffer_valid_ipc_buffer [wp]:
"\<lbrace>valid_objs'\<rbrace> VSpace_H.lookupIPCBuffer b s \<lbrace>case_option \<top> valid_ipc_buffer_ptr'\<rbrace>"
unfolding lookupIPCBuffer_def ARM_H.lookupIPCBuffer_def
apply (simp add: Let_def getSlotCap_def getThreadBufferSlot_def
locateSlot_conv threadGet_def comp_def)
apply (wp getCTE_wp getObject_tcb_wp | wpc)+
apply (clarsimp simp del: imp_disjL)
apply (drule obj_at_ko_at')
apply (clarsimp simp del: imp_disjL)
apply (rule_tac x = ko in exI)
apply (frule ko_at_cte_ipcbuffer)
apply (clarsimp simp: cte_wp_at_ctes_of simp del: imp_disjL)
apply (clarsimp simp: valid_ipc_buffer_ptr'_def)
apply (frule (1) ko_at_valid_objs')
apply (clarsimp simp: projectKO_opts_defs split: kernel_object.split_asm)
apply (clarsimp simp add: valid_obj'_def valid_tcb'_def
isCap_simps cte_level_bits_def field_simps)
apply (drule bspec [OF _ ranI [where a = "0x40"]])
apply simp
apply (clarsimp simp add: valid_cap'_def)
apply (rule conjI)
apply (rule aligned_add_aligned)
apply (clarsimp simp add: capAligned_def)
apply assumption
apply (erule is_aligned_andI1)
apply (case_tac xd, simp_all add: msg_align_bits)[1]
apply (clarsimp simp: capAligned_def)
apply (drule_tac x =
"(tcbIPCBuffer ko && mask (pageBitsForSize xd)) >> pageBits" in spec)
apply (subst(asm) mult.commute mult.left_commute, subst(asm) shiftl_t2n[symmetric])
apply (simp add: shiftr_shiftl1)
apply (subst (asm) mask_out_add_aligned)
apply (erule is_aligned_weaken [OF _ pbfs_atleast_pageBits])
apply (erule mp)
apply (rule shiftr_less_t2n)
apply (clarsimp simp: pbfs_atleast_pageBits)
apply (rule and_mask_less')
apply (simp add: word_bits_conv)
done
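(* Top-level IPC transfer correspondence: both levels look up the receive
buffer and the sender's fault, then branch to the normal or the fault
transfer accordingly. *)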
lemma doIPCTransfer_corres:
"corres dc
(tcb_at s and tcb_at r and valid_objs and pspace_aligned
and valid_list
and pspace_distinct and valid_mdb and cur_tcb
and (\<lambda>s. case ep of Some x \<Rightarrow> ep_at x s | _ \<Rightarrow> True))
(tcb_at' s and tcb_at' r and valid_pspace' and cur_tcb'
and (\<lambda>s. case ep of Some x \<Rightarrow> ep_at' x s | _ \<Rightarrow> True))
(do_ipc_transfer s ep bg grt r)
(doIPCTransfer s ep bg grt r)"
apply (simp add: do_ipc_transfer_def doIPCTransfer_def)
apply (rule_tac Q="%receiveBuffer sa. tcb_at s sa \<and> valid_objs sa \<and>
pspace_aligned sa \<and> tcb_at r sa \<and>
cur_tcb sa \<and> valid_mdb sa \<and> valid_list sa \<and> pspace_distinct sa \<and>
(case ep of None \<Rightarrow> True | Some x \<Rightarrow> ep_at x sa) \<and>
case_option (\<lambda>_. True) in_user_frame receiveBuffer sa \<and>
obj_at (\<lambda>ko. \<exists>tcb. ko = TCB tcb
\<comment> \<open>\<exists>ft. tcb_fault tcb = Some ft\<close>) s sa"
in corres_underlying_split)
apply (rule corres_guard_imp)
apply (rule lookupIPCBuffer_corres')
apply auto[2]
apply (rule corres_underlying_split [OF _ _ thread_get_sp threadGet_inv])
apply (rule corres_guard_imp)
apply (rule threadget_fault_corres)
apply simp
defer
apply (rule corres_guard_imp)
apply (subst case_option_If)+
apply (rule corres_if2)
apply (simp add: fault_rel_optionation_def)
apply (rule corres_split_eqr[OF lookupIPCBuffer_corres'])
apply (simp add: dc_def[symmetric])
apply (rule doNormalTransfer_corres)
apply (wp | simp add: valid_pspace'_def)+
apply (simp add: dc_def[symmetric])
apply (rule doFaultTransfer_corres)
apply (clarsimp simp: obj_at_def)
apply (erule ignore_if)
apply (wp|simp add: obj_at_def is_tcb valid_pspace'_def)+
done
crunch ifunsafe[wp]: doIPCTransfer "if_unsafe_then_cap'"
(wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' ignore: transferCapsToSlots
simp: zipWithM_x_mapM ball_conj_distrib)
crunch iflive[wp]: doIPCTransfer "if_live_then_nonz_cap'"
(wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' ignore: transferCapsToSlots
simp: zipWithM_x_mapM ball_conj_distrib)
lemma valid_pspace_valid_objs'[elim!]:
"valid_pspace' s \<Longrightarrow> valid_objs' s"
by (simp add: valid_pspace'_def)
crunch vp[wp]: doIPCTransfer "valid_pspace'"
(wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' transferCapsToSlots_vp simp: ball_conj_distrib)
crunch sch_act_wf[wp]: doIPCTransfer "\<lambda>s. sch_act_wf (ksSchedulerAction s) s"
(wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM)
crunch vq[wp]: doIPCTransfer "Invariants_H.valid_queues"
(wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM)
crunch vq'[wp]: doIPCTransfer "valid_queues'"
(wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM)
crunch state_refs_of[wp]: doIPCTransfer "\<lambda>s. P (state_refs_of' s)"
(wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM)
crunch ct[wp]: doIPCTransfer "cur_tcb'"
(wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM)
crunch idle'[wp]: doIPCTransfer "valid_idle'"
(wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM)
crunch typ_at'[wp]: doIPCTransfer "\<lambda>s. P (typ_at' T p s)"
(wp: crunch_wps simp: zipWithM_x_mapM)
lemmas dit'_typ_ats[wp] = typ_at_lifts [OF doIPCTransfer_typ_at']
crunch irq_node'[wp]: doIPCTransfer "\<lambda>s. P (irq_node' s)"
(wp: crunch_wps simp: crunch_simps)
lemmas dit_irq_node'[wp]
= valid_irq_node_lift [OF doIPCTransfer_irq_node' doIPCTransfer_typ_at']
crunch valid_arch_state'[wp]: doIPCTransfer "valid_arch_state'"
(wp: crunch_wps simp: crunch_simps)
(* Levity: added (20090126 19:32:26) *)
declare asUser_global_refs' [wp]
lemma lec_valid_cap' [wp]:
"\<lbrace>valid_objs'\<rbrace> lookupExtraCaps thread xa mi \<lbrace>\<lambda>rv s. (\<forall>x\<in>set rv. s \<turnstile>' fst x)\<rbrace>, -"
apply (rule hoare_pre, rule hoare_post_imp_R)
apply (rule hoare_vcg_conj_lift_R[where R=valid_objs' and S="\<lambda>_. valid_objs'"])
apply (rule lookupExtraCaps_srcs)
apply wp
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (fastforce elim: ctes_of_valid')
apply simp
done
crunch objs'[wp]: doIPCTransfer "valid_objs'"
(wp: crunch_wps hoare_vcg_const_Ball_lift
transferCapsToSlots_valid_objs
simp: zipWithM_x_mapM ball_conj_distrib)
crunch global_refs'[wp]: doIPCTransfer "valid_global_refs'"
(wp: crunch_wps hoare_vcg_const_Ball_lift threadSet_global_refsT
transferCapsToSlots_valid_globals
simp: zipWithM_x_mapM ball_conj_distrib)
declare asUser_irq_handlers' [wp]
crunch irq_handlers'[wp]: doIPCTransfer "valid_irq_handlers'"
(wp: crunch_wps hoare_vcg_const_Ball_lift threadSet_irq_handlers'
transferCapsToSlots_irq_handlers
simp: zipWithM_x_mapM ball_conj_distrib )
crunch irq_states'[wp]: doIPCTransfer "valid_irq_states'"
(wp: crunch_wps no_irq no_irq_mapM no_irq_storeWord no_irq_loadWord
no_irq_case_option simp: crunch_simps zipWithM_x_mapM)
crunch pde_mappings'[wp]: doIPCTransfer "valid_pde_mappings'"
(wp: crunch_wps simp: crunch_simps)
crunch irqs_masked'[wp]: doIPCTransfer "irqs_masked'"
(wp: crunch_wps simp: crunch_simps rule: irqs_masked_lift)
lemma doIPCTransfer_invs[wp]:
"\<lbrace>invs' and tcb_at' s and tcb_at' r\<rbrace>
doIPCTransfer s ep bg grt r
\<lbrace>\<lambda>rv. invs'\<rbrace>"
apply (simp add: doIPCTransfer_def)
apply (wpsimp wp: hoare_drop_imp)
done
crunch nosch[wp]: doIPCTransfer "\<lambda>s. P (ksSchedulerAction s)"
(wp: hoare_drop_imps hoare_vcg_split_case_option mapM_wp'
simp: split_def zipWithM_x_mapM)
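(* A fault reply writes the (sanitised) message registers back to the
faulting thread; both levels write the same registers and agree on the
resulting restart decision (label = 0). *)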
lemma handle_fault_reply_registers_corres:
"corres (=) (tcb_at t) (tcb_at' t)
(do t' \<leftarrow> arch_get_sanitise_register_info t;
y \<leftarrow> as_user t
(zipWithM_x
(\<lambda>r v. setRegister r
(sanitise_register t' r v))
msg_template msg);
return (label = 0)
od)
(do t' \<leftarrow> getSanitiseRegisterInfo t;
y \<leftarrow> asUser t
(zipWithM_x
(\<lambda>r v. setRegister r (sanitiseRegister t' r v))
msg_template msg);
return (label = 0)
od)"
apply (rule corres_guard_imp)
apply (clarsimp simp: arch_get_sanitise_register_info_def getSanitiseRegisterInfo_def)
apply (rule corres_split)
apply (rule asUser_corres')
apply(simp add: setRegister_def sanitise_register_def
sanitiseRegister_def syscallMessage_def)
apply(subst zipWithM_x_modify)+
apply(rule corres_modify')
apply (simp|wp)+
done
lemma handleFaultReply_corres:
"ft' = fault_map ft \<Longrightarrow>
corres (=) (tcb_at t) (tcb_at' t)
(handle_fault_reply ft t label msg)
(handleFaultReply ft' t label msg)"
apply (cases ft)
apply(simp_all add: handleFaultReply_def
handle_arch_fault_reply_def handleArchFaultReply_def
syscallMessage_def exceptionMessage_def
split: arch_fault.split)
by (rule handle_fault_reply_registers_corres)+
crunch typ_at'[wp]: handleFaultReply "\<lambda>s. P (typ_at' T p s)"
lemmas hfr_typ_ats[wp] = typ_at_lifts [OF handleFaultReply_typ_at']
crunch ct'[wp]: handleFaultReply "\<lambda>s. P (ksCurThread s)"
lemma doIPCTransfer_sch_act_simple [wp]:
"\<lbrace>sch_act_simple\<rbrace> doIPCTransfer sender endpoint badge grant receiver \<lbrace>\<lambda>_. sch_act_simple\<rbrace>"
by (simp add: sch_act_simple_def, wp)
lemma possibleSwitchTo_invs'[wp]:
"\<lbrace>invs' and st_tcb_at' runnable' t
and (\<lambda>s. ksSchedulerAction s = ResumeCurrentThread \<longrightarrow> ksCurThread s \<noteq> t)\<rbrace>
possibleSwitchTo t \<lbrace>\<lambda>_. invs'\<rbrace>"
apply (simp add: possibleSwitchTo_def curDomain_def)
apply (wp tcbSchedEnqueue_invs' ssa_invs')
apply (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt])
apply (wpsimp wp: ssa_invs' threadGet_wp)+
apply (clarsimp dest!: obj_at_ko_at' simp: tcb_in_cur_domain'_def obj_at'_def)
done
crunch cur' [wp]: isFinalCapability "\<lambda>s. P (cur_tcb' s)"
(simp: crunch_simps unless_when
wp: crunch_wps getObject_inv loadObject_default_inv)
crunch ct' [wp]: deleteCallerCap "\<lambda>s. P (ksCurThread s)"
(simp: crunch_simps unless_when
wp: crunch_wps getObject_inv loadObject_default_inv)
lemma getThreadCallerSlot_inv:
"\<lbrace>P\<rbrace> getThreadCallerSlot t \<lbrace>\<lambda>_. P\<rbrace>"
by (simp add: getThreadCallerSlot_def, wp)
lemma deleteCallerCap_ct_not_ksQ:
"\<lbrace>invs' and ct_in_state' simple' and sch_act_sane
and (\<lambda>s. ksCurThread s \<notin> set (ksReadyQueues s p))\<rbrace>
deleteCallerCap t
\<lbrace>\<lambda>rv s. ksCurThread s \<notin> set (ksReadyQueues s p)\<rbrace>"
apply (simp add: deleteCallerCap_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv)
apply (wp getThreadCallerSlot_inv cteDeleteOne_ct_not_ksQ getCTE_wp)
apply (clarsimp simp: cte_wp_at_ctes_of)
done
crunch tcb_at'[wp]: unbindNotification "tcb_at' x"
lemma finaliseCapTrue_standin_tcb_at' [wp]:
"\<lbrace>tcb_at' x\<rbrace> finaliseCapTrue_standin cap v2 \<lbrace>\<lambda>_. tcb_at' x\<rbrace>"
apply (simp add: finaliseCapTrue_standin_def Let_def)
apply (safe)
apply (wp getObject_ntfn_inv
| wpc
| simp)+
done
lemma finaliseCapTrue_standin_cur':
"\<lbrace>\<lambda>s. cur_tcb' s\<rbrace> finaliseCapTrue_standin cap v2 \<lbrace>\<lambda>_ s'. cur_tcb' s'\<rbrace>"
apply (simp add: cur_tcb'_def)
apply (rule hoare_lift_Pf2 [OF _ finaliseCapTrue_standin_ct'])
apply (wp)
done
lemma cteDeleteOne_cur' [wp]:
"\<lbrace>\<lambda>s. cur_tcb' s\<rbrace> cteDeleteOne slot \<lbrace>\<lambda>_ s'. cur_tcb' s'\<rbrace>"
apply (simp add: cteDeleteOne_def unless_def when_def)
apply (wp hoare_drop_imps finaliseCapTrue_standin_cur' isFinalCapability_cur'
| simp add: split_def | wp (once) cur_tcb_lift)+
done
lemma handleFaultReply_cur' [wp]:
"\<lbrace>\<lambda>s. cur_tcb' s\<rbrace> handleFaultReply x0 thread label msg \<lbrace>\<lambda>_ s'. cur_tcb' s'\<rbrace>"
apply (clarsimp simp add: cur_tcb'_def)
apply (rule hoare_lift_Pf2 [OF _ handleFaultReply_ct'])
apply (wp)
done
lemma capClass_Reply:
"capClass cap = ReplyClass tcb \<Longrightarrow> isReplyCap cap \<and> capTCBPtr cap = tcb"
apply (cases cap, simp_all add: isCap_simps)
apply (rename_tac arch_capability)
apply (case_tac arch_capability, simp_all)
done
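(* A waiting (non-master) reply cap always sits at the end of its MDB chain,
with the master reply cap as its immediate predecessor; this is the key MDB
fact used by the doReplyTransfer correspondence below. *)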
lemma reply_cap_end_mdb_chain:
"\<lbrakk> cte_wp_at (is_reply_cap_to t) slot s; invs s;
invs' s';
(s, s') \<in> state_relation; ctes_of s' (cte_map slot) = Some cte \<rbrakk>
\<Longrightarrow> (mdbPrev (cteMDBNode cte) \<noteq> nullPointer
\<and> mdbNext (cteMDBNode cte) = nullPointer)
\<and> cte_wp_at' (\<lambda>cte. isReplyCap (cteCap cte) \<and> capReplyMaster (cteCap cte))
(mdbPrev (cteMDBNode cte)) s'"
apply (clarsimp simp only: cte_wp_at_reply_cap_to_ex_rights)
apply (frule(1) pspace_relation_ctes_ofI[OF state_relation_pspace_relation],
clarsimp+)
apply (subgoal_tac "\<exists>slot' rights'. caps_of_state s slot' = Some (cap.ReplyCap t True rights')
\<and> descendants_of slot' (cdt s) = {slot}")
apply (elim state_relationE exE)
apply (clarsimp simp: cdt_relation_def
simp del: split_paired_All)
apply (drule spec, drule(1) mp[OF _ caps_of_state_cte_at])
apply (frule(1) pspace_relation_cte_wp_at[OF _ caps_of_state_cteD],
clarsimp+)
apply (clarsimp simp: descendants_of'_def cte_wp_at_ctes_of)
apply (frule_tac f="\<lambda>S. cte_map slot \<in> S" in arg_cong, simp(no_asm_use))
apply (frule invs_mdb'[unfolded valid_mdb'_def])
apply (rule context_conjI)
apply (clarsimp simp: nullPointer_def valid_mdb_ctes_def)
apply (erule(4) subtree_prev_0)
apply (rule conjI)
apply (rule ccontr)
apply (frule valid_mdb_no_loops, simp add: no_loops_def)
apply (drule_tac x="cte_map slot" in spec)
apply (erule notE, rule r_into_trancl, rule ccontr)
apply (clarsimp simp: mdb_next_unfold valid_mdb_ctes_def nullPointer_def)
apply (rule valid_dlistEn, assumption+)
apply (subgoal_tac "ctes_of s' \<turnstile> cte_map slot \<leadsto> mdbNext (cteMDBNode cte)")
apply (frule(3) class_linksD)
apply (clarsimp simp: isCap_simps dest!: capClass_Reply[OF sym])
apply (drule_tac f="\<lambda>S. mdbNext (cteMDBNode cte) \<in> S" in arg_cong)
apply (simp, erule notE, rule subtree.trans_parent, assumption+)
apply (case_tac ctea, case_tac cte')
apply (clarsimp simp add: parentOf_def isMDBParentOf_CTE)
apply (simp add: sameRegionAs_def2 isCap_simps)
apply (erule subtree.cases)
apply (clarsimp simp: parentOf_def isMDBParentOf_CTE)
apply (clarsimp simp: parentOf_def isMDBParentOf_CTE)
apply (simp add: mdb_next_unfold)
apply (erule subtree.cases)
apply (clarsimp simp: valid_mdb_ctes_def)
apply (erule_tac cte=ctea in valid_dlistEn, assumption)
apply (simp add: mdb_next_unfold)
apply (clarsimp simp: mdb_next_unfold isCap_simps)
apply (drule_tac f="\<lambda>S. c' \<in> S" in arg_cong)
apply (clarsimp simp: no_loops_direct_simp valid_mdb_no_loops)
apply (frule invs_mdb)
apply (drule invs_valid_reply_caps)
apply (clarsimp simp: valid_mdb_def reply_mdb_def
valid_reply_caps_def reply_caps_mdb_def
cte_wp_at_caps_of_state
simp del: split_paired_All)
apply (erule_tac x=slot in allE, erule_tac x=t in allE, erule impE, fast)
apply (elim exEI)
apply clarsimp
apply (subgoal_tac "P" for P, rule sym, rule equalityI, assumption)
apply clarsimp
apply (erule(4) unique_reply_capsD)
apply (simp add: descendants_of_def)
apply (rule r_into_trancl)
apply (simp add: cdt_parent_rel_def is_cdt_parent_def)
done
crunch valid_objs'[wp]: cteDeleteOne "valid_objs'"
(simp: crunch_simps unless_def
wp: crunch_wps getObject_inv loadObject_default_inv)
crunch nosch[wp]: handleFaultReply "\<lambda>s. P (ksSchedulerAction s)"
lemma emptySlot_weak_sch_act[wp]:
"\<lbrace>\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>
emptySlot slot irq
\<lbrace>\<lambda>_ s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>"
by (wp weak_sch_act_wf_lift tcb_in_cur_domain'_lift)
lemma cancelAllIPC_weak_sch_act_wf[wp]:
"\<lbrace>\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>
cancelAllIPC epptr
\<lbrace>\<lambda>_ s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>"
apply (simp add: cancelAllIPC_def)
apply (wp rescheduleRequired_weak_sch_act_wf hoare_drop_imp | wpc | simp)+
done
lemma cancelAllSignals_weak_sch_act_wf[wp]:
"\<lbrace>\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>
cancelAllSignals ntfnptr
\<lbrace>\<lambda>_ s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>"
apply (simp add: cancelAllSignals_def)
apply (wp rescheduleRequired_weak_sch_act_wf hoare_drop_imp | wpc | simp)+
done
crunch weak_sch_act_wf[wp]: finaliseCapTrue_standin "\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s"
(ignore: setThreadState
simp: crunch_simps
wp: crunch_wps getObject_inv loadObject_default_inv)
lemma cteDeleteOne_weak_sch_act[wp]:
"\<lbrace>\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>
cteDeleteOne sl
\<lbrace>\<lambda>_ s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>"
apply (simp add: cteDeleteOne_def unless_def)
apply (wp hoare_drop_imps finaliseCapTrue_standin_cur' isFinalCapability_cur'
| simp add: split_def)+
done
crunch weak_sch_act_wf[wp]: emptySlot "\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s"
crunch pred_tcb_at'[wp]: handleFaultReply "pred_tcb_at' proj P t"
crunch valid_queues[wp]: handleFaultReply "Invariants_H.valid_queues"
crunch valid_queues'[wp]: handleFaultReply "valid_queues'"
crunch tcb_in_cur_domain'[wp]: handleFaultReply "tcb_in_cur_domain' t"
crunch sch_act_wf[wp]: unbindNotification "\<lambda>s. sch_act_wf (ksSchedulerAction s) s"
(wp: sbn_sch_act')
crunch valid_queues'[wp]: cteDeleteOne valid_queues'
(simp: crunch_simps inQ_def
wp: crunch_wps sts_st_tcb' getObject_inv loadObject_default_inv
threadSet_valid_queues' rescheduleRequired_valid_queues'_weak)
lemma cancelSignal_valid_queues'[wp]:
"\<lbrace>valid_queues'\<rbrace> cancelSignal t ntfn \<lbrace>\<lambda>rv. valid_queues'\<rbrace>"
apply (simp add: cancelSignal_def)
apply (rule hoare_pre)
apply (wp getNotification_wp| wpc | simp)+
done
lemma cancelIPC_valid_queues'[wp]:
"\<lbrace>valid_queues' and (\<lambda>s. sch_act_wf (ksSchedulerAction s) s) \<rbrace> cancelIPC t \<lbrace>\<lambda>rv. valid_queues'\<rbrace>"
apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def locateSlot_conv liftM_def)
apply (rule hoare_seq_ext[OF _ gts_sp'])
apply (case_tac state, simp_all) defer 2
apply (rule hoare_pre)
apply ((wp getEndpoint_wp getCTE_wp | wpc | simp)+)[8]
apply (wp cteDeleteOne_valid_queues')
apply (rule_tac Q="\<lambda>_. valid_queues' and (\<lambda>s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp)
apply (clarsimp simp: capHasProperty_def cte_wp_at_ctes_of)
apply (wp threadSet_valid_queues' threadSet_sch_act| simp)+
apply (clarsimp simp: inQ_def)
done
crunch valid_objs'[wp]: handleFaultReply valid_objs'
lemma cte_wp_at_is_reply_cap_toI:
"cte_wp_at ((=) (cap.ReplyCap t False rights)) ptr s
\<Longrightarrow> cte_wp_at (is_reply_cap_to t) ptr s"
by (fastforce simp: cte_wp_at_reply_cap_to_ex_rights)
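(* Correspondence for reply transfers, stated for a reply cap with explicit
rights in the given slot. *)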
lemma doReplyTransfer_corres:
"corres dc
(einvs and tcb_at receiver and tcb_at sender
and cte_wp_at ((=) (cap.ReplyCap receiver False rights)) slot)
(invs' and tcb_at' sender and tcb_at' receiver
and valid_pspace' and cte_at' (cte_map slot))
(do_reply_transfer sender receiver slot grant)
(doReplyTransfer sender receiver (cte_map slot) grant)"
apply (simp add: do_reply_transfer_def doReplyTransfer_def cong: option.case_cong)
apply (rule corres_underlying_split [OF _ _ gts_sp gts_sp'])
apply (rule corres_guard_imp)
apply (rule getThreadState_corres, (clarsimp simp add: st_tcb_at_tcb_at)+)
apply (rule_tac F = "awaiting_reply state" in corres_req)
apply (clarsimp simp add: st_tcb_at_def obj_at_def is_tcb)
apply (fastforce simp: invs_def valid_state_def intro: has_reply_cap_cte_wpD
dest: has_reply_cap_cte_wpD
dest!: valid_reply_caps_awaiting_reply cte_wp_at_is_reply_cap_toI)
apply (case_tac state, simp_all add: bind_assoc)
apply (simp add: isReply_def liftM_def)
apply (rule corres_symb_exec_r[OF _ getCTE_sp getCTE_inv, rotated])
apply (rule no_fail_pre, wp)
apply clarsimp
apply (rename_tac mdbnode)
apply (rule_tac P="Q" and Q="Q" and P'="Q'" and Q'="(\<lambda>s. Q' s \<and> R' s)" for Q Q' R'
in stronger_corres_guard_imp[rotated])
apply assumption
apply (rule conjI, assumption)
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (drule cte_wp_at_is_reply_cap_toI)
apply (erule(4) reply_cap_end_mdb_chain)
apply (rule corres_assert_assume[rotated], simp)
apply (simp add: getSlotCap_def)
apply (rule corres_symb_exec_r[OF _ getCTE_sp getCTE_inv, rotated])
apply (rule no_fail_pre, wp)
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (rule corres_assert_assume[rotated])
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (rule corres_guard_imp)
apply (rule corres_split[OF threadget_fault_corres])
apply (case_tac rv, simp_all add: fault_rel_optionation_def bind_assoc)[1]
apply (rule corres_split[OF doIPCTransfer_corres])
apply (rule corres_split[OF cap_delete_one_corres])
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule possibleSwitchTo_corres)
apply (wp set_thread_state_runnable_valid_sched set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' sts_valid_queues sts_valid_objs' delete_one_tcbDomain_obj_at'
| simp add: valid_tcb_state'_def)+
apply (strengthen cte_wp_at_reply_cap_can_fast_finalise)
apply (wp hoare_vcg_conj_lift)
apply (rule hoare_strengthen_post [OF do_ipc_transfer_non_null_cte_wp_at])
prefer 2
apply (erule cte_wp_at_weakenE)
apply (fastforce)
apply (clarsimp simp:is_cap_simps)
apply (wp weak_valid_sched_action_lift)+
apply (rule_tac Q="\<lambda>_. valid_queues' and valid_objs' and cur_tcb' and tcb_at' receiver and (\<lambda>s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp, simp add: sch_act_wf_weak)
apply (wp tcb_in_cur_domain'_lift)
defer
apply (simp)
apply (wp)+
apply (clarsimp)
apply (rule conjI, erule invs_valid_objs)
apply (rule conjI, clarsimp)+
apply (rule conjI)
apply (erule cte_wp_at_weakenE)
apply (clarsimp)
apply (rule conjI, rule refl)
apply (fastforce)
apply (clarsimp simp: invs_def valid_sched_def valid_sched_action_def)
apply (simp)
apply (auto simp: invs'_def valid_state'_def)[1]
apply (rule corres_guard_imp)
apply (rule corres_split[OF cap_delete_one_corres])
apply (rule corres_split_mapr[OF getMessageInfo_corres])
apply (rule corres_split_eqr[OF lookupIPCBuffer_corres'])
apply (rule corres_split_eqr[OF getMRs_corres])
apply (simp(no_asm) del: dc_simp)
apply (rule corres_split_eqr[OF handleFaultReply_corres])
apply simp
apply (rule corres_split)
apply (rule threadset_corresT;
clarsimp simp add: tcb_relation_def fault_rel_optionation_def
tcb_cap_cases_def tcb_cte_cases_def exst_same_def)
apply (rule_tac P="valid_sched and cur_tcb and tcb_at receiver"
and P'="tcb_at' receiver and cur_tcb'
and (\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s)
and Invariants_H.valid_queues and valid_queues' and valid_objs'"
in corres_inst)
apply (case_tac rvb, simp_all)[1]
apply (rule corres_guard_imp)
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (fold dc_def, rule possibleSwitchTo_corres)
apply simp
apply (wp static_imp_wp static_imp_conj_wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at'
sts_st_tcb' sts_valid_queues | simp | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+
apply (rule corres_guard_imp)
apply (rule setThreadState_corres)
apply clarsimp+
apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state
thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues'
threadSet_tcbDomain_triv threadSet_valid_objs'
| simp add: valid_tcb_state'_def)+
apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state
thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues'
| simp add: runnable_def inQ_def valid_tcb'_def)+
apply (rule_tac Q="\<lambda>_. valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and valid_objs and pspace_aligned"
in hoare_strengthen_post [rotated], clarsimp)
apply (wp)
apply (rule hoare_chain [OF cap_delete_one_invs])
apply (assumption)
apply (rule conjI, clarsimp)
apply (clarsimp simp add: invs_def valid_state_def)
apply (rule_tac Q="\<lambda>_. tcb_at' sender and tcb_at' receiver and invs'"
in hoare_strengthen_post [rotated])
apply (solves\<open>auto simp: invs'_def valid_state'_def\<close>)
apply wp
apply clarsimp
apply (rule conjI)
apply (erule cte_wp_at_weakenE)
apply (clarsimp simp add: can_fast_finalise_def)
apply (erule(1) emptyable_cte_wp_atD)
apply (rule allI, rule impI)
apply (clarsimp simp add: is_master_reply_cap_def)
apply (clarsimp)
done
(* A variant for when we cannot talk about reply cap rights explicitly (for
instance, when a schematic ?rights would be generated too early). *)
lemma doReplyTransfer_corres':
"corres dc
(einvs and tcb_at receiver and tcb_at sender
and cte_wp_at (is_reply_cap_to receiver) slot)
(invs' and tcb_at' sender and tcb_at' receiver
and valid_pspace' and cte_at' (cte_map slot))
(do_reply_transfer sender receiver slot grant)
(doReplyTransfer sender receiver (cte_map slot) grant)"
using doReplyTransfer_corres[of receiver sender _ slot]
by (fastforce simp add: cte_wp_at_reply_cap_to_ex_rights corres_underlying_def)
lemma valid_pspace'_splits[elim!]:
"valid_pspace' s \<Longrightarrow> valid_objs' s"
"valid_pspace' s \<Longrightarrow> pspace_aligned' s"
"valid_pspace' s \<Longrightarrow> pspace_distinct' s"
"valid_pspace' s \<Longrightarrow> valid_mdb' s"
"valid_pspace' s \<Longrightarrow> no_0_obj' s"
by (simp add: valid_pspace'_def)+
lemma sts_valid_pspace_hangers:
"\<lbrace>valid_pspace' and tcb_at' t and
valid_tcb_state' st\<rbrace> setThreadState st t \<lbrace>\<lambda>rv. valid_objs'\<rbrace>"
"\<lbrace>valid_pspace' and tcb_at' t and
valid_tcb_state' st\<rbrace> setThreadState st t \<lbrace>\<lambda>rv. pspace_distinct'\<rbrace>"
"\<lbrace>valid_pspace' and tcb_at' t and
valid_tcb_state' st\<rbrace> setThreadState st t \<lbrace>\<lambda>rv. pspace_aligned'\<rbrace>"
"\<lbrace>valid_pspace' and tcb_at' t and
valid_tcb_state' st\<rbrace> setThreadState st t \<lbrace>\<lambda>rv. valid_mdb'\<rbrace>"
"\<lbrace>valid_pspace' and tcb_at' t and
valid_tcb_state' st\<rbrace> setThreadState st t \<lbrace>\<lambda>rv. no_0_obj'\<rbrace>"
by (safe intro!: hoare_strengthen_post [OF sts'_valid_pspace'_inv])
declare no_fail_getSlotCap [wp]
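(* Setting up the caller cap: the sender is marked blocked-on-reply and a
reply cap, derived from the sender's master reply cap, is inserted into the
receiver's caller slot. *)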
lemma setupCallerCap_corres:
"corres dc
(st_tcb_at (Not \<circ> halted) sender and tcb_at receiver and
st_tcb_at (Not \<circ> awaiting_reply) sender and valid_reply_caps and
valid_objs and pspace_distinct and pspace_aligned and valid_mdb
and valid_list and
valid_reply_masters and cte_wp_at (\<lambda>c. c = cap.NullCap) (receiver, tcb_cnode_index 3))
(tcb_at' sender and tcb_at' receiver and valid_pspace'
and (\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s))
(setup_caller_cap sender receiver grant)
(setupCallerCap sender receiver grant)"
supply if_split[split del]
apply (simp add: setup_caller_cap_def setupCallerCap_def
getThreadReplySlot_def locateSlot_conv
getThreadCallerSlot_def)
apply (rule stronger_corres_guard_imp)
apply (rule corres_split_nor)
apply (rule setThreadState_corres)
apply (simp split: option.split)
apply (rule corres_symb_exec_r)
apply (rule_tac F="\<exists>r. cteCap masterCTE = capability.ReplyCap sender True r
\<and> mdbNext (cteMDBNode masterCTE) = nullPointer"
in corres_gen_asm2, clarsimp simp add: isCap_simps)
apply (rule corres_symb_exec_r)
apply (rule_tac F="rv = capability.NullCap"
in corres_gen_asm2, simp)
apply (rule cteInsert_corres)
apply (simp split: if_splits)
apply (simp add: cte_map_def tcbReplySlot_def
tcb_cnode_index_def cte_level_bits_def)
apply (simp add: cte_map_def tcbCallerSlot_def
tcb_cnode_index_def cte_level_bits_def)
apply (rule_tac Q="\<lambda>rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)"
in valid_prove_more)
apply (wp, (wp getSlotCap_wp)+)
apply blast
apply (rule no_fail_pre, wp)
apply (clarsimp simp: cte_wp_at'_def cte_at'_def)
apply (rule_tac Q="\<lambda>rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)"
in valid_prove_more)
apply (wp, (wp getCTE_wp')+)
apply blast
apply (rule no_fail_pre, wp)
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (wp sts_valid_pspace_hangers
| simp add: cte_wp_at_ctes_of)+
apply (clarsimp simp: valid_tcb_state_def st_tcb_at_reply_cap_valid
st_tcb_at_tcb_at st_tcb_at_caller_cap_null
split: option.split)
apply (clarsimp simp: valid_tcb_state'_def valid_cap'_def capAligned_reply_tcbI)
apply (frule(1) st_tcb_at_reply_cap_valid, simp, clarsimp)
apply (clarsimp simp: cte_wp_at_ctes_of cte_wp_at_caps_of_state)
apply (drule pspace_relation_cte_wp_at[rotated, OF caps_of_state_cteD],
erule valid_pspace'_splits, clarsimp+)+
apply (clarsimp simp: cte_wp_at_ctes_of cte_map_def tcbReplySlot_def
tcbCallerSlot_def tcb_cnode_index_def
is_cap_simps)
apply (auto intro: reply_no_descendants_mdbNext_null[OF not_waiting_reply_slot_no_descendants]
simp: cte_index_repair)
done
crunch tcb_at'[wp]: getThreadCallerSlot "tcb_at' t"
lemma getThreadReplySlot_tcb_at'[wp]:
"\<lbrace>tcb_at' t\<rbrace> getThreadReplySlot tcb \<lbrace>\<lambda>_. tcb_at' t\<rbrace>"
by (simp add: getThreadReplySlot_def, wp)
lemma setupCallerCap_tcb_at'[wp]:
"\<lbrace>tcb_at' t\<rbrace> setupCallerCap sender receiver grant \<lbrace>\<lambda>_. tcb_at' t\<rbrace>"
by (simp add: setupCallerCap_def, wp hoare_drop_imp)
crunch ct'[wp]: setupCallerCap "\<lambda>s. P (ksCurThread s)"
(wp: crunch_wps)
lemma cteInsert_sch_act_wf[wp]:
"\<lbrace>\<lambda>s. sch_act_wf (ksSchedulerAction s) s\<rbrace>
cteInsert newCap srcSlot destSlot
\<lbrace>\<lambda>_ s. sch_act_wf (ksSchedulerAction s) s\<rbrace>"
by (wp sch_act_wf_lift tcb_in_cur_domain'_lift)
lemma setupCallerCap_sch_act [wp]:
"\<lbrace>\<lambda>s. sch_act_not t s \<and> sch_act_wf (ksSchedulerAction s) s\<rbrace>
setupCallerCap t r g \<lbrace>\<lambda>_ s. sch_act_wf (ksSchedulerAction s) s\<rbrace>"
apply (simp add: setupCallerCap_def getSlotCap_def getThreadCallerSlot_def
getThreadReplySlot_def locateSlot_conv)
apply (wp getCTE_wp' sts_sch_act' hoare_drop_imps hoare_vcg_all_lift)
apply clarsimp
done
lemma possibleSwitchTo_weak_sch_act_wf[wp]:
"\<lbrace>\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s \<and> st_tcb_at' runnable' t s\<rbrace>
possibleSwitchTo t \<lbrace>\<lambda>rv s. weak_sch_act_wf (ksSchedulerAction s) s\<rbrace>"
apply (simp add: possibleSwitchTo_def setSchedulerAction_def threadGet_def curDomain_def
bitmap_fun_defs)
apply (wp rescheduleRequired_weak_sch_act_wf
weak_sch_act_wf_lift_linear[where f="tcbSchedEnqueue t"]
getObject_tcb_wp static_imp_wp
| wpc)+
apply (clarsimp simp: obj_at'_def projectKOs weak_sch_act_wf_def ps_clear_def tcb_in_cur_domain'_def)
done
lemmas transferCapsToSlots_pred_tcb_at' =
transferCapsToSlots_pres1 [OF cteInsert_pred_tcb_at']
crunches doIPCTransfer, possibleSwitchTo
for pred_tcb_at'[wp]: "pred_tcb_at' proj P t"
(wp: mapM_wp' crunch_wps simp: zipWithM_x_mapM)
lemma setSchedulerAction_ct_in_domain:
"\<lbrace>\<lambda>s. ct_idle_or_in_cur_domain' s
\<and> p \<noteq> ResumeCurrentThread \<rbrace> setSchedulerAction p
\<lbrace>\<lambda>_. ct_idle_or_in_cur_domain'\<rbrace>"
by (simp add:setSchedulerAction_def | wp)+
crunches setupCallerCap, doIPCTransfer, possibleSwitchTo
for ct_idle_or_in_cur_domain'[wp]: ct_idle_or_in_cur_domain'
and ksCurDomain[wp]: "\<lambda>s. P (ksCurDomain s)"
and ksDomSchedule[wp]: "\<lambda>s. P (ksDomSchedule s)"
(wp: crunch_wps setSchedulerAction_ct_in_domain simp: zipWithM_x_mapM)
crunch tcbDomain_obj_at'[wp]: doIPCTransfer "obj_at' (\<lambda>tcb. P (tcbDomain tcb)) t"
(wp: crunch_wps constOnFailure_wp simp: crunch_simps)
crunches possibleSwitchTo
for tcb_at'[wp]: "tcb_at' t"
and valid_pspace'[wp]: valid_pspace'
(wp: crunch_wps)
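(* The main correspondence result for sending on an endpoint. The proof first
   splits on the blocking flag bl and then on the endpoint state (idle, send
   queue, receive queue); only the receive case performs a transfer. *)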
lemma sendIPC_corres:
  (* call can only be true when invoked from handleSyscall for a SysCall
     event, which is always blocking. *)
assumes "call \<longrightarrow> bl"
shows
"corres dc (einvs and st_tcb_at active t and ep_at ep and ex_nonz_cap_to t)
(invs' and sch_act_not t and tcb_at' t and ep_at' ep)
(send_ipc bl call bg cg cgr t ep) (sendIPC bl call bg cg cgr t ep)"
proof -
show ?thesis
apply (insert assms)
apply (unfold send_ipc_def sendIPC_def Let_def)
apply (case_tac bl)
apply clarsimp
apply (rule corres_guard_imp)
apply (rule corres_split[OF getEndpoint_corres,
where
R="\<lambda>rv. einvs and st_tcb_at active t and ep_at ep and
valid_ep rv and obj_at (\<lambda>ob. ob = Endpoint rv) ep
and ex_nonz_cap_to t"
and
R'="\<lambda>rv'. invs' and tcb_at' t and sch_act_not t
and ep_at' ep and valid_ep' rv'"])
apply (case_tac rv)
apply (simp add: ep_relation_def)
apply (rule corres_guard_imp)
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule setEndpoint_corres)
apply (simp add: ep_relation_def)
apply wp+
apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def)
apply clarsimp
\<comment> \<open>concludes IdleEP if bl branch\<close>
apply (simp add: ep_relation_def)
apply (rule corres_guard_imp)
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule setEndpoint_corres)
apply (simp add: ep_relation_def)
apply wp+
apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def)
apply clarsimp
\<comment> \<open>concludes SendEP if bl branch\<close>
apply (simp add: ep_relation_def)
apply (rename_tac list)
apply (rule_tac F="list \<noteq> []" in corres_req)
apply (simp add: valid_ep_def)
apply (case_tac list)
apply simp
apply (clarsimp split del: if_split)
apply (rule corres_guard_imp)
apply (rule corres_split[OF setEndpoint_corres])
apply (simp add: ep_relation_def split: list.split)
apply (simp add: isReceive_def split del:if_split)
apply (rule corres_split[OF getThreadState_corres])
apply (rule_tac
F="\<exists>data. recv_state = Structures_A.BlockedOnReceive ep data"
in corres_gen_asm)
apply (clarsimp simp: case_bool_If case_option_If if3_fold
simp del: dc_simp split del: if_split cong: if_cong)
apply (rule corres_split[OF doIPCTransfer_corres])
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule corres_split[OF possibleSwitchTo_corres])
apply (fold when_def)[1]
apply (rule_tac P="call" and P'="call"
in corres_symmetric_bool_cases, blast)
apply (simp add: when_def dc_def[symmetric] split del: if_split)
apply (rule corres_if2, simp)
apply (rule setupCallerCap_corres)
apply (rule setThreadState_corres, simp)
apply (rule corres_trivial)
apply (simp add: when_def dc_def[symmetric] split del: if_split)
apply (simp split del: if_split add: if_apply_def2)
apply (wp hoare_drop_imps)[1]
apply (simp split del: if_split add: if_apply_def2)
apply (wp hoare_drop_imps)[1]
apply (wp | simp)+
apply (wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases)
apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf
sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases)[1]
apply (simp add: valid_tcb_state_def pred_conj_def)
apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg)
apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift
| clarsimp simp: is_cap_simps)+)[1]
apply (simp add: pred_conj_def)
apply (strengthen sch_act_wf_weak)
apply (simp add: valid_tcb_state'_def)
apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift hoare_drop_imps)[1]
apply (wp gts_st_tcb_at)+
apply (simp add: pred_conj_def cong: conj_cong)
apply (wp hoare_post_taut)
apply (simp)
apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb')+
apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def ep_redux_simps
ep_redux_simps' st_tcb_at_tcb_at valid_ep_def
cong: list.case_cong)
apply (drule(1) sym_refs_obj_atD[where P="\<lambda>ob. ob = e" for e])
apply (clarsimp simp: st_tcb_at_refs_of_rev st_tcb_at_reply_cap_valid
st_tcb_def2 valid_sched_def valid_sched_action_def)
apply (force simp: st_tcb_def2 dest!: st_tcb_at_caller_cap_null[simplified,rotated])
subgoal by (auto simp: valid_ep'_def invs'_def valid_state'_def split: list.split)
apply wp+
apply (clarsimp simp: ep_at_def2)+
apply (rule corres_guard_imp)
apply (rule corres_split[OF getEndpoint_corres,
where
R="\<lambda>rv. einvs and st_tcb_at active t and ep_at ep and
valid_ep rv and obj_at (\<lambda>k. k = Endpoint rv) ep"
and
R'="\<lambda>rv'. invs' and tcb_at' t and sch_act_not t
and ep_at' ep and valid_ep' rv'"])
apply (rename_tac rv rv')
apply (case_tac rv)
apply (simp add: ep_relation_def)
\<comment> \<open>concludes IdleEP branch if not bl and no ft\<close>
apply (simp add: ep_relation_def)
\<comment> \<open>concludes SendEP branch if not bl and no ft\<close>
apply (simp add: ep_relation_def)
apply (rename_tac list)
apply (rule_tac F="list \<noteq> []" in corres_req)
apply (simp add: valid_ep_def)
apply (case_tac list)
apply simp
apply (rule_tac F="a \<noteq> t" in corres_req)
apply (clarsimp simp: invs_def valid_state_def
valid_pspace_def)
apply (drule(1) sym_refs_obj_atD[where P="\<lambda>ob. ob = e" for e])
apply (clarsimp simp: st_tcb_at_def obj_at_def tcb_bound_refs_def2)
apply fastforce
apply (clarsimp split del: if_split)
apply (rule corres_guard_imp)
apply (rule corres_split[OF setEndpoint_corres])
apply (simp add: ep_relation_def split: list.split)
apply (rule corres_split[OF getThreadState_corres])
apply (rule_tac
F="\<exists>data. recv_state = Structures_A.BlockedOnReceive ep data"
in corres_gen_asm)
apply (clarsimp simp: isReceive_def case_bool_If
split del: if_split cong: if_cong)
apply (rule corres_split[OF doIPCTransfer_corres])
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule possibleSwitchTo_corres)
apply (simp add: if_apply_def2)
apply ((wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases |
simp add: if_apply_def2 split del: if_split)+)[1]
apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf
sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases)
apply (simp add: valid_tcb_state_def pred_conj_def)
apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift
| clarsimp simp:is_cap_simps)+)[1]
apply (simp add: valid_tcb_state'_def pred_conj_def)
apply (strengthen sch_act_wf_weak)
apply (wp weak_sch_act_wf_lift_linear hoare_drop_imps)
apply (wp gts_st_tcb_at)+
apply (simp add: pred_conj_def cong: conj_cong)
apply (wp hoare_post_taut)
apply simp
apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb')
apply (clarsimp simp add: invs_def valid_state_def
valid_pspace_def ep_redux_simps ep_redux_simps'
st_tcb_at_tcb_at
cong: list.case_cong)
apply (clarsimp simp: valid_ep_def)
apply (drule(1) sym_refs_obj_atD[where P="\<lambda>ob. ob = e" for e])
apply (clarsimp simp: st_tcb_at_refs_of_rev st_tcb_at_reply_cap_valid
st_tcb_at_caller_cap_null)
apply (fastforce simp: st_tcb_def2 valid_sched_def valid_sched_action_def)
subgoal by (auto simp: valid_ep'_def
split: list.split;
clarsimp simp: invs'_def valid_state'_def)
apply wp+
apply (clarsimp simp: ep_at_def2)+
done
qed
crunch typ_at'[wp]: setMessageInfo "\<lambda>s. P (typ_at' T p s)"
lemmas setMessageInfo_typ_ats[wp] = typ_at_lifts [OF setMessageInfo_typ_at']
(* Annotation added by Simon Winwood (Thu Jul 1 20:54:41 2010) using taint-mode *)
declare tl_drop_1[simp]
crunch cur[wp]: cancel_ipc "cur_tcb"
(wp: select_wp crunch_wps simp: crunch_simps)
crunch valid_objs'[wp]: asUser "valid_objs'"
lemma valid_sched_weak_strg:
"valid_sched s \<longrightarrow> weak_valid_sched_action s"
by (simp add: valid_sched_def valid_sched_action_def)
crunch weak_valid_sched_action[wp]: as_user weak_valid_sched_action
(wp: weak_valid_sched_action_lift)
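(* Correspondence for signalling a notification: cases on the notification
   state (idle, waiting, active), with the idle case further split on whether
   a TCB is bound and ready to receive. *)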
lemma sendSignal_corres:
"corres dc (einvs and ntfn_at ep) (invs' and ntfn_at' ep)
(send_signal ep bg) (sendSignal ep bg)"
apply (simp add: send_signal_def sendSignal_def Let_def)
apply (rule corres_guard_imp)
apply (rule corres_split[OF getNotification_corres,
where
R = "\<lambda>rv. einvs and ntfn_at ep and valid_ntfn rv and
ko_at (Structures_A.Notification rv) ep" and
R' = "\<lambda>rv'. invs' and ntfn_at' ep and
valid_ntfn' rv' and ko_at' rv' ep"])
defer
apply (wp get_simple_ko_ko_at get_ntfn_ko')+
apply (simp add: invs_valid_objs)+
apply (case_tac "ntfn_obj ntfn")
\<comment> \<open>IdleNtfn\<close>
apply (clarsimp simp add: ntfn_relation_def)
apply (case_tac "ntfnBoundTCB nTFN")
apply clarsimp
apply (rule corres_guard_imp[OF setNotification_corres])
apply (clarsimp simp add: ntfn_relation_def)+
apply (rule corres_guard_imp)
apply (rule corres_split[OF getThreadState_corres])
apply (rule corres_if)
apply (fastforce simp: receive_blocked_def receiveBlocked_def
thread_state_relation_def
split: Structures_A.thread_state.splits
Structures_H.thread_state.splits)
apply (rule corres_split[OF cancel_ipc_corres])
apply (rule corres_split[OF setThreadState_corres])
apply (clarsimp simp: thread_state_relation_def)
apply (simp add: badgeRegister_def badge_register_def)
apply (rule corres_split[OF asUser_setRegister_corres])
apply (rule possibleSwitchTo_corres)
apply wp
apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at'
sts_valid_queues sts_st_tcb' hoare_disjI2
cancel_ipc_cte_wp_at_not_reply_state
| strengthen invs_vobjs_strgs invs_psp_aligned_strg valid_sched_weak_strg
| simp add: valid_tcb_state_def)+
apply (rule_tac Q="\<lambda>rv. invs' and tcb_at' a" in hoare_strengthen_post)
apply wp
apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak
valid_tcb_state'_def)
apply (rule setNotification_corres)
apply (clarsimp simp add: ntfn_relation_def)
apply (wp gts_wp gts_wp' | clarsimp)+
apply (auto simp: valid_ntfn_def receive_blocked_def valid_sched_def invs_cur
elim: pred_tcb_weakenE
intro: st_tcb_at_reply_cap_valid
split: Structures_A.thread_state.splits)[1]
apply (clarsimp simp: valid_ntfn'_def invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)
\<comment> \<open>WaitingNtfn\<close>
apply (clarsimp simp add: ntfn_relation_def Let_def)
apply (simp add: update_waiting_ntfn_def)
apply (rename_tac list)
apply (case_tac "tl list = []")
\<comment> \<open>tl list = []\<close>
apply (rule corres_guard_imp)
apply (rule_tac F="list \<noteq> []" in corres_gen_asm)
apply (simp add: list_case_helper split del: if_split)
apply (rule corres_split[OF setNotification_corres])
apply (simp add: ntfn_relation_def)
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (simp add: badgeRegister_def badge_register_def)
apply (rule corres_split[OF asUser_setRegister_corres])
apply (rule possibleSwitchTo_corres)
apply ((wp | simp)+)[1]
apply (rule_tac Q="\<lambda>_. Invariants_H.valid_queues and valid_queues' and
(\<lambda>s. sch_act_wf (ksSchedulerAction s) s) and
cur_tcb' and
st_tcb_at' runnable' (hd list) and valid_objs'"
in hoare_post_imp, clarsimp simp: pred_tcb_at' elim!: sch_act_wf_weak)
apply (wp | simp)+
apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action
| simp)+
apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues'
setThreadState_st_tcb
| simp)+
apply (wp set_simple_ko_valid_objs set_ntfn_aligned' set_ntfn_valid_objs'
hoare_vcg_disj_lift weak_sch_act_wf_lift_linear
| simp add: valid_tcb_state_def valid_tcb_state'_def)+
apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def
valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def
valid_sched_action_def)
apply (auto simp: valid_ntfn'_def )[1]
apply (clarsimp simp: invs'_def valid_state'_def)
\<comment> \<open>tl list \<noteq> []\<close>
apply (rule corres_guard_imp)
apply (rule_tac F="list \<noteq> []" in corres_gen_asm)
apply (simp add: list_case_helper)
apply (rule corres_split[OF setNotification_corres])
apply (simp add: ntfn_relation_def split:list.splits)
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (simp add: badgeRegister_def badge_register_def)
apply (rule corres_split[OF asUser_setRegister_corres])
apply (rule possibleSwitchTo_corres)
apply (wp cur_tcb_lift | simp)+
apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action
| simp)+
apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues'
setThreadState_st_tcb
| simp)+
apply (wp set_ntfn_aligned' set_simple_ko_valid_objs set_ntfn_valid_objs'
hoare_vcg_disj_lift weak_sch_act_wf_lift_linear
| simp add: valid_tcb_state_def valid_tcb_state'_def)+
apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def
valid_pspace_def neq_Nil_conv
ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def
split: option.splits)
apply (auto simp: valid_ntfn'_def neq_Nil_conv invs'_def valid_state'_def
weak_sch_act_wf_def
split: option.splits)[1]
\<comment> \<open>ActiveNtfn\<close>
apply (clarsimp simp add: ntfn_relation_def)
apply (rule corres_guard_imp)
apply (rule setNotification_corres)
apply (simp add: ntfn_relation_def combine_ntfn_badges_def
combine_ntfn_msgs_def)
apply (simp add: invs_def valid_state_def valid_ntfn_def)
apply (simp add: invs'_def valid_state'_def valid_ntfn'_def)
done
lemma valid_Running'[simp]:
"valid_tcb_state' Running = \<top>"
by (rule ext, simp add: valid_tcb_state'_def)
crunch typ'[wp]: setMRs "\<lambda>s. P (typ_at' T p s)"
(wp: crunch_wps simp: zipWithM_x_mapM)
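(* Preservation lemmas for possibleSwitchTo, covering the scheduler action,
   the ready queues, and the liveness and capability invariants needed later. *)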
lemma possibleSwitchTo_sch_act[wp]:
"\<lbrace>\<lambda>s. sch_act_wf (ksSchedulerAction s) s \<and> st_tcb_at' runnable' t s\<rbrace>
possibleSwitchTo t
\<lbrace>\<lambda>rv s. sch_act_wf (ksSchedulerAction s) s\<rbrace>"
apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs)
apply (wp static_imp_wp threadSet_sch_act setQueue_sch_act threadGet_wp
| simp add: unless_def | wpc)+
apply (auto simp: obj_at'_def projectKOs tcb_in_cur_domain'_def)
done
lemma possibleSwitchTo_valid_queues[wp]:
"\<lbrace>Invariants_H.valid_queues and valid_objs' and (\<lambda>s. sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t\<rbrace>
possibleSwitchTo t
\<lbrace>\<lambda>rv. Invariants_H.valid_queues\<rbrace>"
apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs)
apply (wp hoare_drop_imps | wpc | simp)+
apply (auto simp: valid_tcb'_def weak_sch_act_wf_def
dest: pred_tcb_at'
elim!: valid_objs_valid_tcbE)
done
lemma possibleSwitchTo_ksQ':
"\<lbrace>(\<lambda>s. t' \<notin> set (ksReadyQueues s p) \<and> sch_act_not t' s) and K(t' \<noteq> t)\<rbrace>
possibleSwitchTo t
\<lbrace>\<lambda>_ s. t' \<notin> set (ksReadyQueues s p)\<rbrace>"
apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs)
apply (wp static_imp_wp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp
| wpc
| simp split del: if_split)+
apply (auto simp: obj_at'_def)
done
lemma possibleSwitchTo_valid_queues'[wp]:
"\<lbrace>valid_queues' and (\<lambda>s. sch_act_wf (ksSchedulerAction s) s)
and st_tcb_at' runnable' t\<rbrace>
possibleSwitchTo t
\<lbrace>\<lambda>rv. valid_queues'\<rbrace>"
apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs)
apply (wp static_imp_wp threadGet_wp | wpc | simp)+
apply (auto simp: obj_at'_def)
done
crunch st_refs_of'[wp]: possibleSwitchTo "\<lambda>s. P (state_refs_of' s)"
(wp: crunch_wps)
crunch cap_to'[wp]: possibleSwitchTo "ex_nonz_cap_to' p"
(wp: crunch_wps)
crunch objs'[wp]: possibleSwitchTo valid_objs'
(wp: crunch_wps)
crunch ct[wp]: possibleSwitchTo cur_tcb'
(wp: cur_tcb_lift crunch_wps)
lemma possibleSwitchTo_iflive[wp]:
"\<lbrace>if_live_then_nonz_cap' and ex_nonz_cap_to' t
and (\<lambda>s. sch_act_wf (ksSchedulerAction s) s)\<rbrace>
possibleSwitchTo t
\<lbrace>\<lambda>rv. if_live_then_nonz_cap'\<rbrace>"
apply (simp add: possibleSwitchTo_def curDomain_def)
apply (wp | wpc | simp)+
apply (simp only: imp_conv_disj, wp hoare_vcg_all_lift hoare_vcg_disj_lift)
apply (wp threadGet_wp)+
apply (auto simp: obj_at'_def projectKOs)
done
crunches possibleSwitchTo
for ifunsafe[wp]: if_unsafe_then_cap'
and idle'[wp]: valid_idle'
and global_refs'[wp]: valid_global_refs'
and arch_state'[wp]: valid_arch_state'
and irq_node'[wp]: "\<lambda>s. P (irq_node' s)"
and typ_at'[wp]: "\<lambda>s. P (typ_at' T p s)"
and irq_handlers'[wp]: valid_irq_handlers'
and irq_states'[wp]: valid_irq_states'
  and pde_mappings'[wp]: valid_pde_mappings'
(wp: crunch_wps simp: unless_def tcb_cte_cases_def)
crunches sendSignal
for ct'[wp]: "\<lambda>s. P (ksCurThread s)"
and it'[wp]: "\<lambda>s. P (ksIdleThread s)"
(wp: crunch_wps simp: crunch_simps o_def)
context
notes option.case_cong_weak[cong]
begin
crunches sendSignal, setBoundNotification
for irqs_masked'[wp]: "irqs_masked'"
(wp: crunch_wps getObject_inv loadObject_default_inv
simp: crunch_simps unless_def o_def
rule: irqs_masked_lift)
end
lemma sts_running_valid_queues:
"runnable' st \<Longrightarrow> \<lbrace> Invariants_H.valid_queues \<rbrace> setThreadState st t \<lbrace>\<lambda>_. Invariants_H.valid_queues \<rbrace>"
by (wp sts_valid_queues, clarsimp)
lemma ct_in_state_activatable_imp_simple'[simp]:
"ct_in_state' activatable' s \<Longrightarrow> ct_in_state' simple' s"
apply (simp add: ct_in_state'_def)
apply (erule pred_tcb'_weakenE)
apply (case_tac st; simp)
done
lemma setThreadState_nonqueued_state_update:
"\<lbrace>\<lambda>s. invs' s \<and> st_tcb_at' simple' t s
\<and> st \<in> {Inactive, Running, Restart, IdleThreadState}
\<and> (st \<noteq> Inactive \<longrightarrow> ex_nonz_cap_to' t s)
\<and> (t = ksIdleThread s \<longrightarrow> idle' st)
\<and> (\<not> runnable' st \<longrightarrow> sch_act_simple s)
\<and> (\<not> runnable' st \<longrightarrow> (\<forall>p. t \<notin> set (ksReadyQueues s p)))\<rbrace>
setThreadState st t \<lbrace>\<lambda>rv. invs'\<rbrace>"
apply (simp add: invs'_def valid_state'_def)
apply (rule hoare_pre, wp valid_irq_node_lift
sts_valid_queues
setThreadState_ct_not_inQ)
apply (clarsimp simp: pred_tcb_at')
apply (rule conjI, fastforce simp: valid_tcb_state'_def)
apply (drule simple_st_tcb_at_state_refs_ofD')
apply (drule bound_tcb_at_state_refs_ofD')
apply (rule conjI, fastforce)
apply clarsimp
apply (erule delta_sym_refs)
apply (fastforce split: if_split_asm)
apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def
split: if_split_asm)
done
lemma cteDeleteOne_reply_cap_to'[wp]:
"\<lbrace>ex_nonz_cap_to' p and
cte_wp_at' (\<lambda>c. isReplyCap (cteCap c)) slot\<rbrace>
cteDeleteOne slot
\<lbrace>\<lambda>rv. ex_nonz_cap_to' p\<rbrace>"
apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def)
apply (rule hoare_seq_ext [OF _ getCTE_sp])
apply (rule hoare_assume_pre)
apply (subgoal_tac "isReplyCap (cteCap cte)")
apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv
| clarsimp simp: finaliseCap_def isCap_simps | simp
| wp (once) hoare_drop_imps)+
apply (fastforce simp: cte_wp_at_ctes_of)
apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps)
done
crunches setupCallerCap, possibleSwitchTo, asUser, doIPCTransfer
for vms'[wp]: "valid_machine_state'"
(wp: crunch_wps simp: zipWithM_x_mapM_x)
crunch nonz_cap_to'[wp]: cancelSignal "ex_nonz_cap_to' p"
(wp: crunch_wps simp: crunch_simps)
lemma cancelIPC_nonz_cap_to'[wp]:
"\<lbrace>ex_nonz_cap_to' p\<rbrace> cancelIPC t \<lbrace>\<lambda>rv. ex_nonz_cap_to' p\<rbrace>"
apply (simp add: cancelIPC_def getThreadReplySlot_def Let_def
capHasProperty_def)
apply (wp threadSet_cap_to'
| wpc
| simp
| clarsimp elim!: cte_wp_at_weakenE'
| rule hoare_post_imp[where Q="\<lambda>rv. ex_nonz_cap_to' p"])+
done
crunches activateIdleThread, getThreadReplySlot, isFinalCapability
for nosch[wp]: "\<lambda>s. P (ksSchedulerAction s)"
(ignore: setNextPC simp: Let_def)
crunches setupCallerCap, asUser, setMRs, doIPCTransfer, possibleSwitchTo
for pspace_domain_valid[wp]: "pspace_domain_valid"
(wp: crunch_wps simp: zipWithM_x_mapM_x)
crunches setupCallerCap, doIPCTransfer, possibleSwitchTo
for ksDomScheduleIdx[wp]: "\<lambda>s. P (ksDomScheduleIdx s)"
(wp: crunch_wps simp: zipWithM_x_mapM)
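(* If the scheduler action is not ResumeCurrentThread, the following
   operations leave it that way. *)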
lemma setThreadState_not_rct[wp]:
"\<lbrace>\<lambda>s. ksSchedulerAction s \<noteq> ResumeCurrentThread \<rbrace>
setThreadState st t
\<lbrace>\<lambda>_ s. ksSchedulerAction s \<noteq> ResumeCurrentThread \<rbrace>"
apply (simp add: setThreadState_def)
apply (wp)
apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp)
apply (simp)
apply (wp)+
apply simp
done
lemma cancelAllIPC_not_rct[wp]:
"\<lbrace>\<lambda>s. ksSchedulerAction s \<noteq> ResumeCurrentThread \<rbrace>
cancelAllIPC epptr
\<lbrace>\<lambda>_ s. ksSchedulerAction s \<noteq> ResumeCurrentThread \<rbrace>"
apply (simp add: cancelAllIPC_def)
apply (wp | wpc)+
apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp)
apply simp
apply (rule mapM_x_wp_inv)
apply (wp)+
apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp)
apply simp
apply (rule mapM_x_wp_inv)
apply (wp)+
apply (wp hoare_vcg_all_lift hoare_drop_imp)
apply (simp_all)
done
lemma cancelAllSignals_not_rct[wp]:
"\<lbrace>\<lambda>s. ksSchedulerAction s \<noteq> ResumeCurrentThread \<rbrace>
cancelAllSignals epptr
\<lbrace>\<lambda>_ s. ksSchedulerAction s \<noteq> ResumeCurrentThread \<rbrace>"
apply (simp add: cancelAllSignals_def)
apply (wp | wpc)+
apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp)
apply simp
apply (rule mapM_x_wp_inv)
apply (wp)+
apply (wp hoare_vcg_all_lift hoare_drop_imp)
apply (simp_all)
done
crunch not_rct[wp]: finaliseCapTrue_standin "\<lambda>s. ksSchedulerAction s \<noteq> ResumeCurrentThread"
(simp: Let_def)
declare setEndpoint_ct' [wp]
lemma cancelIPC_ResumeCurrentThread_imp_notct[wp]:
"\<lbrace>\<lambda>s. ksSchedulerAction s = ResumeCurrentThread \<longrightarrow> ksCurThread s \<noteq> t'\<rbrace>
cancelIPC t
\<lbrace>\<lambda>_ s. ksSchedulerAction s = ResumeCurrentThread \<longrightarrow> ksCurThread s \<noteq> t'\<rbrace>"
(is "\<lbrace>?PRE t'\<rbrace> _ \<lbrace>_\<rbrace>")
proof -
have aipc: "\<And>t t' ntfn.
\<lbrace>\<lambda>s. ksSchedulerAction s = ResumeCurrentThread \<longrightarrow> ksCurThread s \<noteq> t'\<rbrace>
cancelSignal t ntfn
\<lbrace>\<lambda>_ s. ksSchedulerAction s = ResumeCurrentThread \<longrightarrow> ksCurThread s \<noteq> t'\<rbrace>"
apply (simp add: cancelSignal_def)
apply (wp)[1]
apply (wp hoare_convert_imp)+
apply (rule_tac P="\<lambda>s. ksSchedulerAction s \<noteq> ResumeCurrentThread"
in hoare_weaken_pre)
apply (wpc)
apply (wp | simp)+
apply (wpc, wp+)
apply (rule_tac Q="\<lambda>_. ?PRE t'" in hoare_post_imp, clarsimp)
apply (wp)
apply simp
done
have cdo: "\<And>t t' slot.
\<lbrace>\<lambda>s. ksSchedulerAction s = ResumeCurrentThread \<longrightarrow> ksCurThread s \<noteq> t'\<rbrace>
cteDeleteOne slot
\<lbrace>\<lambda>_ s. ksSchedulerAction s = ResumeCurrentThread \<longrightarrow> ksCurThread s \<noteq> t'\<rbrace>"
apply (simp add: cteDeleteOne_def unless_def split_def)
apply (wp)
apply (wp hoare_convert_imp)[1]
apply (wp)
apply (rule_tac Q="\<lambda>_. ?PRE t'" in hoare_post_imp, clarsimp)
apply (wp hoare_convert_imp | simp)+
done
show ?thesis
apply (simp add: cancelIPC_def Let_def)
apply (wp, wpc)
prefer 4 \<comment> \<open>state = Running\<close>
apply wp
prefer 7 \<comment> \<open>state = Restart\<close>
apply wp
apply (wp)+
apply (wp hoare_convert_imp)[1]
apply (wpc, wp+)
apply (rule_tac Q="\<lambda>_. ?PRE t'" in hoare_post_imp, clarsimp)
apply (wp cdo)+
apply (rule_tac Q="\<lambda>_. ?PRE t'" in hoare_post_imp, clarsimp)
apply ((wp aipc hoare_convert_imp)+)[6]
apply (wp)
apply (wp hoare_convert_imp)[1]
apply (wpc, wp+)
apply (rule_tac Q="\<lambda>_. ?PRE t'" in hoare_post_imp, clarsimp)
apply (wp)
apply (rule_tac Q="\<lambda>_. ?PRE t'" in hoare_post_imp, clarsimp)
apply (wp)
apply simp
done
qed
crunch nosch[wp]: setMRs "\<lambda>s. P (ksSchedulerAction s)"
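(* Invariant preservation for sendSignal, by case analysis on the notification
   state as in sendSignal_corres above. *)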
lemma sai_invs'[wp]:
"\<lbrace>invs' and ex_nonz_cap_to' ntfnptr\<rbrace>
sendSignal ntfnptr badge \<lbrace>\<lambda>y. invs'\<rbrace>"
unfolding sendSignal_def
including no_pre
apply (rule hoare_seq_ext[OF _ get_ntfn_sp'])
apply (case_tac "ntfnObj nTFN", simp_all)
prefer 3
apply (rename_tac list)
apply (case_tac list,
simp_all split del: if_split
add: setMessageInfo_def)[1]
apply (rule hoare_pre)
apply (wp hoare_convert_imp [OF asUser_nosch]
hoare_convert_imp [OF setMRs_sch_act])+
apply (clarsimp simp:conj_comms)
apply (simp add: invs'_def valid_state'_def)
apply ((wp valid_irq_node_lift sts_valid_objs' setThreadState_ct_not_inQ
sts_valid_queues [where st="Structures_H.thread_state.Running", simplified]
set_ntfn_valid_objs' cur_tcb_lift sts_st_tcb'
hoare_convert_imp [OF setNotification_nosch]
| simp split del: if_split)+)[3]
apply (intro conjI[rotated];
(solves \<open>clarsimp simp: invs'_def valid_state'_def valid_pspace'_def\<close>)?)
apply clarsimp
apply (clarsimp simp: invs'_def valid_state'_def split del: if_split)
apply (drule(1) ct_not_in_ntfnQueue, simp+)
apply clarsimp
apply (frule ko_at_valid_objs', clarsimp)
apply (simp add: projectKOs)
apply (clarsimp simp: valid_obj'_def valid_ntfn'_def
split: list.splits)
apply (clarsimp simp: invs'_def valid_state'_def)
apply (clarsimp simp: st_tcb_at_refs_of_rev' valid_idle'_def pred_tcb_at'_def idle_tcb'_def
dest!: sym_refs_ko_atD' sym_refs_st_tcb_atD' sym_refs_obj_atD'
split: list.splits)
apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def)
apply (frule(1) ko_at_valid_objs')
apply (simp add: projectKOs)
apply (clarsimp simp: valid_obj'_def valid_ntfn'_def
split: list.splits option.splits)
apply (clarsimp elim!: if_live_then_nonz_capE' simp:invs'_def valid_state'_def)
apply (drule(1) sym_refs_ko_atD')
apply (clarsimp elim!: ko_wp_at'_weakenE
intro!: refs_of_live')
apply (clarsimp split del: if_split)+
apply (frule ko_at_valid_objs', clarsimp)
apply (simp add: projectKOs)
apply (clarsimp simp: valid_obj'_def valid_ntfn'_def split del: if_split)
apply (frule invs_sym')
apply (drule(1) sym_refs_obj_atD')
apply (clarsimp split del: if_split cong: if_cong
simp: st_tcb_at_refs_of_rev' ep_redux_simps' ntfn_bound_refs'_def)
apply (frule st_tcb_at_state_refs_ofD')
apply (erule delta_sym_refs)
apply (fastforce simp: split: if_split_asm)
apply (fastforce simp: tcb_bound_refs'_def set_eq_subset symreftype_inverse'
split: if_split_asm)
apply (clarsimp simp:invs'_def)
apply (frule ko_at_valid_objs')
apply (clarsimp simp: valid_pspace'_def valid_state'_def)
apply (simp add: projectKOs)
apply (clarsimp simp: valid_obj'_def valid_ntfn'_def split del: if_split)
apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def)
apply (frule(1) ko_at_valid_objs')
apply (simp add: projectKOs)
apply (clarsimp simp: valid_obj'_def valid_ntfn'_def
split: list.splits option.splits)
apply (case_tac "ntfnBoundTCB nTFN", simp_all)
apply (wp set_ntfn_minor_invs')
apply (fastforce simp: valid_ntfn'_def invs'_def valid_state'_def
elim!: obj_at'_weakenE
dest!: global'_no_ex_cap)
apply (wp add: hoare_convert_imp [OF asUser_nosch]
hoare_convert_imp [OF setMRs_sch_act]
setThreadState_nonqueued_state_update sts_st_tcb'
del: cancelIPC_simple)
apply (clarsimp | wp cancelIPC_ct')+
apply (wp set_ntfn_minor_invs' gts_wp' | clarsimp)+
apply (frule pred_tcb_at')
by (wp set_ntfn_minor_invs'
| rule conjI
| clarsimp elim!: st_tcb_ex_cap''
| fastforce simp: receiveBlocked_def projectKOs pred_tcb_at'_def obj_at'_def
dest!: invs_rct_ct_activatable'
split: thread_state.splits
| fastforce simp: invs'_def valid_state'_def receiveBlocked_def projectKOs
valid_obj'_def valid_ntfn'_def
split: thread_state.splits
dest!: global'_no_ex_cap st_tcb_ex_cap'' ko_at_valid_objs')+
lemma replyFromKernel_corres:
"corres dc (tcb_at t and invs) (tcb_at' t and invs')
(reply_from_kernel t r) (replyFromKernel t r)"
apply (case_tac r)
apply (clarsimp simp: replyFromKernel_def reply_from_kernel_def
badge_register_def badgeRegister_def)
apply (rule corres_guard_imp)
apply (rule corres_split_eqr[OF lookupIPCBuffer_corres])
apply (rule corres_split[OF asUser_setRegister_corres])
apply (rule corres_split_eqr[OF setMRs_corres])
apply simp
apply (rule setMessageInfo_corres)
apply (wp hoare_case_option_wp hoare_valid_ipc_buffer_ptr_typ_at'
| clarsimp)+
done
lemma rfk_invs':
"\<lbrace>invs' and tcb_at' t\<rbrace> replyFromKernel t r \<lbrace>\<lambda>rv. invs'\<rbrace>"
apply (simp add: replyFromKernel_def)
apply (cases r)
apply wpsimp
done
crunch nosch[wp]: replyFromKernel "\<lambda>s. P (ksSchedulerAction s)"
lemma completeSignal_corres:
"corres dc (ntfn_at ntfnptr and tcb_at tcb and pspace_aligned and valid_objs
                             \<comment> \<open>and obj_at (\<lambda>ko. ko = Notification ntfn \<and> Ipc_A.isActive ntfn) ntfnptr\<close> )
(ntfn_at' ntfnptr and tcb_at' tcb and valid_pspace' and obj_at' isActive ntfnptr)
(complete_signal ntfnptr tcb) (completeSignal ntfnptr tcb)"
apply (simp add: complete_signal_def completeSignal_def)
apply (rule corres_guard_imp)
apply (rule_tac R'="\<lambda>ntfn. ntfn_at' ntfnptr and tcb_at' tcb and valid_pspace'
and valid_ntfn' ntfn and (\<lambda>_. isActive ntfn)"
in corres_split[OF getNotification_corres])
apply (rule corres_gen_asm2)
apply (case_tac "ntfn_obj rv")
apply (clarsimp simp: ntfn_relation_def isActive_def
split: ntfn.splits Structures_H.notification.splits)+
apply (rule corres_guard2_imp)
apply (simp add: badgeRegister_def badge_register_def)
apply (rule corres_split[OF asUser_setRegister_corres setNotification_corres])
apply (clarsimp simp: ntfn_relation_def)
apply (wp set_simple_ko_valid_objs get_simple_ko_wp getNotification_wp | clarsimp simp: valid_ntfn'_def)+
apply (clarsimp simp: valid_pspace'_def)
apply (frule_tac P="(\<lambda>k. k = ntfn)" in obj_at_valid_objs', assumption)
apply (clarsimp simp: projectKOs valid_obj'_def valid_ntfn'_def obj_at'_def)
done
lemma doNBRecvFailedTransfer_corres:
"corres dc (tcb_at thread)
(tcb_at' thread)
(do_nbrecv_failed_transfer thread)
(doNBRecvFailedTransfer thread)"
unfolding do_nbrecv_failed_transfer_def doNBRecvFailedTransfer_def
by (simp add: badgeRegister_def badge_register_def, rule asUser_setRegister_corres)
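(* Correspondence for receiving on an endpoint. The bound notification is
   checked first: if it is active the signal is completed immediately;
   otherwise the proof splits on the endpoint state. *)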
lemma receiveIPC_corres:
assumes "is_ep_cap cap" and "cap_relation cap cap'"
shows "
corres dc (einvs and valid_sched and tcb_at thread and valid_cap cap and ex_nonz_cap_to thread
and cte_wp_at (\<lambda>c. c = cap.NullCap) (thread, tcb_cnode_index 3))
(invs' and tcb_at' thread and valid_cap' cap')
(receive_ipc thread cap isBlocking) (receiveIPC thread cap' isBlocking)"
apply (insert assms)
apply (simp add: receive_ipc_def receiveIPC_def
split del: if_split)
apply (case_tac cap, simp_all add: isEndpointCap_def)
apply (rename_tac word1 word2 right)
apply clarsimp
apply (rule corres_guard_imp)
apply (rule corres_split[OF getEndpoint_corres])
apply (rule corres_guard_imp)
apply (rule corres_split[OF getBoundNotification_corres])
apply (rule_tac r'="ntfn_relation" in corres_split)
apply (rule corres_option_split[rotated 2])
apply (rule getNotification_corres)
apply clarsimp
apply (rule corres_trivial, simp add: ntfn_relation_def default_notification_def
default_ntfn_def)
apply (rule corres_if)
apply (clarsimp simp: ntfn_relation_def Ipc_A.isActive_def Endpoint_H.isActive_def
split: Structures_A.ntfn.splits Structures_H.notification.splits)
apply clarsimp
apply (rule completeSignal_corres)
apply (rule_tac P="einvs and valid_sched and tcb_at thread and
ep_at word1 and valid_ep ep and
obj_at (\<lambda>k. k = Endpoint ep) word1
and cte_wp_at (\<lambda>c. c = cap.NullCap) (thread, tcb_cnode_index 3)
and ex_nonz_cap_to thread" and
P'="invs' and tcb_at' thread and ep_at' word1 and
valid_ep' epa"
in corres_inst)
apply (case_tac ep)
\<comment> \<open>IdleEP\<close>
apply (simp add: ep_relation_def)
apply (rule corres_guard_imp)
apply (case_tac isBlocking; simp)
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule setEndpoint_corres)
apply (simp add: ep_relation_def)
apply wp+
apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp)
apply simp
apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def
valid_tcb_state_def st_tcb_at_tcb_at)
apply auto[1]
\<comment> \<open>SendEP\<close>
apply (simp add: ep_relation_def)
apply (rename_tac list)
apply (rule_tac F="list \<noteq> []" in corres_req)
apply (clarsimp simp: valid_ep_def)
apply (case_tac list, simp_all split del: if_split)[1]
apply (rule corres_guard_imp)
apply (rule corres_split[OF setEndpoint_corres])
apply (case_tac lista, simp_all add: ep_relation_def)[1]
apply (rule corres_split[OF getThreadState_corres])
apply (rule_tac
F="\<exists>data.
sender_state =
Structures_A.thread_state.BlockedOnSend word1 data"
in corres_gen_asm)
apply (clarsimp simp: isSend_def case_bool_If
case_option_If if3_fold
split del: if_split cong: if_cong)
apply (rule corres_split[OF doIPCTransfer_corres])
apply (simp split del: if_split cong: if_cong)
apply (fold dc_def)[1]
apply (rule_tac P="valid_objs and valid_mdb and valid_list
and valid_sched
and cur_tcb
and valid_reply_caps
and pspace_aligned and pspace_distinct
and st_tcb_at (Not \<circ> awaiting_reply) a
and st_tcb_at (Not \<circ> halted) a
and tcb_at thread and valid_reply_masters
and cte_wp_at (\<lambda>c. c = cap.NullCap)
(thread, tcb_cnode_index 3)"
and P'="tcb_at' a and tcb_at' thread and cur_tcb'
and Invariants_H.valid_queues
and valid_queues'
and valid_pspace'
and valid_objs'
and (\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s)"
in corres_guard_imp [OF corres_if])
apply (simp add: fault_rel_optionation_def)
apply (rule corres_if2 [OF _ setupCallerCap_corres setThreadState_corres])
apply simp
apply simp
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule possibleSwitchTo_corres)
apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action
| simp)+
apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues'
setThreadState_st_tcb
| simp)+
apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def
valid_sched_action_def)
apply (clarsimp split: if_split_asm)
apply (clarsimp | wp do_ipc_transfer_tcb_caps)+
apply (rule_tac Q="\<lambda>_ s. sch_act_wf (ksSchedulerAction s) s"
in hoare_post_imp, erule sch_act_wf_weak)
apply (wp sts_st_tcb' gts_st_tcb_at | simp)+
apply (simp cong: list.case_cong)
apply wp
apply simp
apply (wp weak_sch_act_wf_lift_linear setEndpoint_valid_mdb' set_ep_valid_objs')
apply (clarsimp split: list.split)
apply (clarsimp simp add: invs_def valid_state_def st_tcb_at_tcb_at)
apply (clarsimp simp add: valid_ep_def valid_pspace_def)
apply (drule(1) sym_refs_obj_atD[where P="\<lambda>ko. ko = Endpoint e" for e])
apply (fastforce simp: st_tcb_at_refs_of_rev elim: st_tcb_weakenE)
apply (auto simp: valid_ep'_def invs'_def valid_state'_def split: list.split)[1]
\<comment> \<open>RecvEP\<close>
apply (simp add: ep_relation_def)
apply (rule_tac corres_guard_imp)
apply (case_tac isBlocking; simp)
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule setEndpoint_corres)
apply (simp add: ep_relation_def)
apply wp+
apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp)
apply simp
apply (clarsimp simp: valid_tcb_state_def)
apply (clarsimp simp add: valid_tcb_state'_def)
apply (wp get_simple_ko_wp[where f=Notification] getNotification_wp gbn_wp gbn_wp'
hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_if_lift
| wpc | simp add: ep_at_def2[symmetric, simplified] | clarsimp)+
apply (clarsimp simp: valid_cap_def invs_psp_aligned invs_valid_objs pred_tcb_at_def
valid_obj_def valid_tcb_def valid_bound_ntfn_def
dest!: invs_valid_objs
elim!: obj_at_valid_objsE
split: option.splits)
apply (auto simp: valid_cap'_def invs_valid_pspace' valid_obj'_def valid_tcb'_def
valid_bound_ntfn'_def obj_at'_def projectKOs pred_tcb_at'_def
dest!: invs_valid_objs' obj_at_valid_objs'
split: option.splits)
done
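(* Correspondence for receiving directly on a notification capability, again
   by cases on the notification state. *)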
lemma receiveSignal_corres:
"\<lbrakk> is_ntfn_cap cap; cap_relation cap cap' \<rbrakk> \<Longrightarrow>
corres dc (invs and st_tcb_at active thread and valid_cap cap and ex_nonz_cap_to thread)
(invs' and tcb_at' thread and valid_cap' cap')
(receive_signal thread cap isBlocking) (receiveSignal thread cap' isBlocking)"
apply (simp add: receive_signal_def receiveSignal_def)
apply (case_tac cap, simp_all add: isEndpointCap_def)
apply (rename_tac word1 word2 rights)
apply (rule corres_guard_imp)
apply (rule_tac R="\<lambda>rv. invs and tcb_at thread and st_tcb_at active thread and
ntfn_at word1 and ex_nonz_cap_to thread and
valid_ntfn rv and
obj_at (\<lambda>k. k = Notification rv) word1" and
R'="\<lambda>rv'. invs' and tcb_at' thread and ntfn_at' word1 and
valid_ntfn' rv'"
in corres_split[OF getNotification_corres])
apply clarsimp
apply (case_tac "ntfn_obj rv")
\<comment> \<open>IdleNtfn\<close>
apply (simp add: ntfn_relation_def)
apply (rule corres_guard_imp)
apply (case_tac isBlocking; simp)
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule setNotification_corres)
apply (simp add: ntfn_relation_def)
apply wp+
apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp+)
\<comment> \<open>WaitingNtfn\<close>
apply (simp add: ntfn_relation_def)
apply (rule corres_guard_imp)
apply (case_tac isBlocking; simp)
apply (rule corres_split[OF setThreadState_corres])
apply simp
apply (rule setNotification_corres)
apply (simp add: ntfn_relation_def)
apply wp+
apply (rule corres_guard_imp)
apply (rule doNBRecvFailedTransfer_corres, simp+)
\<comment> \<open>ActiveNtfn\<close>
apply (simp add: ntfn_relation_def)
apply (rule corres_guard_imp)
apply (simp add: badgeRegister_def badge_register_def)
apply (rule corres_split[OF asUser_setRegister_corres])
apply (rule setNotification_corres)
apply (simp add: ntfn_relation_def)
apply wp+
apply (fastforce simp: invs_def valid_state_def valid_pspace_def
elim!: st_tcb_weakenE)
apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def)
apply wp+
apply (clarsimp simp add: ntfn_at_def2 valid_cap_def st_tcb_at_tcb_at)
apply (clarsimp simp add: valid_cap'_def)
done
lemma tg_sp':
"\<lbrace>P\<rbrace> threadGet f p \<lbrace>\<lambda>t. obj_at' (\<lambda>t'. f t' = t) p and P\<rbrace>"
including no_pre
apply (simp add: threadGet_def)
apply wp
apply (rule hoare_strengthen_post)
apply (rule getObject_tcb_sp)
apply clarsimp
apply (erule obj_at'_weakenE)
apply simp
done
declare lookup_cap_valid' [wp]
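(* A fault is delivered via the thread's fault handler capability; lookup
   failures are mapped to their abstract counterparts through
   lookup_failure_map. *)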
lemma sendFaultIPC_corres:
"valid_fault f \<Longrightarrow> fr f f' \<Longrightarrow>
corres (fr \<oplus> dc)
(einvs and st_tcb_at active thread and ex_nonz_cap_to thread)
(invs' and sch_act_not thread and tcb_at' thread)
(send_fault_ipc thread f) (sendFaultIPC thread f')"
apply (simp add: send_fault_ipc_def sendFaultIPC_def
liftE_bindE Let_def)
apply (rule corres_guard_imp)
apply (rule corres_split [where r'="\<lambda>fh fh'. fh = to_bl fh'"])
apply (rule threadGet_corres)
apply (simp add: tcb_relation_def)
apply simp
apply (rule corres_splitEE)
apply (rule corres_cap_fault)
apply (rule lookup_cap_corres, rule refl)
apply (rule_tac P="einvs and st_tcb_at active thread
and valid_cap handler_cap and ex_nonz_cap_to thread"
and P'="invs' and tcb_at' thread and sch_act_not thread
and valid_cap' handlerCap"
in corres_inst)
apply (case_tac handler_cap,
simp_all add: isCap_defs lookup_failure_map_def
case_bool_If If_rearrage
split del: if_split cong: if_cong)[1]
apply (rule corres_guard_imp)
apply (rule corres_if2 [OF refl])
apply (simp add: dc_def[symmetric])
apply (rule corres_split[OF threadset_corres sendIPC_corres], simp_all)[1]
apply (simp add: tcb_relation_def fault_rel_optionation_def exst_same_def)+
apply (wp thread_set_invs_trivial thread_set_no_change_tcb_state
thread_set_typ_at ep_at_typ_at ex_nonz_cap_to_pres
thread_set_cte_wp_at_trivial thread_set_not_state_valid_sched
| simp add: tcb_cap_cases_def)+
apply ((wp threadSet_invs_trivial threadSet_tcb'
| simp add: tcb_cte_cases_def
| wp (once) sch_act_sane_lift)+)[1]
apply (rule corres_trivial, simp add: lookup_failure_map_def)
apply (clarsimp simp: st_tcb_at_tcb_at split: if_split)
apply (simp add: valid_cap_def)
apply (clarsimp simp: valid_cap'_def inQ_def)
apply auto[1]
apply (clarsimp simp: lookup_failure_map_def)
apply wp+
apply (fastforce elim: st_tcb_at_tcb_at)
apply fastforce
done
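(* When f is known to be defined, gets_the f corresponds (up to dc) to a plain
   return on the concrete side. *)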
lemma gets_the_noop_corres:
assumes P: "\<And>s. P s \<Longrightarrow> f s \<noteq> None"
shows "corres dc P P' (gets_the f) (return x)"
apply (clarsimp simp: corres_underlying_def gets_the_def
return_def gets_def bind_def get_def)
apply (clarsimp simp: assert_opt_def return_def dest!: P)
done
lemma handleDoubleFault_corres:
"corres dc (tcb_at thread)
(tcb_at' thread and (\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s))
(handle_double_fault thread f ft)
(handleDoubleFault thread f' ft')"
apply (simp add: handle_double_fault_def handleDoubleFault_def)
apply (rule corres_guard_imp)
apply (subst bind_return [symmetric],
rule corres_underlying_split [OF setThreadState_corres])
apply simp
apply (rule corres_noop2)
apply (simp add: exs_valid_def return_def)
apply (rule hoare_eq_P)
apply wp
apply (rule asUser_inv)
apply (rule getRestartPC_inv)
apply (wp no_fail_getRestartPC)+
apply (wp|simp)+
done
crunch tcb' [wp]: sendFaultIPC "tcb_at' t" (wp: crunch_wps)
crunch typ_at'[wp]: receiveIPC "\<lambda>s. P (typ_at' T p s)"
(wp: crunch_wps)
lemmas receiveIPC_typ_ats[wp] = typ_at_lifts [OF receiveIPC_typ_at']
crunch typ_at'[wp]: receiveSignal "\<lambda>s. P (typ_at' T p s)"
(wp: crunch_wps)
lemmas receiveAIPC_typ_ats[wp] = typ_at_lifts [OF receiveSignal_typ_at']
declare cart_singleton_empty[simp]
declare cart_singleton_empty2[simp]
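(* Component-wise invariant preservation for setupCallerCap and the helpers
   it uses. *)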
crunch aligned'[wp]: setupCallerCap "pspace_aligned'"
(wp: crunch_wps)
crunch distinct'[wp]: setupCallerCap "pspace_distinct'"
(wp: crunch_wps)
crunch cur_tcb[wp]: setupCallerCap "cur_tcb'"
(wp: crunch_wps)
lemma setupCallerCap_state_refs_of[wp]:
"\<lbrace>\<lambda>s. P ((state_refs_of' s) (sender := {r \<in> state_refs_of' s sender. snd r = TCBBound}))\<rbrace>
setupCallerCap sender rcvr grant
\<lbrace>\<lambda>rv s. P (state_refs_of' s)\<rbrace>"
apply (simp add: setupCallerCap_def getThreadCallerSlot_def
getThreadReplySlot_def)
apply (wp hoare_drop_imps)
apply (simp add: fun_upd_def cong: if_cong)
done
crunch sch_act_wf: setupCallerCap
"\<lambda>s. sch_act_wf (ksSchedulerAction s) s"
(wp: crunch_wps ssa_sch_act sts_sch_act rule: sch_act_wf_lift)
lemma setCTE_valid_queues[wp]:
"\<lbrace>Invariants_H.valid_queues\<rbrace> setCTE ptr val \<lbrace>\<lambda>rv. Invariants_H.valid_queues\<rbrace>"
by (wp valid_queues_lift setCTE_pred_tcb_at')
crunch vq[wp]: cteInsert "Invariants_H.valid_queues"
(wp: crunch_wps)
crunch vq[wp]: getThreadCallerSlot "Invariants_H.valid_queues"
(wp: crunch_wps)
crunch vq[wp]: getThreadReplySlot "Invariants_H.valid_queues"
(wp: crunch_wps)
lemma setupCallerCap_vq[wp]:
"\<lbrace>Invariants_H.valid_queues and (\<lambda>s. \<forall>p. send \<notin> set (ksReadyQueues s p))\<rbrace>
setupCallerCap send recv grant \<lbrace>\<lambda>_. Invariants_H.valid_queues\<rbrace>"
apply (simp add: setupCallerCap_def)
apply (wp crunch_wps sts_valid_queues)
apply (fastforce simp: valid_queues_def obj_at'_def inQ_def)
done
crunch vq'[wp]: setupCallerCap "valid_queues'"
(wp: crunch_wps)
lemma is_derived_ReplyCap' [simp]:
"\<And>m p g. is_derived' m p (capability.ReplyCap t False g) =
(\<lambda>c. \<exists> g. c = capability.ReplyCap t True g)"
apply (subst fun_eq_iff)
apply clarsimp
apply (case_tac x, simp_all add: is_derived'_def isCap_simps
badge_derived'_def
vsCapRef_def)
done
lemma unique_master_reply_cap':
"\<And>c t. isReplyCap c \<and> capReplyMaster c \<and> capTCBPtr c = t \<longleftrightarrow>
(\<exists>g . c = capability.ReplyCap t True g)"
by (fastforce simp: isCap_simps conj_comms)
lemma getSlotCap_cte_wp_at:
"\<lbrace>\<top>\<rbrace> getSlotCap sl \<lbrace>\<lambda>rv. cte_wp_at' (\<lambda>c. cteCap c = rv) sl\<rbrace>"
apply (simp add: getSlotCap_def)
apply (wp getCTE_wp)
apply (clarsimp simp: cte_wp_at_ctes_of)
done
crunch no_0_obj'[wp]: setThreadState no_0_obj'
lemma setupCallerCap_vp[wp]:
"\<lbrace>valid_pspace' and tcb_at' sender and tcb_at' rcvr\<rbrace>
setupCallerCap sender rcvr grant \<lbrace>\<lambda>rv. valid_pspace'\<rbrace>"
apply (simp add: valid_pspace'_def setupCallerCap_def getThreadCallerSlot_def
getThreadReplySlot_def locateSlot_conv getSlotCap_def)
apply (wp getCTE_wp)
apply (rule_tac Q="\<lambda>_. valid_pspace' and
tcb_at' sender and tcb_at' rcvr"
in hoare_post_imp)
apply (clarsimp simp: valid_cap'_def o_def cte_wp_at_ctes_of isCap_simps
valid_pspace'_def)
apply (frule(1) ctes_of_valid', simp add: valid_cap'_def capAligned_def)
apply clarsimp
apply (wp | simp add: valid_pspace'_def valid_tcb_state'_def)+
done
declare haskell_assert_inv[wp del]
lemma setupCallerCap_iflive[wp]:
"\<lbrace>if_live_then_nonz_cap' and ex_nonz_cap_to' sender\<rbrace>
setupCallerCap sender rcvr grant
\<lbrace>\<lambda>rv. if_live_then_nonz_cap'\<rbrace>"
unfolding setupCallerCap_def getThreadCallerSlot_def
getThreadReplySlot_def locateSlot_conv
by (wp getSlotCap_cte_wp_at
| simp add: unique_master_reply_cap'
| strengthen eq_imp_strg
| wp (once) hoare_drop_imp[where f="getCTE rs" for rs])+
lemma setupCallerCap_ifunsafe[wp]:
"\<lbrace>if_unsafe_then_cap' and valid_objs' and
ex_nonz_cap_to' rcvr and tcb_at' rcvr\<rbrace>
setupCallerCap sender rcvr grant
\<lbrace>\<lambda>rv. if_unsafe_then_cap'\<rbrace>"
unfolding setupCallerCap_def getThreadCallerSlot_def
getThreadReplySlot_def locateSlot_conv
apply (wp getSlotCap_cte_wp_at
| simp add: unique_master_reply_cap' | strengthen eq_imp_strg
| wp (once) hoare_drop_imp[where f="getCTE rs" for rs])+
apply (rule_tac Q="\<lambda>rv. valid_objs' and tcb_at' rcvr and ex_nonz_cap_to' rcvr"
in hoare_post_imp)
apply (clarsimp simp: ex_nonz_tcb_cte_caps' tcbCallerSlot_def
objBits_def objBitsKO_def dom_def cte_level_bits_def)
apply (wp sts_valid_objs' | simp)+
apply (clarsimp simp: valid_tcb_state'_def)+
done
lemma setupCallerCap_global_refs'[wp]:
"\<lbrace>valid_global_refs'\<rbrace>
setupCallerCap sender rcvr grant
\<lbrace>\<lambda>rv. valid_global_refs'\<rbrace>"
unfolding setupCallerCap_def getThreadCallerSlot_def
getThreadReplySlot_def locateSlot_conv
apply (wp getSlotCap_cte_wp_at
| simp add: o_def unique_master_reply_cap'
| strengthen eq_imp_strg
| wp (once) getCTE_wp | clarsimp simp: cte_wp_at_ctes_of)+
(* at setThreadState *)
apply (rule_tac Q="\<lambda>_. valid_global_refs'" in hoare_post_imp, wpsimp+)
done
crunch valid_arch'[wp]: setupCallerCap "valid_arch_state'"
(wp: hoare_drop_imps)
crunch typ'[wp]: setupCallerCap "\<lambda>s. P (typ_at' T p s)"
crunch irq_node'[wp]: setupCallerCap "\<lambda>s. P (irq_node' s)"
(wp: hoare_drop_imps)
lemma setupCallerCap_irq_handlers'[wp]:
"\<lbrace>valid_irq_handlers'\<rbrace>
setupCallerCap sender rcvr grant
\<lbrace>\<lambda>rv. valid_irq_handlers'\<rbrace>"
unfolding setupCallerCap_def getThreadCallerSlot_def
getThreadReplySlot_def locateSlot_conv
by (wp hoare_drop_imps | simp)+
lemma cteInsert_cap_to':
"\<lbrace>ex_nonz_cap_to' p and cte_wp_at' (\<lambda>c. cteCap c = NullCap) dest\<rbrace>
cteInsert cap src dest
\<lbrace>\<lambda>rv. ex_nonz_cap_to' p\<rbrace>"
apply (simp add: cteInsert_def ex_nonz_cap_to'_def
updateCap_def setUntypedCapAsFull_def
split del: if_split)
apply (rule hoare_pre, rule hoare_vcg_ex_lift)
apply (wp updateMDB_weak_cte_wp_at
setCTE_weak_cte_wp_at
| simp
| rule hoare_drop_imps)+
apply (wp getCTE_wp)
apply clarsimp
apply (rule_tac x=cref in exI)
apply (rule conjI)
apply (clarsimp simp: cte_wp_at_ctes_of)+
done
crunch cap_to'[wp]: setExtraBadge "ex_nonz_cap_to' p"
crunch cap_to'[wp]: doIPCTransfer "ex_nonz_cap_to' p"
(ignore: transferCapsToSlots
wp: crunch_wps transferCapsToSlots_pres2 cteInsert_cap_to' hoare_vcg_const_Ball_lift
simp: zipWithM_x_mapM ball_conj_distrib)
lemma st_tcb_idle':
"\<lbrakk>valid_idle' s; st_tcb_at' P t s\<rbrakk> \<Longrightarrow>
(t = ksIdleThread s) \<longrightarrow> P IdleThreadState"
by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def)
crunch idle'[wp]: getThreadCallerSlot "valid_idle'"
crunch idle'[wp]: getThreadReplySlot "valid_idle'"
crunch it[wp]: setupCallerCap "\<lambda>s. P (ksIdleThread s)"
(simp: updateObject_cte_inv wp: crunch_wps)
lemma setupCallerCap_idle'[wp]:
"\<lbrace>valid_idle' and valid_pspace' and
(\<lambda>s. st \<noteq> ksIdleThread s \<and> rt \<noteq> ksIdleThread s)\<rbrace>
setupCallerCap st rt gr
\<lbrace>\<lambda>_. valid_idle'\<rbrace>"
by (simp add: setupCallerCap_def capRange_def | wp hoare_drop_imps)+
crunch idle'[wp]: doIPCTransfer "valid_idle'"
(wp: crunch_wps simp: crunch_simps ignore: transferCapsToSlots)
crunch it[wp]: setExtraBadge "\<lambda>s. P (ksIdleThread s)"
crunch it[wp]: receiveIPC "\<lambda>s. P (ksIdleThread s)"
(ignore: transferCapsToSlots
wp: transferCapsToSlots_pres2 crunch_wps hoare_vcg_const_Ball_lift
simp: crunch_simps ball_conj_distrib)
crunch irq_states' [wp]: setupCallerCap valid_irq_states'
(wp: crunch_wps)
crunch pde_mappings' [wp]: setupCallerCap valid_pde_mappings'
(wp: crunch_wps)
crunch irqs_masked' [wp]: receiveIPC "irqs_masked'"
(wp: crunch_wps rule: irqs_masked_lift)
crunch ct_not_inQ[wp]: getThreadCallerSlot "ct_not_inQ"
crunch ct_not_inQ[wp]: getThreadReplySlot "ct_not_inQ"
lemma setupCallerCap_ct_not_inQ[wp]:
"\<lbrace>ct_not_inQ\<rbrace> setupCallerCap sender receiver grant \<lbrace>\<lambda>_. ct_not_inQ\<rbrace>"
apply (simp add: setupCallerCap_def)
apply (wp hoare_drop_imp setThreadState_ct_not_inQ)
done
crunch ksQ'[wp]: copyMRs "\<lambda>s. P (ksReadyQueues s)"
(wp: mapM_wp' hoare_drop_imps simp: crunch_simps)
crunch ksQ[wp]: doIPCTransfer "\<lambda>s. P (ksReadyQueues s)"
(wp: hoare_drop_imps hoare_vcg_split_case_option
mapM_wp'
simp: split_def zipWithM_x_mapM)
crunch ct'[wp]: doIPCTransfer "\<lambda>s. P (ksCurThread s)"
(wp: hoare_drop_imps hoare_vcg_split_case_option
mapM_wp'
simp: split_def zipWithM_x_mapM)
lemma asUser_ct_not_inQ[wp]:
"\<lbrace>ct_not_inQ\<rbrace> asUser t m \<lbrace>\<lambda>rv. ct_not_inQ\<rbrace>"
apply (simp add: asUser_def split_def)
apply (wp hoare_drop_imps threadSet_not_inQ | simp)+
done
crunch ct_not_inQ[wp]: copyMRs "ct_not_inQ"
(wp: mapM_wp' hoare_drop_imps simp: crunch_simps)
crunch ct_not_inQ[wp]: doIPCTransfer "ct_not_inQ"
(ignore: getRestartPC setRegister transferCapsToSlots
wp: hoare_drop_imps hoare_vcg_split_case_option
mapM_wp'
simp: split_def zipWithM_x_mapM)
lemma ntfn_q_refs_no_bound_refs': "rf \<in> ntfn_q_refs_of' (ntfnObj ob) \<Longrightarrow> rf \<notin> ntfn_bound_refs' (ntfnBoundTCB ob')"
by (auto simp add: ntfn_q_refs_of'_def ntfn_bound_refs'_def
split: Structures_H.ntfn.splits)
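(* completeSignal resets an active bound notification to idle after delivering
   its badge; this preserves the global invariants. *)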
lemma completeSignal_invs:
"\<lbrace>invs' and tcb_at' tcb\<rbrace>
completeSignal ntfnptr tcb
\<lbrace>\<lambda>_. invs'\<rbrace>"
apply (simp add: completeSignal_def)
apply (rule hoare_seq_ext[OF _ get_ntfn_sp'])
apply (rule hoare_pre)
apply (wp set_ntfn_minor_invs' | wpc | simp)+
apply (rule_tac Q="\<lambda>_ s. (state_refs_of' s ntfnptr = ntfn_bound_refs' (ntfnBoundTCB ntfn))
\<and> ntfn_at' ntfnptr s
\<and> valid_ntfn' (ntfnObj_update (\<lambda>_. Structures_H.ntfn.IdleNtfn) ntfn) s
\<and> ((\<exists>y. ntfnBoundTCB ntfn = Some y) \<longrightarrow> ex_nonz_cap_to' ntfnptr s)
\<and> ntfnptr \<noteq> ksIdleThread s"
in hoare_strengthen_post)
apply ((wp hoare_vcg_ex_lift static_imp_wp | wpc | simp add: valid_ntfn'_def)+)[1]
apply (clarsimp simp: obj_at'_def state_refs_of'_def typ_at'_def ko_wp_at'_def projectKOs split: option.splits)
apply (blast dest: ntfn_q_refs_no_bound_refs')
apply wp
apply (subgoal_tac "valid_ntfn' ntfn s")
apply (subgoal_tac "ntfnptr \<noteq> ksIdleThread s")
apply (fastforce simp: valid_ntfn'_def valid_bound_tcb'_def projectKOs ko_at_state_refs_ofD'
elim: obj_at'_weakenE
if_live_then_nonz_capD'[OF invs_iflive'
obj_at'_real_def[THEN meta_eq_to_obj_eq,
THEN iffD1]])
apply (fastforce simp: valid_idle'_def pred_tcb_at'_def obj_at'_def projectKOs
dest!: invs_valid_idle')
apply (fastforce dest: invs_valid_objs' ko_at_valid_objs'
simp: valid_obj'_def projectKOs)[1]
done
lemma setupCallerCap_urz[wp]:
"\<lbrace>untyped_ranges_zero' and valid_pspace' and tcb_at' sender\<rbrace>
setupCallerCap sender t g \<lbrace>\<lambda>rv. untyped_ranges_zero'\<rbrace>"
apply (simp add: setupCallerCap_def getSlotCap_def
getThreadCallerSlot_def getThreadReplySlot_def
locateSlot_conv)
apply (wp getCTE_wp')
apply (rule_tac Q="\<lambda>_. untyped_ranges_zero' and valid_mdb' and valid_objs'" in hoare_post_imp)
apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def untyped_derived_eq_def
isCap_simps)
apply (wp sts_valid_pspace_hangers)
apply (clarsimp simp: valid_tcb_state'_def)
done
lemmas threadSet_urz = untyped_ranges_zero_lift[where f="cteCaps_of", OF _ threadSet_cteCaps_of]
crunch urz[wp]: doIPCTransfer "untyped_ranges_zero'"
(ignore: threadSet wp: threadSet_urz crunch_wps simp: zipWithM_x_mapM)
crunch gsUntypedZeroRanges[wp]: receiveIPC "\<lambda>s. P (gsUntypedZeroRanges s)"
(wp: crunch_wps transferCapsToSlots_pres1 simp: zipWithM_x_mapM ignore: constOnFailure)
crunch ctes_of[wp]: possibleSwitchTo "\<lambda>s. P (ctes_of s)"
(wp: crunch_wps ignore: constOnFailure)
lemmas possibleSwitchToTo_cteCaps_of[wp]
= cteCaps_of_ctes_of_lift[OF possibleSwitchTo_ctes_of]
(* t = ksCurThread s *)
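(* Invariant preservation for receiveIPC; after dispatching any active bound
   notification, the proof splits on the endpoint state. *)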
lemma ri_invs' [wp]:
"\<lbrace>invs' and sch_act_not t
and ct_in_state' simple'
and st_tcb_at' simple' t
and (\<lambda>s. \<forall>p. t \<notin> set (ksReadyQueues s p))
and ex_nonz_cap_to' t
and (\<lambda>s. \<forall>r \<in> zobj_refs' cap. ex_nonz_cap_to' r s)\<rbrace>
receiveIPC t cap isBlocking
\<lbrace>\<lambda>_. invs'\<rbrace>" (is "\<lbrace>?pre\<rbrace> _ \<lbrace>_\<rbrace>")
apply (clarsimp simp: receiveIPC_def)
apply (rule hoare_seq_ext [OF _ get_ep_sp'])
apply (rule hoare_seq_ext [OF _ gbn_sp'])
apply (rule hoare_seq_ext)
(* set up precondition for old proof *)
apply (rule_tac R="ko_at' ep (capEPPtr cap) and ?pre" in hoare_vcg_if_split)
apply (wp completeSignal_invs)
apply (case_tac ep)
\<comment> \<open>endpoint = RecvEP\<close>
apply (simp add: invs'_def valid_state'_def)
apply (rule hoare_pre, wpc, wp valid_irq_node_lift)
apply (simp add: valid_ep'_def)
apply (wp sts_sch_act' hoare_vcg_const_Ball_lift valid_irq_node_lift
sts_valid_queues setThreadState_ct_not_inQ
asUser_urz
| simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+
apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at' o_def)
apply (rule conjI, clarsimp elim!: obj_at'_weakenE)
apply (frule obj_at_valid_objs')
apply (clarsimp simp: valid_pspace'_def)
apply (drule(1) sym_refs_ko_atD')
apply (drule simple_st_tcb_at_state_refs_ofD')
apply (drule bound_tcb_at_state_refs_ofD')
apply (clarsimp simp: st_tcb_at_refs_of_rev' valid_ep'_def
valid_obj'_def projectKOs tcb_bound_refs'_def
dest!: isCapDs)
apply (rule conjI, clarsimp)
apply (drule (1) bspec)
apply (clarsimp dest!: st_tcb_at_state_refs_ofD')
apply (clarsimp simp: set_eq_subset)
apply (rule conjI, erule delta_sym_refs)
apply (clarsimp split: if_split_asm)
apply (rename_tac list one two three four five six seven eight nine ten eleven)
apply (subgoal_tac "set list \<times> {EPRecv} \<noteq> {}")
apply (thin_tac "\<forall>a b. t \<notin> set (ksReadyQueues one (a, b))") \<comment> \<open>causes slowdown\<close>
apply (safe ; solves \<open>auto\<close>)
apply fastforce
apply fastforce
apply (clarsimp split: if_split_asm)
apply (fastforce simp: valid_pspace'_def global'_no_ex_cap idle'_not_queued)
\<comment> \<open>endpoint = IdleEP\<close>
apply (simp add: invs'_def valid_state'_def)
apply (rule hoare_pre, wpc, wp valid_irq_node_lift)
apply (simp add: valid_ep'_def)
apply (wp sts_sch_act' valid_irq_node_lift
sts_valid_queues setThreadState_ct_not_inQ
asUser_urz
| simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+
apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def o_def)
apply (rule conjI, clarsimp elim!: obj_at'_weakenE)
apply (subgoal_tac "t \<noteq> capEPPtr cap")
apply (drule simple_st_tcb_at_state_refs_ofD')
apply (drule ko_at_state_refs_ofD')
apply (drule bound_tcb_at_state_refs_ofD')
apply (clarsimp dest!: isCapDs)
apply (rule conjI, erule delta_sym_refs)
apply (clarsimp split: if_split_asm)
apply (clarsimp simp: tcb_bound_refs'_def
dest: symreftype_inverse'
split: if_split_asm)
apply (fastforce simp: global'_no_ex_cap)
apply (clarsimp simp: obj_at'_def pred_tcb_at'_def projectKOs)
\<comment> \<open>endpoint = SendEP\<close>
apply (simp add: invs'_def valid_state'_def)
apply (rename_tac list)
apply (case_tac list, simp_all split del: if_split)
apply (rename_tac sender queue)
apply (rule hoare_pre)
apply (wp valid_irq_node_lift hoare_drop_imps setEndpoint_valid_mdb'
set_ep_valid_objs' sts_st_tcb' sts_sch_act' sts_valid_queues
setThreadState_ct_not_inQ possibleSwitchTo_valid_queues
possibleSwitchTo_valid_queues'
possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift
setEndpoint_ksQ setEndpoint_ct'
| simp add: valid_tcb_state'_def case_bool_If
case_option_If
split del: if_split cong: if_cong
| wp (once) sch_act_sane_lift hoare_vcg_conj_lift hoare_vcg_all_lift
untyped_ranges_zero_lift)+
apply (clarsimp split del: if_split simp: pred_tcb_at')
apply (frule obj_at_valid_objs')
apply (clarsimp simp: valid_pspace'_def)
apply (frule(1) ct_not_in_epQueue, clarsimp, clarsimp)
apply (drule(1) sym_refs_ko_atD')
apply (drule simple_st_tcb_at_state_refs_ofD')
apply (clarsimp simp: projectKOs valid_obj'_def valid_ep'_def
st_tcb_at_refs_of_rev' conj_ac
split del: if_split
cong: if_cong)
apply (frule_tac t=sender in valid_queues_not_runnable'_not_ksQ)
apply (erule pred_tcb'_weakenE, clarsimp)
apply (subgoal_tac "sch_act_not sender s")
prefer 2
apply (clarsimp simp: pred_tcb_at'_def obj_at'_def)
apply (drule st_tcb_at_state_refs_ofD')
apply (simp only: conj_ac(1, 2)[where Q="sym_refs R" for R])
apply (subgoal_tac "distinct (ksIdleThread s # capEPPtr cap # t # sender # queue)")
apply (rule conjI)
apply (clarsimp simp: ep_redux_simps' cong: if_cong)
apply (erule delta_sym_refs)
apply (clarsimp split: if_split_asm)
apply (fastforce simp: tcb_bound_refs'_def
dest: symreftype_inverse'
split: if_split_asm)
apply (clarsimp simp: singleton_tuple_cartesian split: list.split
| rule conjI | drule(1) bspec
| drule st_tcb_at_state_refs_ofD' bound_tcb_at_state_refs_ofD'
| clarsimp elim!: if_live_state_refsE)+
apply (case_tac cap, simp_all add: isEndpointCap_def)
apply (clarsimp simp: global'_no_ex_cap)
apply (rule conjI
| clarsimp simp: singleton_tuple_cartesian split: list.split
| clarsimp elim!: if_live_state_refsE
| clarsimp simp: global'_no_ex_cap idle'_not_queued' idle'_no_refs tcb_bound_refs'_def
| drule(1) bspec | drule st_tcb_at_state_refs_ofD'
| clarsimp simp: set_eq_subset dest!: bound_tcb_at_state_refs_ofD' )+
apply (rule hoare_pre)
apply (wp getNotification_wp | wpc | clarsimp)+
done
(* t = ksCurThread s *)
lemma rai_invs'[wp]:
"\<lbrace>invs' and sch_act_not t
and st_tcb_at' simple' t
and (\<lambda>s. \<forall>p. t \<notin> set (ksReadyQueues s p))
and ex_nonz_cap_to' t
and (\<lambda>s. \<forall>r \<in> zobj_refs' cap. ex_nonz_cap_to' r s)
and (\<lambda>s. \<exists>ntfnptr. isNotificationCap cap
\<and> capNtfnPtr cap = ntfnptr
\<and> obj_at' (\<lambda>ko. ntfnBoundTCB ko = None \<or> ntfnBoundTCB ko = Some t)
ntfnptr s)\<rbrace>
receiveSignal t cap isBlocking
\<lbrace>\<lambda>_. invs'\<rbrace>"
apply (simp add: receiveSignal_def)
apply (rule hoare_seq_ext [OF _ get_ntfn_sp'])
apply (rename_tac ep)
apply (case_tac "ntfnObj ep")
\<comment> \<open>ep = IdleNtfn\<close>
apply (simp add: invs'_def valid_state'_def)
apply (rule hoare_pre)
apply (wp valid_irq_node_lift sts_sch_act' typ_at_lifts
sts_valid_queues setThreadState_ct_not_inQ
asUser_urz
| simp add: valid_ntfn'_def doNBRecvFailedTransfer_def | wpc)+
apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def)
apply (rule conjI, clarsimp elim!: obj_at'_weakenE)
apply (subgoal_tac "capNtfnPtr cap \<noteq> t")
apply (frule valid_pspace_valid_objs')
apply (frule (1) ko_at_valid_objs')
apply (clarsimp simp: projectKOs)
apply (clarsimp simp: valid_obj'_def valid_ntfn'_def)
apply (rule conjI, clarsimp simp: obj_at'_def split: option.split)
apply (drule simple_st_tcb_at_state_refs_ofD'
ko_at_state_refs_ofD' bound_tcb_at_state_refs_ofD')+
apply (clarsimp dest!: isCapDs)
apply (rule conjI, erule delta_sym_refs)
apply (clarsimp split: if_split_asm)
apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse'
split: if_split_asm)
apply (clarsimp dest!: global'_no_ex_cap)
apply (clarsimp simp: pred_tcb_at'_def obj_at'_def projectKOs)
\<comment> \<open>ep = ActiveNtfn\<close>
apply (simp add: invs'_def valid_state'_def)
apply (rule hoare_pre)
apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts static_imp_wp
asUser_urz
| simp add: valid_ntfn'_def)+
apply (clarsimp simp: pred_tcb_at' valid_pspace'_def)
apply (frule (1) ko_at_valid_objs')
apply (clarsimp simp: projectKOs)
apply (clarsimp simp: valid_obj'_def valid_ntfn'_def isCap_simps)
apply (drule simple_st_tcb_at_state_refs_ofD'
ko_at_state_refs_ofD')+
apply (erule delta_sym_refs)
apply (clarsimp split: if_split_asm simp: global'_no_ex_cap)+
\<comment> \<open>ep = WaitingNtfn\<close>
apply (simp add: invs'_def valid_state'_def)
apply (rule hoare_pre)
apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act'
sts_valid_queues setThreadState_ct_not_inQ typ_at_lifts
asUser_urz
| simp add: valid_ntfn'_def doNBRecvFailedTransfer_def | wpc)+
apply (clarsimp simp: valid_tcb_state'_def)
apply (frule_tac t=t in not_in_ntfnQueue)
apply (simp)
apply (simp)
apply (erule pred_tcb'_weakenE, clarsimp)
apply (frule ko_at_valid_objs')
apply (clarsimp simp: valid_pspace'_def)
apply (simp add: projectKOs)
apply (clarsimp simp: valid_obj'_def)
apply (clarsimp simp: valid_ntfn'_def pred_tcb_at')
apply (rule conjI, clarsimp elim!: obj_at'_weakenE)
apply (rule conjI, clarsimp simp: obj_at'_def split: option.split)
apply (drule(1) sym_refs_ko_atD')
apply (drule simple_st_tcb_at_state_refs_ofD')
apply (drule bound_tcb_at_state_refs_ofD')
apply (clarsimp simp: st_tcb_at_refs_of_rev'
dest!: isCapDs)
apply (rule conjI, erule delta_sym_refs)
apply (clarsimp split: if_split_asm)
apply (rename_tac list one two three four five six seven eight nine)
apply (subgoal_tac "set list \<times> {NTFNSignal} \<noteq> {}")
apply safe[1]
apply (auto simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def)[5]
apply (fastforce simp: tcb_bound_refs'_def
split: if_split_asm)
apply (clarsimp dest!: global'_no_ex_cap)
done
lemma getCTE_cap_to_refs[wp]:
"\<lbrace>\<top>\<rbrace> getCTE p \<lbrace>\<lambda>rv s. \<forall>r\<in>zobj_refs' (cteCap rv). ex_nonz_cap_to' r s\<rbrace>"
apply (rule hoare_strengthen_post [OF getCTE_sp])
apply (clarsimp simp: ex_nonz_cap_to'_def)
apply (fastforce elim: cte_wp_at_weakenE')
done
lemma lookupCap_cap_to_refs[wp]:
"\<lbrace>\<top>\<rbrace> lookupCap t cref \<lbrace>\<lambda>rv s. \<forall>r\<in>zobj_refs' rv. ex_nonz_cap_to' r s\<rbrace>,-"
apply (simp add: lookupCap_def lookupCapAndSlot_def split_def
getSlotCap_def)
apply (wp | simp)+
done
lemma arch_stt_objs' [wp]:
"\<lbrace>valid_objs'\<rbrace> Arch.switchToThread t \<lbrace>\<lambda>rv. valid_objs'\<rbrace>"
apply (simp add: ARM_H.switchToThread_def)
apply wp
done
declare zipWithM_x_mapM [simp]
lemma cteInsert_invs_bits[wp]:
"\<lbrace>\<lambda>s. sch_act_wf (ksSchedulerAction s) s\<rbrace>
cteInsert a b c
\<lbrace>\<lambda>rv s. sch_act_wf (ksSchedulerAction s) s\<rbrace>"
"\<lbrace>Invariants_H.valid_queues\<rbrace> cteInsert a b c \<lbrace>\<lambda>rv. Invariants_H.valid_queues\<rbrace>"
"\<lbrace>cur_tcb'\<rbrace> cteInsert a b c \<lbrace>\<lambda>rv. cur_tcb'\<rbrace>"
"\<lbrace>\<lambda>s. P (state_refs_of' s)\<rbrace>
cteInsert a b c
\<lbrace>\<lambda>rv s. P (state_refs_of' s)\<rbrace>"
apply (wp sch_act_wf_lift valid_queues_lift
cur_tcb_lift tcb_in_cur_domain'_lift)+
done
lemma possibleSwitchTo_sch_act_not:
"\<lbrace>sch_act_not t' and K (t \<noteq> t')\<rbrace> possibleSwitchTo t \<lbrace>\<lambda>rv. sch_act_not t'\<rbrace>"
apply (simp add: possibleSwitchTo_def setSchedulerAction_def curDomain_def)
apply (wp hoare_drop_imps | wpc | simp)+
done
crunch vms'[wp]: possibleSwitchTo valid_machine_state'
crunch pspace_domain_valid[wp]: possibleSwitchTo pspace_domain_valid
crunch ct_idle_or_in_cur_domain'[wp]: possibleSwitchTo ct_idle_or_in_cur_domain'
crunch ct'[wp]: possibleSwitchTo "\<lambda>s. P (ksCurThread s)"
crunch it[wp]: possibleSwitchTo "\<lambda>s. P (ksIdleThread s)"
crunch irqs_masked'[wp]: possibleSwitchTo "irqs_masked'"
crunch urz[wp]: possibleSwitchTo "untyped_ranges_zero'"
(simp: crunch_simps unless_def wp: crunch_wps)
lemma si_invs'[wp]:
"\<lbrace>invs' and st_tcb_at' simple' t
and (\<lambda>s. \<forall>p. t \<notin> set (ksReadyQueues s p))
and sch_act_not t
and ex_nonz_cap_to' ep and ex_nonz_cap_to' t\<rbrace>
sendIPC bl call ba cg cgr t ep
\<lbrace>\<lambda>rv. invs'\<rbrace>"
supply if_split[split del]
apply (simp add: sendIPC_def split del: if_split)
apply (rule hoare_seq_ext [OF _ get_ep_sp'])
apply (case_tac epa)
\<comment> \<open>epa = RecvEP\<close>
apply simp
apply (rename_tac list)
apply (case_tac list)
apply simp
apply (simp split del: if_split add: invs'_def valid_state'_def)
apply (rule hoare_pre)
apply (rule_tac P="a\<noteq>t" in hoare_gen_asm)
apply (wp valid_irq_node_lift
sts_valid_objs' set_ep_valid_objs' setEndpoint_valid_mdb' sts_st_tcb' sts_sch_act'
possibleSwitchTo_sch_act_not sts_valid_queues setThreadState_ct_not_inQ
possibleSwitchTo_ksQ' possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift sts_ksQ'
hoare_convert_imp [OF doIPCTransfer_sch_act doIPCTransfer_ct']
hoare_convert_imp [OF setEndpoint_nosch setEndpoint_ct']
hoare_drop_imp [where f="threadGet tcbFault t"]
| rule_tac f="getThreadState a" in hoare_drop_imp
| wp (once) hoare_drop_imp[where R="\<lambda>_ _. call"]
hoare_drop_imp[where R="\<lambda>_ _. \<not> call"]
hoare_drop_imp[where R="\<lambda>_ _. cg"]
| simp add: valid_tcb_state'_def case_bool_If
case_option_If
cong: if_cong
split del: if_split
| wp (once) sch_act_sane_lift tcb_in_cur_domain'_lift hoare_vcg_const_imp_lift)+
apply (clarsimp simp: pred_tcb_at' cong: conj_cong imp_cong
split del: if_split)
apply (frule obj_at_valid_objs', clarsimp)
apply (frule(1) sym_refs_ko_atD')
apply (clarsimp simp: projectKOs valid_obj'_def valid_ep'_def
st_tcb_at_refs_of_rev' pred_tcb_at'
conj_comms fun_upd_def[symmetric]
split del: if_split)
apply (frule pred_tcb_at')
apply (drule simple_st_tcb_at_state_refs_ofD' st_tcb_at_state_refs_ofD')+
apply (clarsimp simp: valid_pspace'_splits)
apply (subst fun_upd_idem[where x=t])
apply (clarsimp split: if_split)
apply (rule conjI, clarsimp simp: obj_at'_def projectKOs)
apply (drule bound_tcb_at_state_refs_ofD')
apply (fastforce simp: tcb_bound_refs'_def)
apply (subgoal_tac "ex_nonz_cap_to' a s")
prefer 2
apply (clarsimp elim!: if_live_state_refsE)
apply clarsimp
apply (rule conjI)
apply (drule bound_tcb_at_state_refs_ofD')
apply (fastforce simp: tcb_bound_refs'_def set_eq_subset)
apply (clarsimp simp: conj_ac)
apply (rule conjI, clarsimp simp: idle'_no_refs)
apply (rule conjI, clarsimp simp: global'_no_ex_cap)
apply (rule conjI)
apply (rule impI)
apply (frule(1) ct_not_in_epQueue, clarsimp, clarsimp)
apply (clarsimp)
apply (simp add: ep_redux_simps')
apply (rule conjI, clarsimp split: if_split)
apply (rule conjI, fastforce simp: tcb_bound_refs'_def set_eq_subset)
apply (clarsimp, erule delta_sym_refs;
solves\<open>auto simp: symreftype_inverse' tcb_bound_refs'_def split: if_split_asm\<close>)
apply (solves\<open>clarsimp split: list.splits\<close>)
\<comment> \<open>epa = IdleEP\<close>
apply (cases bl)
apply (simp add: invs'_def valid_state'_def)
apply (rule hoare_pre, wp valid_irq_node_lift)
apply (simp add: valid_ep'_def)
apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues
setThreadState_ct_not_inQ)
apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at')
apply (rule conjI, clarsimp elim!: obj_at'_weakenE)
apply (subgoal_tac "ep \<noteq> t")
apply (drule simple_st_tcb_at_state_refs_ofD' ko_at_state_refs_ofD'
bound_tcb_at_state_refs_ofD')+
apply (rule conjI, erule delta_sym_refs)
apply (auto simp: tcb_bound_refs'_def symreftype_inverse'
split: if_split_asm)[2]
apply (fastforce simp: global'_no_ex_cap)
apply (clarsimp simp: pred_tcb_at'_def obj_at'_def projectKOs)
apply simp
apply wp
apply simp
\<comment> \<open>epa = SendEP\<close>
apply (cases bl)
apply (simp add: invs'_def valid_state'_def)
apply (rule hoare_pre, wp valid_irq_node_lift)
apply (simp add: valid_ep'_def)
apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act'
sts_valid_queues setThreadState_ct_not_inQ)
apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at')
apply (rule conjI, clarsimp elim!: obj_at'_weakenE)
apply (frule obj_at_valid_objs', clarsimp)
apply (frule(1) sym_refs_ko_atD')
apply (frule pred_tcb_at')
apply (drule simple_st_tcb_at_state_refs_ofD')
apply (drule bound_tcb_at_state_refs_ofD')
apply (clarsimp simp: valid_obj'_def valid_ep'_def
projectKOs st_tcb_at_refs_of_rev')
apply (rule conjI, clarsimp)
apply (drule (1) bspec)
apply (clarsimp dest!: st_tcb_at_state_refs_ofD' bound_tcb_at_state_refs_ofD'
simp: tcb_bound_refs'_def)
apply (clarsimp simp: set_eq_subset)
apply (rule conjI, erule delta_sym_refs)
subgoal by (fastforce simp: obj_at'_def projectKOs symreftype_inverse'
split: if_split_asm)
apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse'
split: if_split_asm)
apply (fastforce simp: global'_no_ex_cap idle'_not_queued)
apply (simp | wp)+
done
lemma sfi_invs_plus':
"\<lbrace>invs' and st_tcb_at' simple' t
and sch_act_not t
and (\<lambda>s. \<forall>p. t \<notin> set (ksReadyQueues s p))
and ex_nonz_cap_to' t\<rbrace>
sendFaultIPC t f
\<lbrace>\<lambda>rv. invs'\<rbrace>, \<lbrace>\<lambda>rv. invs' and st_tcb_at' simple' t
and (\<lambda>s. \<forall>p. t \<notin> set (ksReadyQueues s p))
and sch_act_not t and (\<lambda>s. ksIdleThread s \<noteq> t)\<rbrace>"
apply (simp add: sendFaultIPC_def)
apply (wp threadSet_invs_trivial threadSet_pred_tcb_no_state
threadSet_cap_to'
| wpc | simp)+
apply (rule_tac Q'="\<lambda>rv s. invs' s \<and> sch_act_not t s
\<and> st_tcb_at' simple' t s
\<and> (\<forall>p. t \<notin> set (ksReadyQueues s p))
\<and> ex_nonz_cap_to' t s
\<and> t \<noteq> ksIdleThread s
\<and> (\<forall>r\<in>zobj_refs' rv. ex_nonz_cap_to' r s)"
in hoare_post_imp_R)
apply wp
apply (clarsimp simp: inQ_def pred_tcb_at')
apply (wp | simp)+
apply (clarsimp simp: eq_commute)
apply (subst(asm) global'_no_ex_cap, auto)
done
lemma handleFault_corres:
"fr f f' \<Longrightarrow>
corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread
and (%_. valid_fault f))
(invs' and sch_act_not thread
and (\<lambda>s. \<forall>p. thread \<notin> set(ksReadyQueues s p))
and st_tcb_at' simple' thread and ex_nonz_cap_to' thread)
(handle_fault thread f) (handleFault thread f')"
apply (simp add: handle_fault_def handleFault_def)
apply (rule corres_guard_imp)
apply (subst return_bind [symmetric],
rule corres_split[where P="tcb_at thread",
OF gets_the_noop_corres [where x="()"]])
apply (simp add: tcb_at_def)
apply (rule corres_split_catch)
apply (rule_tac F="valid_fault f" in corres_gen_asm)
apply (rule sendFaultIPC_corres, assumption)
apply simp
apply (rule handleDoubleFault_corres)
apply wp+
apply (rule hoare_post_impErr, rule sfi_invs_plus', simp_all)[1]
apply clarsimp
apply wp+
apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 invs_def
valid_state_def valid_idle_def)
apply auto
done
lemma sts_invs_minor'':
"\<lbrace>st_tcb_at' (\<lambda>st'. tcb_st_refs_of' st' = tcb_st_refs_of' st
\<and> (st \<noteq> Inactive \<and> \<not> idle' st \<longrightarrow>
st' \<noteq> Inactive \<and> \<not> idle' st')) t
and (\<lambda>s. t = ksIdleThread s \<longrightarrow> idle' st)
and (\<lambda>s. (\<exists>p. t \<in> set (ksReadyQueues s p)) \<longrightarrow> runnable' st)
and (\<lambda>s. runnable' st \<and> obj_at' tcbQueued t s
\<longrightarrow> st_tcb_at' runnable' t s)
and (\<lambda>s. \<not> runnable' st \<longrightarrow> sch_act_not t s)
and invs'\<rbrace>
setThreadState st t
\<lbrace>\<lambda>rv. invs'\<rbrace>"
apply (simp add: invs'_def valid_state'_def)
apply (rule hoare_pre)
apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues
setThreadState_ct_not_inQ)
apply clarsimp
apply (rule conjI)
apply fastforce
apply (rule conjI)
apply (clarsimp simp: pred_tcb_at'_def)
apply (drule obj_at_valid_objs')
apply (clarsimp simp: valid_pspace'_def)
apply (clarsimp simp: valid_obj'_def valid_tcb'_def projectKOs)
subgoal by (cases st, auto simp: valid_tcb_state'_def
split: Structures_H.thread_state.splits)[1]
apply (rule conjI)
apply (clarsimp dest!: st_tcb_at_state_refs_ofD'
elim!: rsubst[where P=sym_refs]
intro!: ext)
apply (clarsimp elim!: st_tcb_ex_cap'')
done
lemma hf_invs' [wp]:
"\<lbrace>invs' and sch_act_not t
and (\<lambda>s. \<forall>p. t \<notin> set(ksReadyQueues s p))
and st_tcb_at' simple' t
and ex_nonz_cap_to' t and (\<lambda>s. t \<noteq> ksIdleThread s)\<rbrace>
handleFault t f \<lbrace>\<lambda>r. invs'\<rbrace>"
apply (simp add: handleFault_def)
apply wp
apply (simp add: handleDoubleFault_def)
apply (wp sts_invs_minor'' dmo_invs')+
apply (rule hoare_post_impErr, rule sfi_invs_plus',
simp_all)
apply (strengthen no_refs_simple_strg')
apply clarsimp
done
declare zipWithM_x_mapM [simp del]
lemma gts_st_tcb':
"\<lbrace>\<top>\<rbrace> getThreadState t \<lbrace>\<lambda>r. st_tcb_at' (\<lambda>st. st = r) t\<rbrace>"
apply (rule hoare_strengthen_post)
apply (rule gts_sp')
apply simp
done
declare setEndpoint_ct' [wp]
lemma setupCallerCap_pred_tcb_unchanged:
"\<lbrace>pred_tcb_at' proj P t and K (t \<noteq> t')\<rbrace>
setupCallerCap t' t'' g
\<lbrace>\<lambda>rv. pred_tcb_at' proj P t\<rbrace>"
apply (simp add: setupCallerCap_def getThreadCallerSlot_def
getThreadReplySlot_def)
apply (wp sts_pred_tcb_neq' hoare_drop_imps)
apply clarsimp
done
lemma si_blk_makes_simple':
"\<lbrace>st_tcb_at' simple' t and K (t \<noteq> t')\<rbrace>
sendIPC True call bdg x x' t' ep
\<lbrace>\<lambda>rv. st_tcb_at' simple' t\<rbrace>"
apply (simp add: sendIPC_def)
apply (rule hoare_seq_ext [OF _ get_ep_inv'])
apply (case_tac xa, simp_all)
apply (rename_tac list)
apply (case_tac list, simp_all add: case_bool_If case_option_If
split del: if_split cong: if_cong)
apply (rule hoare_pre)
apply (wp sts_st_tcb_at'_cases setupCallerCap_pred_tcb_unchanged
hoare_drop_imps)
apply (clarsimp simp: pred_tcb_at' del: disjCI)
apply (wp sts_st_tcb_at'_cases)
apply clarsimp
apply (wp sts_st_tcb_at'_cases)
apply clarsimp
done
lemma si_blk_makes_runnable':
"\<lbrace>st_tcb_at' runnable' t and K (t \<noteq> t')\<rbrace>
sendIPC True call bdg x x' t' ep
\<lbrace>\<lambda>rv. st_tcb_at' runnable' t\<rbrace>"
apply (simp add: sendIPC_def)
apply (rule hoare_seq_ext [OF _ get_ep_inv'])
apply (case_tac xa, simp_all)
apply (rename_tac list)
apply (case_tac list, simp_all add: case_bool_If case_option_If
split del: if_split cong: if_cong)
apply (rule hoare_pre)
apply (wp sts_st_tcb_at'_cases setupCallerCap_pred_tcb_unchanged
hoare_vcg_const_imp_lift hoare_drop_imps
| simp)+
apply (clarsimp del: disjCI simp: pred_tcb_at' elim!: pred_tcb'_weakenE)
apply (wp sts_st_tcb_at'_cases)
apply clarsimp
apply (wp sts_st_tcb_at'_cases)
apply clarsimp
done
crunches possibleSwitchTo, completeSignal
for pred_tcb_at'[wp]: "pred_tcb_at' proj P t"
lemma sendSignal_st_tcb'_Running:
"\<lbrace>st_tcb_at' (\<lambda>st. st = Running \<or> P st) t\<rbrace>
sendSignal ntfnptr bdg
\<lbrace>\<lambda>_. st_tcb_at' (\<lambda>st. st = Running \<or> P st) t\<rbrace>"
apply (simp add: sendSignal_def)
apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp static_imp_wp
| wpc | clarsimp simp: pred_tcb_at')+
done
end
end
|
Require Import Blech.Defaults.
Require Import Blech.Bishop.
Require Import Blech.Category.
Require Import Blech.Category.Funct.
Require Import Blech.Category.Bsh.
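(* A hedged reading of the definition below: [CoPSh C] is the category of
   copresheaves on [C], i.e. functors from [C] into [Bsh] (which, going by
   the [Blech.Bishop] import, appears to be a category of Bishop sets /
   setoids), assembled via the functor category [Funct]. *)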
Definition CoPSh (C: Category): Category := Funct C Bsh.
|
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import order.category.PartOrd
import order.hom.lattice
/-!
# The category of lattices
This defines `Lat`, the category of lattices.
Note that `Lat` doesn't correspond to the literature definition of [`Lat`]
(https://ncatlab.org/nlab/show/Lat) as we don't require bottom or top elements. Instead, `Lat`
corresponds to `BddLat`.
## TODO
The free functor from `Lat` to `BddLat` is `X → with_top (with_bot X)`.
-/
universes u
open category_theory
/-- The category of lattices. -/
def Lat := bundled lattice
namespace Lat
instance : has_coe_to_sort Lat Type* := bundled.has_coe_to_sort
instance (X : Lat) : lattice X := X.str
/-- Construct a bundled `Lat` from a `lattice`. -/
def of (α : Type*) [lattice α] : Lat := bundled.of α
@[simp] lemma coe_of (α : Type*) [lattice α] : ↥(of α) = α := rfl
instance : inhabited Lat := ⟨of bool⟩
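/- A hedged usage sketch: any linear order is a lattice, so `ℕ` packages as an
object of `Lat` via `of`; `example` merely checks that the term elaborates. -/
example : Lat := of ℕ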
instance : bundled_hom @lattice_hom :=
{ to_fun := λ _ _ _ _, coe_fn,
id := @lattice_hom.id,
comp := @lattice_hom.comp,
hom_ext := λ X Y _ _, by exactI fun_like.coe_injective }
instance : large_category.{u} Lat := bundled_hom.category lattice_hom
instance : concrete_category Lat := bundled_hom.concrete_category lattice_hom
instance has_forget_to_PartOrd : has_forget₂ Lat PartOrd :=
{ forget₂ := { obj := λ X, ⟨X⟩, map := λ X Y f, f },
forget_comp := rfl }
/-- Constructs an isomorphism of lattices from an order isomorphism between them. -/
@[simps] def iso.mk {α β : Lat.{u}} (e : α ≃o β) : α ≅ β :=
{ hom := e,
inv := e.symm,
hom_inv_id' := by { ext, exact e.symm_apply_apply _ },
inv_hom_id' := by { ext, exact e.apply_symm_apply _ } }
/-- `order_dual` as a functor. -/
@[simps] def dual : Lat ⥤ Lat := { obj := λ X, of Xᵒᵈ, map := λ X Y, lattice_hom.dual }
/-- The equivalence between `Lat` and itself induced by `order_dual` both ways. -/
@[simps functor inverse] def dual_equiv : Lat ≌ Lat :=
equivalence.mk dual dual
(nat_iso.of_components (λ X, iso.mk $ order_iso.dual_dual X) $ λ X Y f, rfl)
(nat_iso.of_components (λ X, iso.mk $ order_iso.dual_dual X) $ λ X Y f, rfl)
end Lat
lemma Lat_dual_comp_forget_to_PartOrd :
Lat.dual ⋙ forget₂ Lat PartOrd =
forget₂ Lat PartOrd ⋙ PartOrd.dual := rfl
|
/* multifit/gsl_multifit.h
*
* Copyright (C) 2000, 2007, 2010 Brian Gough
* Copyright (C) 2013, Patrick Alken
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __GSL_MULTIFIT_H__
#define __GSL_MULTIFIT_H__
#include <stdlib.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_types.h>
#undef __BEGIN_DECLS
#undef __END_DECLS
#ifdef __cplusplus
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
#else
# define __BEGIN_DECLS /* empty */
# define __END_DECLS /* empty */
#endif
__BEGIN_DECLS
typedef struct
{
size_t nmax; /* maximum number of observations */
size_t pmax; /* maximum number of parameters */
size_t n; /* number of observations in current SVD decomposition */
size_t p; /* number of parameters in current SVD decomposition */
gsl_matrix * A; /* least squares matrix for SVD, n-by-p */
gsl_matrix * Q;
gsl_matrix * QSI;
gsl_vector * S;
gsl_vector * t;
gsl_vector * xt;
gsl_vector * D;
double rcond; /* reciprocal condition number */
}
gsl_multifit_linear_workspace;
gsl_multifit_linear_workspace *
gsl_multifit_linear_alloc (const size_t n, const size_t p);
void
gsl_multifit_linear_free (gsl_multifit_linear_workspace * w);
int
gsl_multifit_linear (const gsl_matrix * X,
const gsl_vector * y,
gsl_vector * c,
gsl_matrix * cov,
double * chisq,
gsl_multifit_linear_workspace * work);
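/* A hedged usage sketch for an ordinary least-squares fit (assumes an n-by-p
 * matrix X and a length-n vector y are already filled; error checking
 * omitted):
 *
 *   gsl_multifit_linear_workspace *w = gsl_multifit_linear_alloc(n, p);
 *   gsl_vector *c = gsl_vector_alloc(p);
 *   gsl_matrix *cov = gsl_matrix_alloc(p, p);
 *   double chisq;
 *   gsl_multifit_linear(X, y, c, cov, &chisq, w);
 *   gsl_multifit_linear_free(w);
 */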
int
gsl_multifit_linear_tsvd (const gsl_matrix * X,
const gsl_vector * y,
const double tol,
gsl_vector * c,
gsl_matrix * cov,
double * chisq,
size_t * rank,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_svd (const gsl_matrix * X,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_bsvd (const gsl_matrix * X,
gsl_multifit_linear_workspace * work);
size_t
gsl_multifit_linear_rank(const double tol, const gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_solve (const double lambda,
const gsl_matrix * X,
const gsl_vector * y,
gsl_vector * c,
double *rnorm,
double *snorm,
gsl_multifit_linear_workspace * work);
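/* A hedged ridge-regression sketch: factor X once with
 * gsl_multifit_linear_svd, then solve for any damping parameter lambda;
 * rnorm and snorm receive the residual and solution norms:
 *
 *   gsl_multifit_linear_svd(X, w);
 *   double rnorm, snorm;
 *   gsl_multifit_linear_solve(lambda, X, y, c, &rnorm, &snorm, w);
 */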
int
gsl_multifit_linear_applyW(const gsl_matrix * X,
const gsl_vector * w,
const gsl_vector * y,
gsl_matrix * WX,
gsl_vector * Wy);
int
gsl_multifit_linear_stdform1 (const gsl_vector * L,
const gsl_matrix * X,
const gsl_vector * y,
gsl_matrix * Xs,
gsl_vector * ys,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_wstdform1 (const gsl_vector * L,
const gsl_matrix * X,
const gsl_vector * w,
const gsl_vector * y,
gsl_matrix * Xs,
gsl_vector * ys,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_L_decomp (gsl_matrix * L, gsl_vector * tau);
int
gsl_multifit_linear_stdform2 (const gsl_matrix * LQR,
const gsl_vector * Ltau,
const gsl_matrix * X,
const gsl_vector * y,
gsl_matrix * Xs,
gsl_vector * ys,
gsl_matrix * M,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_wstdform2 (const gsl_matrix * LQR,
const gsl_vector * Ltau,
const gsl_matrix * X,
const gsl_vector * w,
const gsl_vector * y,
gsl_matrix * Xs,
gsl_vector * ys,
gsl_matrix * M,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_genform1 (const gsl_vector * L,
const gsl_vector * cs,
gsl_vector * c,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_genform2 (const gsl_matrix * LQR,
const gsl_vector * Ltau,
const gsl_matrix * X,
const gsl_vector * y,
const gsl_vector * cs,
const gsl_matrix * M,
gsl_vector * c,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_wgenform2 (const gsl_matrix * LQR,
const gsl_vector * Ltau,
const gsl_matrix * X,
const gsl_vector * w,
const gsl_vector * y,
const gsl_vector * cs,
const gsl_matrix * M,
gsl_vector * c,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_lreg (const double smin, const double smax,
gsl_vector * reg_param);
int
gsl_multifit_linear_lcurve (const gsl_vector * y,
gsl_vector * reg_param,
gsl_vector * rho, gsl_vector * eta,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_lcorner(const gsl_vector *rho,
const gsl_vector *eta,
size_t *idx);
int
gsl_multifit_linear_lcorner2(const gsl_vector *reg_param,
const gsl_vector *eta,
size_t *idx);
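/* A hedged L-curve sketch: after a prior gsl_multifit_linear_svd call, sample
 * the curve over a grid of regularization parameters, then take the parameter
 * at its corner (all vectors preallocated by the caller):
 *
 *   gsl_multifit_linear_lcurve(y, reg_param, rho, eta, w);
 *   size_t idx;
 *   gsl_multifit_linear_lcorner(rho, eta, &idx);
 *   double lambda = gsl_vector_get(reg_param, idx);
 */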
int
gsl_multifit_linear_Lk(const size_t p, const size_t k, gsl_matrix *L);
int
gsl_multifit_linear_Lsobolev(const size_t p, const size_t kmax,
const gsl_vector *alpha, gsl_matrix *L,
gsl_multifit_linear_workspace *work);
int
gsl_multifit_wlinear (const gsl_matrix * X,
const gsl_vector * w,
const gsl_vector * y,
gsl_vector * c,
gsl_matrix * cov,
double * chisq,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_wlinear_tsvd (const gsl_matrix * X,
const gsl_vector * w,
const gsl_vector * y,
const double tol,
gsl_vector * c,
gsl_matrix * cov,
double * chisq,
size_t * rank,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_wlinear_svd (const gsl_matrix * X,
const gsl_vector * w,
const gsl_vector * y,
double tol,
size_t * rank,
gsl_vector * c,
gsl_matrix * cov,
double *chisq,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_wlinear_usvd (const gsl_matrix * X,
const gsl_vector * w,
const gsl_vector * y,
double tol,
size_t * rank,
gsl_vector * c,
gsl_matrix * cov,
double *chisq,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_est (const gsl_vector * x,
const gsl_vector * c,
const gsl_matrix * cov, double *y, double *y_err);
double
gsl_multifit_linear_rcond (const gsl_multifit_linear_workspace * w);
int
gsl_multifit_linear_residuals (const gsl_matrix *X, const gsl_vector *y,
const gsl_vector *c, gsl_vector *r);
/* gcv.c */
int
gsl_multifit_linear_gcv_init(const gsl_vector * y,
gsl_vector * reg_param,
gsl_vector * UTy,
double * delta0,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_gcv_curve(const gsl_vector * reg_param,
const gsl_vector * UTy,
const double delta0,
gsl_vector * G,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_gcv_min(const gsl_vector * reg_param,
const gsl_vector * UTy,
const gsl_vector * G,
const double delta0,
double * lambda,
gsl_multifit_linear_workspace * work);
double
gsl_multifit_linear_gcv_calc(const double lambda,
const gsl_vector * UTy,
const double delta0,
gsl_multifit_linear_workspace * work);
int
gsl_multifit_linear_gcv(const gsl_vector * y,
gsl_vector * reg_param,
gsl_vector * G,
double * lambda,
double * G_lambda,
gsl_multifit_linear_workspace * work);
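/* A hedged GCV sketch: after gsl_multifit_linear_svd on X, the one-call
 * driver fills the GCV curve G over reg_param and returns its minimizer:
 *
 *   double lambda_gcv, G_lambda;
 *   gsl_multifit_linear_gcv(y, reg_param, G, &lambda_gcv, &G_lambda, w);
 */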
typedef struct
{
const char * name; /* method name */
int (*wfun)(const gsl_vector *r, gsl_vector *w);
int (*psi_deriv)(const gsl_vector *r, gsl_vector *dpsi);
double tuning_default; /* default tuning constant */
} gsl_multifit_robust_type;
typedef struct
{
double sigma_ols; /* OLS estimate of sigma */
double sigma_mad; /* MAD estimate of sigma */
double sigma_rob; /* robust estimate of sigma */
double sigma; /* final estimate of sigma */
double Rsq; /* R^2 coefficient of determination */
double adj_Rsq; /* degree of freedom adjusted R^2 */
double rmse; /* root mean squared error */
double sse; /* residual sum of squares */
size_t dof; /* degrees of freedom */
size_t numit; /* number of iterations */
gsl_vector *weights; /* final weights */
gsl_vector *r; /* final residuals y - X c */
} gsl_multifit_robust_stats;
typedef struct
{
size_t n; /* number of observations */
size_t p; /* number of parameters */
size_t numit; /* number of iterations */
size_t maxiter; /* maximum iterations */
const gsl_multifit_robust_type *type;
double tune; /* tuning parameter */
gsl_vector *r; /* residuals at current iteration */
gsl_vector *weights; /* weights at current iteration */
gsl_vector *c_prev; /* coefficients from previous iteration */
gsl_vector *resfac; /* multiplicative factors for residuals */
gsl_vector *psi; /* psi(r) */
gsl_vector *dpsi; /* psi'(r) */
gsl_matrix *QSI; /* Q S^{-1} of original matrix X */
gsl_vector *D; /* balancing parameters of original matrix X */
gsl_vector *workn; /* workspace of length n */
gsl_multifit_robust_stats stats; /* various statistics */
gsl_multifit_linear_workspace *multifit_p;
} gsl_multifit_robust_workspace;
/* available types */
GSL_VAR const gsl_multifit_robust_type * gsl_multifit_robust_default;
GSL_VAR const gsl_multifit_robust_type * gsl_multifit_robust_bisquare;
GSL_VAR const gsl_multifit_robust_type * gsl_multifit_robust_cauchy;
GSL_VAR const gsl_multifit_robust_type * gsl_multifit_robust_fair;
GSL_VAR const gsl_multifit_robust_type * gsl_multifit_robust_huber;
GSL_VAR const gsl_multifit_robust_type * gsl_multifit_robust_ols;
GSL_VAR const gsl_multifit_robust_type * gsl_multifit_robust_welsch;
gsl_multifit_robust_workspace *gsl_multifit_robust_alloc(const gsl_multifit_robust_type *T,
const size_t n, const size_t p);
void gsl_multifit_robust_free(gsl_multifit_robust_workspace *w);
int gsl_multifit_robust_tune(const double tune,
gsl_multifit_robust_workspace *w);
int gsl_multifit_robust_maxiter(const size_t maxiter,
gsl_multifit_robust_workspace *w);
const char *gsl_multifit_robust_name(const gsl_multifit_robust_workspace *w);
gsl_multifit_robust_stats gsl_multifit_robust_statistics(const gsl_multifit_robust_workspace *w);
int gsl_multifit_robust_weights(const gsl_vector *r, gsl_vector *wts,
gsl_multifit_robust_workspace *w);
int gsl_multifit_robust(const gsl_matrix * X, const gsl_vector * y,
gsl_vector * c, gsl_matrix *cov,
gsl_multifit_robust_workspace *w);
int gsl_multifit_robust_est(const gsl_vector * x, const gsl_vector * c,
const gsl_matrix * cov, double *y, double *y_err);
int gsl_multifit_robust_residuals(const gsl_matrix * X,
const gsl_vector * y,
const gsl_vector * c, gsl_vector * r,
gsl_multifit_robust_workspace * w);
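/* A hedged robust-fit sketch using the bisquare weight function (X, y, c, and
 * cov allocated as in the ordinary least-squares case; error checking
 * omitted):
 *
 *   gsl_multifit_robust_workspace *w =
 *       gsl_multifit_robust_alloc(gsl_multifit_robust_bisquare, n, p);
 *   gsl_multifit_robust(X, y, c, cov, w);
 *   gsl_multifit_robust_stats stats = gsl_multifit_robust_statistics(w);
 *   gsl_multifit_robust_free(w);
 */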
__END_DECLS
#endif /* __GSL_MULTIFIT_H__ */
|
If $X$ is empty, then $f$ and $g$ are homotopic if and only if $f$ and $g$ are both continuous. |
// Copyright 2016 lyobzik
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Test.h"
#include <sstream>
#include <fstream>
#include <rapidjson/rapidjson.h>
#include <rapidjson/stringbuffer.h>
#include <rapidjson/writer.h>
#include <boost/algorithm/string/predicate.hpp>
#include "../lib/RapidJsonDefs.h"
namespace TestsCommon {
namespace fs = boost::filesystem;
namespace {
std::string ToString(JsonSchemaValidator::JsonValue const &value) {
	using namespace rapidjson;
	GenericStringBuffer<UTF8<> > buffer;
	Writer<GenericStringBuffer<UTF8<> > > writer(buffer);
	value.Accept(writer);
	// Copy out of the buffer before it goes out of scope.
	return buffer.GetString();
}
std::vector<fs::path> GetTestFiles()
{
fs::directory_iterator it("../thirdparty/json_schema_test_suite/tests/draft3"), endIt;
std::vector<fs::path> test_files;
std::copy_if(it, endIt, std::back_inserter(test_files), [](fs::directory_entry const &entry) -> bool {
return fs::is_regular_file(entry) &&
boost::algorithm::ends_with(entry.path().string(), ".json") &&
entry.path().filename().string() != "ref.json";
});
return test_files;
}
void LoadTestCases(fs::path const &path_to_file, JsonSchemaValidator::JsonDocument &test_case) {
std::ifstream test_file(path_to_file.string());
std::istreambuf_iterator<char> it(test_file), endIt;
std::string test_file_content(it, endIt);
test_case.Parse<0>(test_file_content.c_str());
}
std::string GetStringTestData(JsonSchemaValidator::JsonValue const &test_data) {
std::stringstream ss;
ss << std::boolalpha << std::fixed << "{\"data\": ";
if (test_data.IsObject() || test_data.IsArray()) ss << ToString(test_data);
else if (test_data.IsString()) ss << "\"" << test_data.GetString() << "\"";
else if (test_data.IsDouble()) ss << test_data.GetDouble();
else if (test_data.IsNumber()) ss << test_data.GetInt64();
else if (test_data.IsBool()) ss << test_data.GetBool();
else if (test_data.IsNull()) ss << "null";
ss << "}";
return ss.str();
}
} // namespace
///////////////////////////////////////////////////////////////////////////////////////////////////
Test::Test(std::string const &name, std::string const &schema,
std::string const &inspected_document, bool expect_result)
: name_(name)
, schema_(schema)
, inspected_document_(inspected_document)
, expect_result_(expect_result) {
}
std::string Test::GetName() const {
return name_;
}
std::string Test::GetSchema() const {
return schema_;
}
std::string Test::GetInspectedDocument() const {
return inspected_document_;
}
bool Test::GetExpectResult() const {
return expect_result_;
}
Tests Test::GetTests() {
Tests result;
using namespace JsonSchemaValidator;
for (auto const &file : GetTestFiles()) {
JsonDocument test_cases;
LoadTestCases(file, test_cases);
assert(!test_cases.HasParseError());
assert(test_cases.IsArray());
for (JsonSizeType case_index = 0; case_index < test_cases.Size(); ++case_index) {
JsonValue const &test_case(test_cases[case_index]);
assert(test_case.HasMember("description"));
assert(test_case.HasMember("schema"));
assert(test_case.HasMember("tests"));
assert(test_case["description"].IsString());
assert(test_case["tests"].IsArray());
std::string schema = ToString(test_case["schema"]);
std::string description = test_case["description"].GetString();
JsonValue const &tests(test_case["tests"]);
for (JsonSizeType test_index = 0; test_index < tests.Size(); ++test_index) {
JsonValue const &test(tests[test_index]);
assert(test.HasMember("description"));
assert(test.HasMember("data"));
assert(test.HasMember("valid"));
assert(test["description"].IsString());
assert(test["valid"].IsBool());
std::string test_name = file.filename().string() + "/" + description +
"/" + test["description"].GetString();
result.emplace_back(test_name, schema, GetStringTestData(test["data"]),
test["valid"].GetBool());
}
}
}
return result;
}
} // namespace TestsCommon
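// A hedged usage sketch (RunValidator is a hypothetical hook, not part of
// this suite): iterate the generated cases and feed each schema/document
// pair to the validator under test, comparing against the expected verdict.
//
//   for (TestsCommon::Test const &test : TestsCommon::Test::GetTests()) {
//     bool ok = RunValidator(test.GetSchema(), test.GetInspectedDocument());
//     assert(ok == test.GetExpectResult());
//   }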
|
(* Title: HOL/Auth/n_german_lemma_on_inv__42.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_on_inv__42 imports n_german_base
begin
section{*All lemmas on causal relation between inv__42 and some rule r*}
lemma n_RecvReqVsinv__42:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReq N i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReq N i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__42 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''CurCmd'')) (Const Empty)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const Inv))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''CurCmd'')) (Const Empty)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const Inv))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvEVsinv__42:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__42 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvSVsinv__42:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__42 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__42:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__42 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__42:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__42 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const Inv)) (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv4) ''Cmd'')) (Const InvAck))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i~=p__Inv4)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const Inv)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Field (Para (Ident ''Chan3'') i) ''Cmd'')) (Const InvAck))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__42:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__42 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__42:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__42 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__42:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__42 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__42:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__42 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__42:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqESVsinv__42:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqES i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__42:
assumes a1: "\<exists> j. j\<le>N\<and>r=n_SendReqS j" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqEIVsinv__42:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqEI i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__42 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|