Require Import SpecDeps.
Require Import RData.
Require Import EventReplay.
Require Import MoverTypes.
Require Import Constants.
Require Import CommonLib.
Require Import AbsAccessor.Spec.
Local Open Scope Z_scope.
Section SpecLow.
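(* Added annotation (not part of the original spec): the decimal constants below are
   Arm SMCCC/PSCI function identifiers. Assuming the standard PSCI encoding:
   2214592513 = 0x84000001 (CPU_SUSPEND, SMC32) and 3288334337 = 0xC4000001 (SMC64),
   2214592514 = 0x84000002 (CPU_OFF), 2214592515 / 3288334339 = 0x84000003 / 0xC4000003
   (CPU_ON), 2214592516 / 3288334340 = 0x84000004 / 0xC4000004 (AFFINITY_INFO),
   2214592520 = 0x84000008 (SYSTEM_OFF), 2214592521 = 0x84000009 (SYSTEM_RESET), and
   2214592522 = 0x8400000A (PSCI_FEATURES). For each recognized id the spec writes
   result 0 ("supported") via set_psci_result_x0_spec; for any other id it writes
   4294967295 = 0xFFFFFFFF, i.e. NOT_SUPPORTED. *)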
Definition psci_features_spec0 (rec: Pointer) (psci_func_id: Z) (adt: RData) : option RData :=
match rec, psci_func_id with
| (_rec_base, _rec_ofst), _psci_func_id =>
if (_psci_func_id =? 2214592513) then
let _t'1 := 1 in
let _t'2 := 1 in
let _t'3 := 1 in
let _t'4 := 1 in
let _t'5 := 1 in
let _t'6 := 1 in
let _t'7 := 1 in
let _t'8 := 1 in
let _t'9 := 1 in
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _t'1 := (_psci_func_id =? 3288334337) in
if _t'1 then
let _t'2 := 1 in
let _t'3 := 1 in
let _t'4 := 1 in
let _t'5 := 1 in
let _t'6 := 1 in
let _t'7 := 1 in
let _t'8 := 1 in
let _t'9 := 1 in
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _t'2 := (_psci_func_id =? 2214592514) in
if _t'2 then
let _t'3 := 1 in
let _t'4 := 1 in
let _t'5 := 1 in
let _t'6 := 1 in
let _t'7 := 1 in
let _t'8 := 1 in
let _t'9 := 1 in
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _t'3 := (_psci_func_id =? 2214592515) in
if _t'3 then
let _t'4 := 1 in
let _t'5 := 1 in
let _t'6 := 1 in
let _t'7 := 1 in
let _t'8 := 1 in
let _t'9 := 1 in
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _t'4 := (_psci_func_id =? 3288334339) in
if _t'4 then
let _t'5 := 1 in
let _t'6 := 1 in
let _t'7 := 1 in
let _t'8 := 1 in
let _t'9 := 1 in
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _t'5 := (_psci_func_id =? 2214592516) in
if _t'5 then
let _t'6 := 1 in
let _t'7 := 1 in
let _t'8 := 1 in
let _t'9 := 1 in
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _t'6 := (_psci_func_id =? 3288334340) in
if _t'6 then
let _t'7 := 1 in
let _t'8 := 1 in
let _t'9 := 1 in
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _t'7 := (_psci_func_id =? 2214592520) in
if _t'7 then
let _t'8 := 1 in
let _t'9 := 1 in
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _t'8 := (_psci_func_id =? 2214592521) in
if _t'8 then
let _t'9 := 1 in
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _t'9 := (_psci_func_id =? 2214592522) in
if _t'9 then
let _ret := 0 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
else
let _ret := 4294967295 in
rely is_int64 _ret;
when adt == set_psci_result_x0_spec (VZ64 _ret) adt;
Some adt
end
.
End SpecLow.
|
import order.lattice data.finset
universes u v w
noncomputable theory
open classical set function lattice
instance {α : Type u} [semilattice_sup α] : is_idempotent α (⊔) := ⟨assume a, sup_idem⟩
namespace finset
section general
variables {α : Type u} {β : Type w} [decidable_eq β] [semilattice_sup_bot α]
def Sup_fin (s : finset β) (f : β → α) : α := s.fold (⊔) ⊥ f
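-- Added note (not in the original file): unfolding the fold, `Sup_fin {a, b, c} f`
-- equals `f a ⊔ f b ⊔ f c ⊔ ⊥`; over ℕ, where `⊔` is `max` and `⊥` is `0`, this is
-- just the maximum of `f a`, `f b` and `f c`.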
variables {s s₁ s₂ : finset β} {f : β → α}
@[simp] lemma Sup_fin_empty : (∅ : finset β).Sup_fin f = ⊥ :=
fold_empty
@[simp] lemma Sup_fin_insert {b : β} : (insert b s : finset β).Sup_fin f = f b ⊔ s.Sup_fin f :=
fold_insert_idem
@[simp] lemma Sup_fin_singleton {b : β} : (finset.singleton b).Sup_fin f = f b :=
calc _ = f b ⊔ (∅:finset β).Sup_fin f : Sup_fin_insert
... = f b : by simp
lemma Sup_fin_union : (s₁ ∪ s₂).Sup_fin f = s₁.Sup_fin f ⊔ s₂.Sup_fin f :=
finset.induction_on s₁ (by simp) (by simp {contextual := tt}; cc)
lemma Sup_fin_mono_fun {g : β → α} : (∀b∈s, f b ≤ g b) → s.Sup_fin f ≤ s.Sup_fin g :=
finset.induction_on s (by simp) (by simp [-sup_le_iff, sup_le_sup] {contextual := tt})
lemma le_Sup_fin {b : β} (hb : b ∈ s) : f b ≤ s.Sup_fin f :=
calc f b ≤ f b ⊔ s.Sup_fin f : le_sup_left
... = (insert b s).Sup_fin f : by simp
... = s.Sup_fin f : by simp [hb]
lemma Sup_fin_le {a : α} : (∀b ∈ s, f b ≤ a) → s.Sup_fin f ≤ a :=
finset.induction_on s (by simp) (by simp {contextual := tt})
lemma Sup_fin_mono (h : s₁ ⊆ s₂) : s₁.Sup_fin f ≤ s₂.Sup_fin f :=
Sup_fin_le $ assume b hb, le_Sup_fin (finset.subset_iff.mpr h hb)
end general
end finset
instance nat.distrib_lattice : distrib_lattice ℕ :=
by apply_instance
instance nat.semilattice_sup_bot : semilattice_sup_bot ℕ :=
{ bot := 0, bot_le := nat.zero_le , .. nat.distrib_lattice }
lemma nat.sup_eq_max {n m : ℕ} : n ⊔ m = max n m := rfl
lemma forall_eq_zero_of_sup_fin_eq_bot {s : finset ℕ} : finset.Sup_fin s id = 0 → ∀x ∈ s, x = 0 :=
begin
intros h x h2,
have : x ≤ 0,
exact calc x = id x : rfl
... ≤ finset.Sup_fin s id : finset.le_Sup_fin h2
... = 0 : h,
exact nat.eq_zero_of_le_zero this
end
lemma eq_singleton_or_eq_empty_of_sup_fin_eq_bot {s : finset ℕ} : finset.Sup_fin s id = 0 → s = {0} ∨ s = ∅ :=
begin
intro h,
by_cases h1 : (s = ∅),
simp [h1],
have h2 : ∀x ∈ s, x = 0,
from forall_eq_zero_of_sup_fin_eq_bot h,
have hex : ∃x : ℕ, x ∈ s,
from finset.exists_mem_of_ne_empty h1,
have h3: {0} ⊆ s,
have h4 : (some hex) ∈ s,
from some_spec hex,
have h5: (some hex) = 0,
from h2 (some hex) h4,
have h6: 0 ∈ s,
rw [←h5],
exact h4,
exact iff.elim_right singleton_subset_iff h6,
have h7 : s ⊆ {0},
dunfold has_subset.subset,
intros a h4,
have h8 : a = 0,
from h2 a h4,
rw h8,
exact mem_singleton 0,
have h9 : s = {0},
exact finset.subset.antisymm h7 h3,
simp [h9]
end
open finset
lemma Sup_fin_mem_of_id_nat {s : finset ℕ} : s ≠ ∅ → Sup_fin s id ∈ s :=
finset.induction_on s
(by contradiction)
(by intros a s; by_cases s = ∅; cases le_total a (Sup_fin s id);
simp [*, nat.sup_eq_max, max_eq_left, max_eq_right] {contextual := tt})
|
theory ex3_11 imports Main "~~/src/HOL/IMP/AExp" begin
type_synonym reg = nat
datatype instr = LDI int reg | LD vname reg | ADD reg reg
type_synonym reg_st = "reg \<Rightarrow> int"
fun exec1 :: "instr \<Rightarrow> state \<Rightarrow> reg_st \<Rightarrow> reg_st" where
"exec1 (LDI n r) _ f = f(r := n)" |
"exec1 (LD x r) s f = f(r := s x)" |
"exec1 (ADD r1 r2) _ f = f(r1 := f r1 + f r2)"
fun exec :: "instr list \<Rightarrow> state \<Rightarrow> reg_st \<Rightarrow> reg_st" where
"exec [] _ f = f" |
"exec (i#is) s f = exec is s (exec1 i s f)"
lemma exec_append[simp]:
"exec (is1@is2) s f = exec is2 s (exec is1 s f)"
apply(induction is1 arbitrary: s f)
apply (auto)
done
fun comp :: "aexp \<Rightarrow> reg \<Rightarrow> instr list" where
"comp (N n) r = [LDI n r]" |
"comp (V x) r = [LD x r]" |
"comp (Plus e\<^sub>1 e\<^sub>2) r = comp e\<^sub>1 r @ comp e\<^sub>2 (r+1) @ [ADD r (r+1)]"
theorem exec_comp: "exec (comp a r) s rs r = aval a s"
apply(induction a arbitrary: s rs r)
apply (auto)
done
end
|
great(c1,v1).
great(jj1,aa1).
great(jj1,n1).
great(cc1,n1).
great(u1,ll1).
great(bb1,t1).
great(f1,l1).
great(jj1,o1).
great(d1,y1).
great(j1,ee1).
great(bb1,p1).
great(c1,d1).
great(hh1,jj1).
great(i1,t1).
great(hh1,h1).
great(e1,t1).
great(k1,s1).
great(g1,bb1).
great(f1,hh1).
great(j1,t1).
great(c1,dd1).
great(f1,ll1).
great(v1,x1).
great(d1,o1).
great(d1,aa1).
great(e1,n1).
|
lemma interior_mono: "S \<subseteq> T \<Longrightarrow> interior S \<subseteq> interior T"
|
Formal statement is: lemma measurable_mono: assumes N: "sets N' \<le> sets N" "space N = space N'" assumes M: "sets M \<le> sets M'" "space M = space M'" shows "measurable M N \<subseteq> measurable M' N'" Informal statement is: If the measurable sets of $N'$ are contained in those of $N$ (over the same underlying space) and the measurable sets of $M$ are contained in those of $M'$ (over the same underlying space), then every function that is measurable from $M$ to $N$ is also measurable from $M'$ to $N'$.
|
-- | The core type system.
module Ethambda.System where
open import Ethambda.Common using () -- ((<.>), (<.))
data Tp : Type -> Type where
Var : a -> Tp a
Fun : Tp a -> Tp a -> Tp a
Show a => Show (Tp a) where
  show t = case t of
      Var a => show a
      Fun a0 b0 => mbrackets a0 <+> "→" <+> show b0
    where
      brackets : String -> String
      brackets s = "(" <+> s <+> ")"
      mbrackets : Show a => Tp a -> String
      mbrackets a = case a of
        Var _ => neutral
        Fun _ _ => brackets (show a)
|
-- -------------------------------------------------------------- [ Viewer.idr ]
-- Module : Viewer.idr
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
||| A Pattern Viewer
module Sif.REPL
import System
import Sif.Types
import Sif.AbsSyntax
import Sif.Pattern
import Sif.Effs
import Sif.Error
import Sif.Library
import Sif.DSL.Parser.Problem
import Sif.DSL.Parser.Solution
import Sif.DSL.Parser
import Sif.DSL
import Sif.Options
import Sif.API
import Sif.Commands
%default partial
-- -------------------------------------------------------------------- [ Effs ]
sifBanner : String
sifBanner = """
_____ _ ____ __
/ ___/(_) __/ / / ____ _____ ____ _
\__ \/ / /_ / / / __ `/ __ \/ __ `/
___/ / / __/ / /___/ /_/ / / / / /_/ /
/____/_/_/ /_____/\__,_/_/ /_/\__, /
/____/
http://www.github.com/jfdm/sif-lang
Type :? for help
Sif is free software with ABSOLUTELY NO WARRANTY.
"""
||| Fetch and parse commands
fetchCMD : Eff SifCMD SifEffs
fetchCMD = do
putStr "sif-viewer> "
rawCmd <- getStr
case parseCMD rawCmd of
Left err => do
printLn NoSuchCommand
fetchCMD
Right cmd => do
case getCmdIndex cmd of
Nothing => pure cmd
Just n => do
lib <- getLibrary
if n < (length $ patts lib)
then pure cmd
else do
printLn IndexOutOfBounds
fetchCMD
doCommand : SifCMD -> Eff () SifEffs
doCommand Quit = pure ()
doCommand Help = putStrLn showHelp
doCommand (ShowPattern n fmt fname) = do
case fname of
Nothing => do
putStrLn "Show Pattern"
getAndPrintPattern n fmt
Just fn => do
putStrLn $ "Saving Pattern to " ++ fn ++ " as " ++ show fmt
printLn FeatureNotImpl
doCommand (ListLib) = do
putStrLn "Listing Library"
listLibrary
doCommand (PreludeLoad x) = do
case x of
Nothing => loadPrelude
dirname => do
updateOptions (\o => record {prelude = dirname} o)
loadPrelude
doCommand (EvalPattern n) = do
putStrLn "Eval Pattern"
getAndEvalPattern n
doCommand (CheckExtPattern p s) = do
putStrLn "Importing..."
evalPatternFromFile (Just p) (Just s)
doCommand _ = printLn NoSuchCommand
runREPL : Eff () SifEffs
runREPL = do
cmd <- fetchCMD
case cmd of
Quit => pure ()
x => do
doCommand x
runREPL
||| A Viewer to view a library of patterns.
export
sifREPL : Eff () SifEffs
sifREPL =
case banner !(getOptions) of
True => do
putStrLn sifBanner
runREPL
False => runREPL
-- --------------------------------------------------------------------- [ EOF ]
|
(* *********************************************************************)
(* *)
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
(* under the terms of the GNU General Public License as published by *)
(* the Free Software Foundation, either version 2 of the License, or *)
(* (at your option) any later version. This file is also distributed *)
(* under the terms of the INRIA Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(** A deterministic evaluation strategy for C. *)
Require Import Axioms.
Require Import Coqlib.
Require Import Errors.
Require Import Maps.
Require Import Integers.
Require Import Floats.
Require Import Values.
Require Import AST.
Require Import Memory.
Require Import Events.
Require Import Globalenvs.
Require Import Smallstep.
Require Import Ctypes.
Require Import Cop.
Require Import Csyntax.
Require Import Csem.
Section STRATEGY.
Variable ge: genv.
(** * Definition of the strategy *)
(** We now formalize a particular strategy for reducing expressions which
is the one implemented by the CompCert compiler. It evaluates effectful
subexpressions first, in leftmost-innermost order, then finishes
with the evaluation of the remaining simple expression. *)
(** Simple expressions are defined as follows. *)
Fixpoint simple (a: expr) : bool :=
match a with
| Eloc _ _ _ => true
| Evar _ _ => true
| Ederef r _ => simple r
| Efield r _ _ => simple r
| Eval _ _ => true
| Evalof l _ => simple l && negb(type_is_volatile (typeof l))
| Eaddrof l _ => simple l
| Eunop _ r1 _ => simple r1
| Ebinop _ r1 r2 _ => simple r1 && simple r2
| Ecast r1 _ => simple r1
| Eseqand _ _ _ => false
| Eseqor _ _ _ => false
| Econdition _ _ _ _ => false
| Esizeof _ _ => true
| Ealignof _ _ => true
| Eassign _ _ _ => false
| Eassignop _ _ _ _ _ => false
| Epostincr _ _ _ => false
| Ecomma _ _ _ => false
| Ecall _ _ _ => false
| Ebuiltin _ _ _ _ => false
| Eparen _ _ => false
end.
Fixpoint simplelist (rl: exprlist) : bool :=
match rl with Enil => true | Econs r rl' => simple r && simplelist rl' end.
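(* Added illustration (not part of the original CompCert source): a pure constant
   addition is classified as simple, whereas an assignment is not. *)
Example simple_ex_add:
  simple (Ebinop Oadd (Eval (Vint Int.one) type_int32s)
                      (Eval (Vint Int.one) type_int32s) type_int32s) = true.
Proof. reflexivity. Qed.
Example simple_ex_assign:
  simple (Eassign (Eval (Vint Int.one) type_int32s)
                  (Eval (Vint Int.one) type_int32s) type_int32s) = false.
Proof. reflexivity. Qed.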
(** Simple expressions have interesting properties: their evaluations always
terminate, are deterministic, and preserve the memory state.
We seize this opportunity to define a big-step semantics for simple
expressions. *)
Section SIMPLE_EXPRS.
Variable e: env.
Variable m: mem.
Inductive eval_simple_lvalue: expr -> block -> int -> Prop :=
| esl_loc: forall b ofs ty,
eval_simple_lvalue (Eloc b ofs ty) b ofs
| esl_var_local: forall x ty b,
e!x = Some(b, ty) ->
eval_simple_lvalue (Evar x ty) b Int.zero
| esl_var_global: forall x ty b,
e!x = None ->
Genv.find_symbol ge x = Some b ->
type_of_global ge b = Some ty ->
eval_simple_lvalue (Evar x ty) b Int.zero
| esl_deref: forall r ty b ofs,
eval_simple_rvalue r (Vptr b ofs) ->
eval_simple_lvalue (Ederef r ty) b ofs
| esl_field_struct: forall r f ty b ofs id fList a delta,
eval_simple_rvalue r (Vptr b ofs) ->
typeof r = Tstruct id fList a -> field_offset f fList = OK delta ->
eval_simple_lvalue (Efield r f ty) b (Int.add ofs (Int.repr delta))
| esl_field_union: forall r f ty b ofs id fList a,
eval_simple_rvalue r (Vptr b ofs) ->
typeof r = Tunion id fList a ->
eval_simple_lvalue (Efield r f ty) b ofs
with eval_simple_rvalue: expr -> val -> Prop :=
| esr_val: forall v ty,
eval_simple_rvalue (Eval v ty) v
| esr_rvalof: forall b ofs l ty v,
eval_simple_lvalue l b ofs ->
ty = typeof l -> type_is_volatile ty = false ->
deref_loc ge ty m b ofs E0 v ->
eval_simple_rvalue (Evalof l ty) v
| esr_addrof: forall b ofs l ty,
eval_simple_lvalue l b ofs ->
eval_simple_rvalue (Eaddrof l ty) (Vptr b ofs)
| esr_unop: forall op r1 ty v1 v,
eval_simple_rvalue r1 v1 ->
sem_unary_operation op v1 (typeof r1) = Some v ->
eval_simple_rvalue (Eunop op r1 ty) v
| esr_binop: forall op r1 r2 ty v1 v2 v,
eval_simple_rvalue r1 v1 -> eval_simple_rvalue r2 v2 ->
sem_binary_operation op v1 (typeof r1) v2 (typeof r2) m = Some v ->
eval_simple_rvalue (Ebinop op r1 r2 ty) v
| esr_cast: forall ty r1 v1 v,
eval_simple_rvalue r1 v1 ->
sem_cast v1 (typeof r1) ty = Some v ->
eval_simple_rvalue (Ecast r1 ty) v
| esr_sizeof: forall ty1 ty,
eval_simple_rvalue (Esizeof ty1 ty) (Vint (Int.repr (sizeof ty1)))
| esr_alignof: forall ty1 ty,
eval_simple_rvalue (Ealignof ty1 ty) (Vint (Int.repr (alignof ty1))).
Inductive eval_simple_list: exprlist -> typelist -> list val -> Prop :=
| esrl_nil:
eval_simple_list Enil Tnil nil
| esrl_cons: forall r rl ty tyl v vl v',
eval_simple_rvalue r v' -> sem_cast v' (typeof r) ty = Some v ->
eval_simple_list rl tyl vl ->
eval_simple_list (Econs r rl) (Tcons ty tyl) (v :: vl).
Scheme eval_simple_rvalue_ind2 := Minimality for eval_simple_rvalue Sort Prop
with eval_simple_lvalue_ind2 := Minimality for eval_simple_lvalue Sort Prop.
Combined Scheme eval_simple_rvalue_lvalue_ind from eval_simple_rvalue_ind2, eval_simple_lvalue_ind2.
End SIMPLE_EXPRS.
(** Left reduction contexts. These contexts allow reducing to the right
of a binary operator only if the left subexpression is simple. *)
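(* Added illustration: by rule [lctx_binop_right] below, [fun x => Ebinop Oadd (Eval v ty1) x ty]
   is a left context, since its left argument is already a value and hence simple; if the
   left argument still contained, say, an unevaluated call, that rule would not apply and
   the hole could not be placed on the right. *)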
Inductive leftcontext: kind -> kind -> (expr -> expr) -> Prop :=
| lctx_top: forall k,
leftcontext k k (fun x => x)
| lctx_deref: forall k C ty,
leftcontext k RV C -> leftcontext k LV (fun x => Ederef (C x) ty)
| lctx_field: forall k C f ty,
leftcontext k RV C -> leftcontext k LV (fun x => Efield (C x) f ty)
| lctx_rvalof: forall k C ty,
leftcontext k LV C -> leftcontext k RV (fun x => Evalof (C x) ty)
| lctx_addrof: forall k C ty,
leftcontext k LV C -> leftcontext k RV (fun x => Eaddrof (C x) ty)
| lctx_unop: forall k C op ty,
leftcontext k RV C -> leftcontext k RV (fun x => Eunop op (C x) ty)
| lctx_binop_left: forall k C op e2 ty,
leftcontext k RV C -> leftcontext k RV (fun x => Ebinop op (C x) e2 ty)
| lctx_binop_right: forall k C op e1 ty,
simple e1 = true -> leftcontext k RV C ->
leftcontext k RV (fun x => Ebinop op e1 (C x) ty)
| lctx_cast: forall k C ty,
leftcontext k RV C -> leftcontext k RV (fun x => Ecast (C x) ty)
| lctx_seqand: forall k C r2 ty,
leftcontext k RV C -> leftcontext k RV (fun x => Eseqand (C x) r2 ty)
| lctx_seqor: forall k C r2 ty,
leftcontext k RV C -> leftcontext k RV (fun x => Eseqor (C x) r2 ty)
| lctx_condition: forall k C r2 r3 ty,
leftcontext k RV C -> leftcontext k RV (fun x => Econdition (C x) r2 r3 ty)
| lctx_assign_left: forall k C e2 ty,
leftcontext k LV C -> leftcontext k RV (fun x => Eassign (C x) e2 ty)
| lctx_assign_right: forall k C e1 ty,
simple e1 = true -> leftcontext k RV C ->
leftcontext k RV (fun x => Eassign e1 (C x) ty)
| lctx_assignop_left: forall k C op e2 tyres ty,
leftcontext k LV C -> leftcontext k RV (fun x => Eassignop op (C x) e2 tyres ty)
| lctx_assignop_right: forall k C op e1 tyres ty,
simple e1 = true -> leftcontext k RV C ->
leftcontext k RV (fun x => Eassignop op e1 (C x) tyres ty)
| lctx_postincr: forall k C id ty,
leftcontext k LV C -> leftcontext k RV (fun x => Epostincr id (C x) ty)
| lctx_call_left: forall k C el ty,
leftcontext k RV C -> leftcontext k RV (fun x => Ecall (C x) el ty)
| lctx_call_right: forall k C e1 ty,
simple e1 = true -> leftcontextlist k C ->
leftcontext k RV (fun x => Ecall e1 (C x) ty)
| lctx_builtin: forall k C ef tyargs ty,
leftcontextlist k C ->
leftcontext k RV (fun x => Ebuiltin ef tyargs (C x) ty)
| lctx_comma: forall k C e2 ty,
leftcontext k RV C -> leftcontext k RV (fun x => Ecomma (C x) e2 ty)
| lctx_paren: forall k C ty,
leftcontext k RV C -> leftcontext k RV (fun x => Eparen (C x) ty)
with leftcontextlist: kind -> (expr -> exprlist) -> Prop :=
| lctx_list_head: forall k C el,
leftcontext k RV C -> leftcontextlist k (fun x => Econs (C x) el)
| lctx_list_tail: forall k C e1,
simple e1 = true -> leftcontextlist k C ->
leftcontextlist k (fun x => Econs e1 (C x)).
Lemma leftcontext_context:
forall k1 k2 C, leftcontext k1 k2 C -> context k1 k2 C
with leftcontextlist_contextlist:
forall k C, leftcontextlist k C -> contextlist k C.
Proof.
induction 1; constructor; auto.
induction 1; constructor; auto.
Qed.
Hint Resolve leftcontext_context.
(** Strategy for reducing expressions. We reduce the leftmost innermost
non-simple subexpression, evaluating its arguments (which are necessarily
simple expressions) with the big-step semantics.
If there are none, the whole expression is simple and is evaluated in
one big step. *)
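(* Added illustration: in [Ebinop Oadd (Eassign l1 r1 ty) (Eassign l2 r2 ty) ty'], the
   leftmost innermost non-simple subexpression is the first assignment; [estep] evaluates
   its subexpressions with the big-step semantics above and performs the store in one step,
   after which the right-hand assignment becomes the next redex. *)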
Inductive estep: state -> trace -> state -> Prop :=
| step_expr: forall f r k e m v ty,
eval_simple_rvalue e m r v ->
match r with Eval _ _ => False | _ => True end ->
ty = typeof r ->
estep (ExprState f r k e m)
E0 (ExprState f (Eval v ty) k e m)
| step_rvalof_volatile: forall f C l ty k e m b ofs t v,
leftcontext RV RV C ->
eval_simple_lvalue e m l b ofs ->
deref_loc ge ty m b ofs t v ->
ty = typeof l -> type_is_volatile ty = true ->
estep (ExprState f (C (Evalof l ty)) k e m)
t (ExprState f (C (Eval v ty)) k e m)
| step_seqand_true: forall f C r1 r2 ty k e m v,
leftcontext RV RV C ->
eval_simple_rvalue e m r1 v ->
bool_val v (typeof r1) = Some true ->
estep (ExprState f (C (Eseqand r1 r2 ty)) k e m)
E0 (ExprState f (C (Eparen (Eparen r2 type_bool) ty)) k e m)
| step_seqand_false: forall f C r1 r2 ty k e m v,
leftcontext RV RV C ->
eval_simple_rvalue e m r1 v ->
bool_val v (typeof r1) = Some false ->
estep (ExprState f (C (Eseqand r1 r2 ty)) k e m)
E0 (ExprState f (C (Eval (Vint Int.zero) ty)) k e m)
| step_seqor_true: forall f C r1 r2 ty k e m v,
leftcontext RV RV C ->
eval_simple_rvalue e m r1 v ->
bool_val v (typeof r1) = Some true ->
estep (ExprState f (C (Eseqor r1 r2 ty)) k e m)
E0 (ExprState f (C (Eval (Vint Int.one) ty)) k e m)
| step_seqor_false: forall f C r1 r2 ty k e m v,
leftcontext RV RV C ->
eval_simple_rvalue e m r1 v ->
bool_val v (typeof r1) = Some false ->
estep (ExprState f (C (Eseqor r1 r2 ty)) k e m)
E0 (ExprState f (C (Eparen (Eparen r2 type_bool) ty)) k e m)
| step_condition: forall f C r1 r2 r3 ty k e m v b,
leftcontext RV RV C ->
eval_simple_rvalue e m r1 v ->
bool_val v (typeof r1) = Some b ->
estep (ExprState f (C (Econdition r1 r2 r3 ty)) k e m)
E0 (ExprState f (C (Eparen (if b then r2 else r3) ty)) k e m)
| step_assign: forall f C l r ty k e m b ofs v v' t m',
leftcontext RV RV C ->
eval_simple_lvalue e m l b ofs ->
eval_simple_rvalue e m r v ->
sem_cast v (typeof r) (typeof l) = Some v' ->
assign_loc ge (typeof l) m b ofs v' t m' ->
ty = typeof l ->
estep (ExprState f (C (Eassign l r ty)) k e m)
t (ExprState f (C (Eval v' ty)) k e m')
| step_assignop: forall f C op l r tyres ty k e m b ofs v1 v2 v3 v4 t1 t2 m' t,
leftcontext RV RV C ->
eval_simple_lvalue e m l b ofs ->
deref_loc ge (typeof l) m b ofs t1 v1 ->
eval_simple_rvalue e m r v2 ->
sem_binary_operation op v1 (typeof l) v2 (typeof r) m = Some v3 ->
sem_cast v3 tyres (typeof l) = Some v4 ->
assign_loc ge (typeof l) m b ofs v4 t2 m' ->
ty = typeof l ->
t = t1 ** t2 ->
estep (ExprState f (C (Eassignop op l r tyres ty)) k e m)
t (ExprState f (C (Eval v4 ty)) k e m')
| step_assignop_stuck: forall f C op l r tyres ty k e m b ofs v1 v2 t,
leftcontext RV RV C ->
eval_simple_lvalue e m l b ofs ->
deref_loc ge (typeof l) m b ofs t v1 ->
eval_simple_rvalue e m r v2 ->
match sem_binary_operation op v1 (typeof l) v2 (typeof r) m with
| None => True
| Some v3 =>
match sem_cast v3 tyres (typeof l) with
| None => True
| Some v4 => forall t2 m', ~(assign_loc ge (typeof l) m b ofs v4 t2 m')
end
end ->
ty = typeof l ->
estep (ExprState f (C (Eassignop op l r tyres ty)) k e m)
t Stuckstate
| step_postincr: forall f C id l ty k e m b ofs v1 v2 v3 t1 t2 m' t,
leftcontext RV RV C ->
eval_simple_lvalue e m l b ofs ->
deref_loc ge ty m b ofs t1 v1 ->
sem_incrdecr id v1 ty = Some v2 ->
sem_cast v2 (typeconv ty) ty = Some v3 ->
assign_loc ge ty m b ofs v3 t2 m' ->
ty = typeof l ->
t = t1 ** t2 ->
estep (ExprState f (C (Epostincr id l ty)) k e m)
t (ExprState f (C (Eval v1 ty)) k e m')
| step_postincr_stuck: forall f C id l ty k e m b ofs v1 t,
leftcontext RV RV C ->
eval_simple_lvalue e m l b ofs ->
deref_loc ge ty m b ofs t v1 ->
match sem_incrdecr id v1 ty with
| None => True
| Some v2 =>
match sem_cast v2 (typeconv ty) ty with
| None => True
| Some v3 => forall t2 m', ~(assign_loc ge (typeof l) m b ofs v3 t2 m')
end
end ->
ty = typeof l ->
estep (ExprState f (C (Epostincr id l ty)) k e m)
t Stuckstate
| step_comma: forall f C r1 r2 ty k e m v,
leftcontext RV RV C ->
eval_simple_rvalue e m r1 v ->
ty = typeof r2 ->
estep (ExprState f (C (Ecomma r1 r2 ty)) k e m)
E0 (ExprState f (C r2) k e m)
| step_paren: forall f C r ty k e m v1 v,
leftcontext RV RV C ->
eval_simple_rvalue e m r v1 ->
sem_cast v1 (typeof r) ty = Some v ->
estep (ExprState f (C (Eparen r ty)) k e m)
E0 (ExprState f (C (Eval v ty)) k e m)
| step_call: forall f C rf rargs ty k e m targs tres vf vargs fd,
leftcontext RV RV C ->
classify_fun (typeof rf) = fun_case_f targs tres ->
eval_simple_rvalue e m rf vf ->
eval_simple_list e m rargs targs vargs ->
Genv.find_funct ge vf = Some fd ->
type_of_fundef fd = Tfunction targs tres ->
estep (ExprState f (C (Ecall rf rargs ty)) k e m)
E0 (Callstate fd vargs (Kcall f e C ty k) m)
| step_builtin: forall f C ef tyargs rargs ty k e m vargs t vres m',
leftcontext RV RV C ->
eval_simple_list e m rargs tyargs vargs ->
external_call ef ge vargs m t vres m' ->
estep (ExprState f (C (Ebuiltin ef tyargs rargs ty)) k e m)
t (ExprState f (C (Eval vres ty)) k e m').
Definition step (S: state) (t: trace) (S': state) : Prop :=
estep S t S' \/ sstep ge S t S'.
(** Properties of contexts *)
Lemma context_compose:
forall k2 k3 C2, context k2 k3 C2 ->
forall k1 C1, context k1 k2 C1 ->
context k1 k3 (fun x => C2(C1 x))
with contextlist_compose:
forall k2 C2, contextlist k2 C2 ->
forall k1 C1, context k1 k2 C1 ->
contextlist k1 (fun x => C2(C1 x)).
Proof.
induction 1; intros; try (constructor; eauto).
replace (fun x => C1 x) with C1. auto. apply extensionality; auto.
induction 1; intros; constructor; eauto.
Qed.
Hint Constructors context contextlist.
Hint Resolve context_compose contextlist_compose.
(** * Safe executions. *)
(** A state is safe according to the nondeterministic semantics
if it cannot get stuck by doing silent transitions only. *)
Definition safe (s: Csem.state) : Prop :=
forall s', star Csem.step ge s E0 s' ->
(exists r, final_state s' r) \/ (exists t, exists s'', Csem.step ge s' t s'').
Lemma safe_steps:
forall s s',
safe s -> star Csem.step ge s E0 s' -> safe s'.
Proof.
intros; red; intros.
eapply H. eapply star_trans; eauto.
Qed.
Lemma star_safe:
forall s1 s2 t s3,
safe s1 -> star Csem.step ge s1 E0 s2 -> (safe s2 -> star Csem.step ge s2 t s3) ->
star Csem.step ge s1 t s3.
Proof.
intros. eapply star_trans; eauto. apply H1. eapply safe_steps; eauto. auto.
Qed.
Lemma plus_safe:
forall s1 s2 t s3,
safe s1 -> star Csem.step ge s1 E0 s2 -> (safe s2 -> plus Csem.step ge s2 t s3) ->
plus Csem.step ge s1 t s3.
Proof.
intros. eapply star_plus_trans; eauto. apply H1. eapply safe_steps; eauto. auto.
Qed.
Require Import Classical.
Lemma safe_imm_safe:
forall f C a k e m K,
safe (ExprState f (C a) k e m) ->
context K RV C ->
imm_safe ge e K a m.
Proof.
intros. destruct (classic (imm_safe ge e K a m)); auto.
destruct (H Stuckstate).
apply star_one. left. econstructor; eauto.
destruct H2 as [r F]. inv F.
destruct H2 as [t [s' S]]. inv S. inv H2. inv H2.
Qed.
(** Safe expressions are well-formed with respect to l-values and r-values. *)
Definition expr_kind (a: expr) : kind :=
match a with
| Eloc _ _ _ => LV
| Evar _ _ => LV
| Ederef _ _ => LV
| Efield _ _ _ => LV
| _ => RV
end.
Lemma lred_kind:
forall e a m a' m', lred ge e a m a' m' -> expr_kind a = LV.
Proof.
induction 1; auto.
Qed.
Lemma rred_kind:
forall a m t a' m', rred ge a m t a' m' -> expr_kind a = RV.
Proof.
induction 1; auto.
Qed.
Lemma callred_kind:
forall a fd args ty, callred ge a fd args ty -> expr_kind a = RV.
Proof.
induction 1; auto.
Qed.
Lemma context_kind:
forall a from to C, context from to C -> expr_kind a = from -> expr_kind (C a) = to.
Proof.
induction 1; intros; simpl; auto.
Qed.
Lemma imm_safe_kind:
forall e k a m, imm_safe ge e k a m -> expr_kind a = k.
Proof.
induction 1.
auto.
auto.
eapply context_kind; eauto. eapply lred_kind; eauto.
eapply context_kind; eauto. eapply rred_kind; eauto.
eapply context_kind; eauto. eapply callred_kind; eauto.
Qed.
Lemma safe_expr_kind:
forall from C f a k e m,
context from RV C ->
safe (ExprState f (C a) k e m) ->
expr_kind a = from.
Proof.
intros. eapply imm_safe_kind. eapply safe_imm_safe; eauto.
Qed.
(** Painful inversion lemmas on particular states that are safe. *)
Section INVERSION_LEMMAS.
Variable e: env.
Fixpoint exprlist_all_values (rl: exprlist) : Prop :=
match rl with
| Enil => True
| Econs (Eval v ty) rl' => exprlist_all_values rl'
| Econs _ _ => False
end.
Definition invert_expr_prop (a: expr) (m: mem) : Prop :=
match a with
| Eloc b ofs ty => False
| Evar x ty =>
exists b,
e!x = Some(b, ty)
\/ (e!x = None /\ Genv.find_symbol ge x = Some b /\ type_of_global ge b = Some ty)
| Ederef (Eval v ty1) ty =>
exists b, exists ofs, v = Vptr b ofs
| Efield (Eval v ty1) f ty =>
exists b, exists ofs, v = Vptr b ofs /\
match ty1 with
| Tstruct _ fList _ => exists delta, field_offset f fList = Errors.OK delta
| Tunion _ _ _ => True
| _ => False
end
| Eval v ty => False
| Evalof (Eloc b ofs ty') ty =>
ty' = ty /\ exists t, exists v, deref_loc ge ty m b ofs t v
| Eunop op (Eval v1 ty1) ty =>
exists v, sem_unary_operation op v1 ty1 = Some v
| Ebinop op (Eval v1 ty1) (Eval v2 ty2) ty =>
exists v, sem_binary_operation op v1 ty1 v2 ty2 m = Some v
| Ecast (Eval v1 ty1) ty =>
exists v, sem_cast v1 ty1 ty = Some v
| Eseqand (Eval v1 ty1) r2 ty =>
exists b, bool_val v1 ty1 = Some b
| Eseqor (Eval v1 ty1) r2 ty =>
exists b, bool_val v1 ty1 = Some b
| Econdition (Eval v1 ty1) r1 r2 ty =>
exists b, bool_val v1 ty1 = Some b
| Eassign (Eloc b ofs ty1) (Eval v2 ty2) ty =>
exists v, exists m', exists t,
ty = ty1 /\ sem_cast v2 ty2 ty1 = Some v /\ assign_loc ge ty1 m b ofs v t m'
| Eassignop op (Eloc b ofs ty1) (Eval v2 ty2) tyres ty =>
exists t, exists v1,
ty = ty1
/\ deref_loc ge ty1 m b ofs t v1
| Epostincr id (Eloc b ofs ty1) ty =>
exists t, exists v1,
ty = ty1
/\ deref_loc ge ty m b ofs t v1
| Ecomma (Eval v ty1) r2 ty =>
typeof r2 = ty
| Eparen (Eval v1 ty1) ty =>
exists v, sem_cast v1 ty1 ty = Some v
| Ecall (Eval vf tyf) rargs ty =>
exprlist_all_values rargs ->
exists tyargs, exists tyres, exists fd, exists vl,
classify_fun tyf = fun_case_f tyargs tyres
/\ Genv.find_funct ge vf = Some fd
/\ cast_arguments rargs tyargs vl
/\ type_of_fundef fd = Tfunction tyargs tyres
| Ebuiltin ef tyargs rargs ty =>
exprlist_all_values rargs ->
exists vargs, exists t, exists vres, exists m',
cast_arguments rargs tyargs vargs
/\ external_call ef ge vargs m t vres m'
| _ => True
end.
Lemma lred_invert:
forall l m l' m', lred ge e l m l' m' -> invert_expr_prop l m.
Proof.
induction 1; red; auto.
exists b; auto.
exists b; auto.
exists b; exists ofs; auto.
exists b; exists ofs; split; auto. exists delta; auto.
exists b; exists ofs; auto.
Qed.
Lemma rred_invert:
forall r m t r' m', rred ge r m t r' m' -> invert_expr_prop r m.
Proof.
induction 1; red; auto.
split; auto; exists t; exists v; auto.
exists v; auto.
exists v; auto.
exists v; auto.
exists true; auto. exists false; auto.
exists true; auto. exists false; auto.
exists b; auto.
exists v; exists m'; exists t; auto.
exists t; exists v1; auto.
exists t; exists v1; auto.
exists v; auto.
intros. exists vargs; exists t; exists vres; exists m'; auto.
Qed.
Lemma callred_invert:
forall r fd args ty m,
callred ge r fd args ty ->
invert_expr_prop r m.
Proof.
intros. inv H. simpl.
intros. exists tyargs; exists tyres; exists fd; exists args; auto.
Qed.
Scheme context_ind2 := Minimality for context Sort Prop
with contextlist_ind2 := Minimality for contextlist Sort Prop.
Combined Scheme context_contextlist_ind from context_ind2, contextlist_ind2.
Lemma invert_expr_context:
(forall from to C, context from to C ->
forall a m,
invert_expr_prop a m ->
invert_expr_prop (C a) m)
/\(forall from C, contextlist from C ->
forall a m,
invert_expr_prop a m ->
~exprlist_all_values (C a)).
Proof.
apply context_contextlist_ind; intros; try (exploit H0; [eauto|intros]); simpl.
auto.
destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
auto.
destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct e1; auto; destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct e1; auto; destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct e1; auto; destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
destruct e1; auto. intros. elim (H0 a m); auto.
intros. elim (H0 a m); auto.
destruct (C a); auto; contradiction.
destruct (C a); auto; contradiction.
red; intros. destruct (C a); auto.
red; intros. destruct e1; auto. elim (H0 a m); auto.
Qed.
Lemma imm_safe_inv:
forall k a m,
imm_safe ge e k a m ->
match a with
| Eloc _ _ _ => True
| Eval _ _ => True
| _ => invert_expr_prop a m
end.
Proof.
destruct invert_expr_context as [A B].
intros. inv H.
auto.
auto.
assert (invert_expr_prop (C e0) m).
eapply A; eauto. eapply lred_invert; eauto.
red in H. destruct (C e0); auto; contradiction.
assert (invert_expr_prop (C e0) m).
eapply A; eauto. eapply rred_invert; eauto.
red in H. destruct (C e0); auto; contradiction.
assert (invert_expr_prop (C e0) m).
eapply A; eauto. eapply callred_invert; eauto.
red in H. destruct (C e0); auto; contradiction.
Qed.
Lemma safe_inv:
forall k C f a K m,
safe (ExprState f (C a) K e m) ->
context k RV C ->
match a with
| Eloc _ _ _ => True
| Eval _ _ => True
| _ => invert_expr_prop a m
end.
Proof.
intros. eapply imm_safe_inv; eauto. eapply safe_imm_safe; eauto.
Qed.
End INVERSION_LEMMAS.
(** * Correctness of the strategy. *)
Section SIMPLE_EVAL.
Variable f: function.
Variable k: cont.
Variable e: env.
Variable m: mem.
Lemma eval_simple_steps:
(forall a v, eval_simple_rvalue e m a v ->
forall C, context RV RV C ->
star Csem.step ge (ExprState f (C a) k e m)
E0 (ExprState f (C (Eval v (typeof a))) k e m))
/\ (forall a b ofs, eval_simple_lvalue e m a b ofs ->
forall C, context LV RV C ->
star Csem.step ge (ExprState f (C a) k e m)
E0 (ExprState f (C (Eloc b ofs (typeof a))) k e m)).
Proof.
Ltac Steps REC C' := eapply star_trans; [apply (REC C'); eauto | idtac | simpl; reflexivity].
Ltac FinishR := apply star_one; left; apply step_rred; eauto; simpl; try (econstructor; eauto; fail).
Ltac FinishL := apply star_one; left; apply step_lred; eauto; simpl; try (econstructor; eauto; fail).
apply eval_simple_rvalue_lvalue_ind; intros.
(* val *)
apply star_refl.
(* valof *)
Steps H0 (fun x => C(Evalof x ty)). rewrite <- H1 in *. FinishR.
(* addrof *)
Steps H0 (fun x => C(Eaddrof x ty)). FinishR.
(* unop *)
Steps H0 (fun x => C(Eunop op x ty)). FinishR.
(* binop *)
Steps H0 (fun x => C(Ebinop op x r2 ty)).
Steps H2 (fun x => C(Ebinop op (Eval v1 (typeof r1)) x ty)).
FinishR.
(* cast *)
Steps H0 (fun x => C(Ecast x ty)). FinishR.
(* sizeof *)
FinishR.
(* alignof *)
FinishR.
(* loc *)
apply star_refl.
(* var local *)
FinishL.
(* var global *)
FinishL. apply red_var_global; auto.
(* deref *)
Steps H0 (fun x => C(Ederef x ty)). FinishL.
(* field struct *)
Steps H0 (fun x => C(Efield x f0 ty)). rewrite H1 in *. FinishL.
(* field union *)
Steps H0 (fun x => C(Efield x f0 ty)). rewrite H1 in *. FinishL.
Qed.
Lemma eval_simple_rvalue_steps:
forall a v, eval_simple_rvalue e m a v ->
forall C, context RV RV C ->
star Csem.step ge (ExprState f (C a) k e m)
E0 (ExprState f (C (Eval v (typeof a))) k e m).
Proof (proj1 eval_simple_steps).
Lemma eval_simple_lvalue_steps:
forall a b ofs, eval_simple_lvalue e m a b ofs ->
forall C, context LV RV C ->
star Csem.step ge (ExprState f (C a) k e m)
E0 (ExprState f (C (Eloc b ofs (typeof a))) k e m).
Proof (proj2 eval_simple_steps).
Corollary eval_simple_rvalue_safe:
forall C a v,
eval_simple_rvalue e m a v ->
context RV RV C -> safe (ExprState f (C a) k e m) ->
safe (ExprState f (C (Eval v (typeof a))) k e m).
Proof.
intros. eapply safe_steps; eauto. eapply eval_simple_rvalue_steps; eauto.
Qed.
Corollary eval_simple_lvalue_safe:
forall C a b ofs,
eval_simple_lvalue e m a b ofs ->
context LV RV C -> safe (ExprState f (C a) k e m) ->
safe (ExprState f (C (Eloc b ofs (typeof a))) k e m).
Proof.
intros. eapply safe_steps; eauto. eapply eval_simple_lvalue_steps; eauto.
Qed.
Lemma simple_can_eval:
forall a from C,
simple a = true -> context from RV C -> safe (ExprState f (C a) k e m) ->
match from with
| LV => exists b, exists ofs, eval_simple_lvalue e m a b ofs
| RV => exists v, eval_simple_rvalue e m a v
end.
Proof.
Ltac StepL REC C' a :=
let b := fresh "b" in let ofs := fresh "ofs" in
let E := fresh "E" in let S := fresh "SAFE" in
exploit (REC LV C'); eauto; intros [b [ofs E]];
assert (S: safe (ExprState f (C' (Eloc b ofs (typeof a))) k e m)) by
(eapply (eval_simple_lvalue_safe C'); eauto);
simpl in S.
Ltac StepR REC C' a :=
let v := fresh "v" in let E := fresh "E" in let S := fresh "SAFE" in
exploit (REC RV C'); eauto; intros [v E];
assert (S: safe (ExprState f (C' (Eval v (typeof a))) k e m)) by
(eapply (eval_simple_rvalue_safe C'); eauto);
simpl in S.
induction a; intros from C S CTX SAFE;
generalize (safe_expr_kind _ _ _ _ _ _ _ CTX SAFE); intro K; subst;
simpl in S; try discriminate; simpl.
(* val *)
exists v; constructor.
(* var *)
exploit safe_inv; eauto; simpl. intros [b A].
exists b; exists Int.zero.
intuition. apply esl_var_local; auto. apply esl_var_global; auto.
(* field *)
StepR IHa (fun x => C(Efield x f0 ty)) a.
exploit safe_inv. eexact SAFE0. eauto. simpl.
intros [b [ofs [EQ TY]]]. subst v. destruct (typeof a) eqn:?; try contradiction.
destruct TY as [delta OFS]. exists b; exists (Int.add ofs (Int.repr delta)); econstructor; eauto.
exists b; exists ofs; econstructor; eauto.
(* valof *)
destruct (andb_prop _ _ S) as [S1 S2]. clear S. rewrite negb_true_iff in S2.
StepL IHa (fun x => C(Evalof x ty)) a.
exploit safe_inv. eexact SAFE0. eauto. simpl. intros [TY [t [v LOAD]]].
assert (t = E0). inv LOAD; auto. congruence. subst t.
exists v; econstructor; eauto. congruence.
(* deref *)
StepR IHa (fun x => C(Ederef x ty)) a.
exploit safe_inv. eexact SAFE0. eauto. simpl. intros [b [ofs EQ]].
subst v. exists b; exists ofs; econstructor; eauto.
(* addrof *)
StepL IHa (fun x => C(Eaddrof x ty)) a.
exists (Vptr b ofs); econstructor; eauto.
(* unop *)
StepR IHa (fun x => C(Eunop op x ty)) a.
exploit safe_inv. eexact SAFE0. eauto. simpl. intros [v' EQ].
exists v'; econstructor; eauto.
(* binop *)
destruct (andb_prop _ _ S) as [S1 S2]; clear S.
StepR IHa1 (fun x => C(Ebinop op x a2 ty)) a1.
StepR IHa2 (fun x => C(Ebinop op (Eval v (typeof a1)) x ty)) a2.
exploit safe_inv. eexact SAFE1. eauto. simpl. intros [v' EQ].
exists v'; econstructor; eauto.
(* cast *)
StepR IHa (fun x => C(Ecast x ty)) a.
exploit safe_inv. eexact SAFE0. eauto. simpl. intros [v' CAST].
exists v'; econstructor; eauto.
(* sizeof *)
econstructor; econstructor.
(* alignof *)
econstructor; econstructor.
(* loc *)
exists b; exists ofs; constructor.
Qed.
Lemma simple_can_eval_rval:
forall r C,
simple r = true -> context RV RV C -> safe (ExprState f (C r) k e m) ->
exists v, eval_simple_rvalue e m r v
/\ safe (ExprState f (C (Eval v (typeof r))) k e m).
Proof.
intros. exploit (simple_can_eval r RV); eauto. intros [v A].
exists v; split; auto. eapply eval_simple_rvalue_safe; eauto.
Qed.
Lemma simple_can_eval_lval:
forall l C,
simple l = true -> context LV RV C -> safe (ExprState f (C l) k e m) ->
exists b, exists ofs, eval_simple_lvalue e m l b ofs
/\ safe (ExprState f (C (Eloc b ofs (typeof l))) k e m).
Proof.
intros. exploit (simple_can_eval l LV); eauto. intros [b [ofs A]].
exists b; exists ofs; split; auto. eapply eval_simple_lvalue_safe; eauto.
Qed.
Fixpoint rval_list (vl: list val) (rl: exprlist) : exprlist :=
match vl, rl with
| v1 :: vl', Econs r1 rl' => Econs (Eval v1 (typeof r1)) (rval_list vl' rl')
| _, _ => Enil
end.
Inductive eval_simple_list': exprlist -> list val -> Prop :=
| esrl'_nil:
eval_simple_list' Enil nil
| esrl'_cons: forall r rl v vl,
eval_simple_rvalue e m r v ->
eval_simple_list' rl vl ->
eval_simple_list' (Econs r rl) (v :: vl).
Lemma eval_simple_list_implies:
forall rl tyl vl,
eval_simple_list e m rl tyl vl ->
exists vl', cast_arguments (rval_list vl' rl) tyl vl /\ eval_simple_list' rl vl'.
Proof.
induction 1.
exists (@nil val); split. constructor. constructor.
destruct IHeval_simple_list as [vl' [A B]].
exists (v' :: vl'); split. constructor; auto. constructor; auto.
Qed.
Lemma can_eval_simple_list:
forall rl vl,
eval_simple_list' rl vl ->
forall tyl vl',
cast_arguments (rval_list vl rl) tyl vl' ->
eval_simple_list e m rl tyl vl'.
Proof.
induction 1; simpl; intros.
inv H. constructor.
inv H1. econstructor; eauto.
Qed.
Fixpoint exprlist_app (rl1 rl2: exprlist) : exprlist :=
match rl1 with
| Enil => rl2
| Econs r1 rl1' => Econs r1 (exprlist_app rl1' rl2)
end.
Lemma exprlist_app_assoc:
forall rl2 rl3 rl1,
exprlist_app (exprlist_app rl1 rl2) rl3 =
exprlist_app rl1 (exprlist_app rl2 rl3).
Proof.
induction rl1; auto. simpl. congruence.
Qed.
Inductive contextlist' : (exprlist -> expr) -> Prop :=
| contextlist'_call: forall r1 rl0 ty C,
context RV RV C ->
contextlist' (fun rl => C (Ecall r1 (exprlist_app rl0 rl) ty))
| contextlist'_builtin: forall ef tyargs rl0 ty C,
context RV RV C ->
contextlist' (fun rl => C (Ebuiltin ef tyargs (exprlist_app rl0 rl) ty)).
Lemma exprlist_app_context:
forall rl1 rl2,
contextlist RV (fun x => exprlist_app rl1 (Econs x rl2)).
Proof.
induction rl1; simpl; intros.
apply ctx_list_head. constructor.
apply ctx_list_tail. auto.
Qed.
Lemma contextlist'_head:
forall rl C,
contextlist' C ->
context RV RV (fun x => C (Econs x rl)).
Proof.
intros. inv H.
set (C' := fun x => Ecall r1 (exprlist_app rl0 (Econs x rl)) ty).
assert (context RV RV C'). constructor. apply exprlist_app_context.
change (context RV RV (fun x => C0 (C' x))).
eapply context_compose; eauto.
set (C' := fun x => Ebuiltin ef tyargs (exprlist_app rl0 (Econs x rl)) ty).
assert (context RV RV C'). constructor. apply exprlist_app_context.
change (context RV RV (fun x => C0 (C' x))).
eapply context_compose; eauto.
Qed.
Lemma contextlist'_tail:
forall r1 C,
contextlist' C ->
contextlist' (fun x => C (Econs r1 x)).
Proof.
intros. inv H.
replace (fun x => C0 (Ecall r0 (exprlist_app rl0 (Econs r1 x)) ty))
with (fun x => C0 (Ecall r0 (exprlist_app (exprlist_app rl0 (Econs r1 Enil)) x) ty)).
constructor. auto.
apply extensionality; intros. f_equal. f_equal. apply exprlist_app_assoc.
replace (fun x => C0 (Ebuiltin ef tyargs (exprlist_app rl0 (Econs r1 x)) ty))
with (fun x => C0 (Ebuiltin ef tyargs (exprlist_app (exprlist_app rl0 (Econs r1 Enil)) x) ty)).
constructor. auto.
apply extensionality; intros. f_equal. f_equal. apply exprlist_app_assoc.
Qed.
Hint Resolve contextlist'_head contextlist'_tail.
Lemma eval_simple_list_steps:
forall rl vl, eval_simple_list' rl vl ->
forall C, contextlist' C ->
star Csem.step ge (ExprState f (C rl) k e m)
E0 (ExprState f (C (rval_list vl rl)) k e m).
Proof.
induction 1; intros.
(* nil *)
apply star_refl.
(* cons *)
eapply star_trans.
eapply eval_simple_rvalue_steps with (C := fun x => C(Econs x rl)); eauto.
apply IHeval_simple_list' with (C := fun x => C(Econs (Eval v (typeof r)) x)); auto.
auto.
Qed.
Lemma simple_list_can_eval:
forall rl C,
simplelist rl = true ->
contextlist' C ->
safe (ExprState f (C rl) k e m) ->
exists vl, eval_simple_list' rl vl.
Proof.
induction rl; intros.
econstructor; constructor.
simpl in H. destruct (andb_prop _ _ H).
exploit (simple_can_eval r1 RV (fun x => C(Econs x rl))); eauto.
intros [v1 EV1].
exploit (IHrl (fun x => C(Econs (Eval v1 (typeof r1)) x))); eauto.
apply (eval_simple_rvalue_safe (fun x => C(Econs x rl))); eauto.
intros [vl EVl].
exists (v1 :: vl); constructor; auto.
Qed.
Lemma rval_list_all_values:
forall vl rl, exprlist_all_values (rval_list vl rl).
Proof.
induction vl; simpl; intros. auto.
destruct rl; simpl; auto.
Qed.
End SIMPLE_EVAL.
(** Decomposition *)
Section DECOMPOSITION.
Variable f: function.
Variable k: cont.
Variable e: env.
Variable m: mem.
Definition simple_side_effect (r: expr) : Prop :=
match r with
| Evalof l _ => simple l = true /\ type_is_volatile (typeof l) = true
| Eseqand r1 r2 _ => simple r1 = true
| Eseqor r1 r2 _ => simple r1 = true
| Econdition r1 r2 r3 _ => simple r1 = true
| Eassign l1 r2 _ => simple l1 = true /\ simple r2 = true
| Eassignop _ l1 r2 _ _ => simple l1 = true /\ simple r2 = true
| Epostincr _ l1 _ => simple l1 = true
| Ecomma r1 r2 _ => simple r1 = true
| Ecall r1 rl _ => simple r1 = true /\ simplelist rl = true
| Ebuiltin ef tyargs rl _ => simplelist rl = true
| Eparen r1 _ => simple r1 = true
| _ => False
end.
Scheme expr_ind2 := Induction for expr Sort Prop
with exprlist_ind2 := Induction for exprlist Sort Prop.
Combined Scheme expr_expr_list_ind from expr_ind2, exprlist_ind2.
Hint Constructors leftcontext leftcontextlist.
Lemma decompose_expr:
(forall a from C,
context from RV C -> safe (ExprState f (C a) k e m) ->
simple a = true
\/ exists C', exists a', a = C' a' /\ simple_side_effect a' /\ leftcontext RV from C')
/\(forall rl C,
contextlist' C -> safe (ExprState f (C rl) k e m) ->
simplelist rl = true
\/ exists C', exists a', rl = C' a' /\ simple_side_effect a' /\ leftcontextlist RV C').
Proof.
apply expr_expr_list_ind; intros; simpl; auto.
Ltac Kind :=
exploit safe_expr_kind; eauto; simpl; intros X; rewrite <- X in *; clear X.
Ltac Rec HR kind C C' :=
destruct (HR kind (fun x => C(C' x))) as [? | [C'' [a' [D [A B]]]]];
[eauto | eauto | auto |
right; exists (fun x => C'(C'' x)); exists a'; rewrite D; auto].
Ltac Base :=
right; exists (fun x => x); econstructor; split; [eauto | simpl; auto].
(* field *)
Kind. Rec H RV C (fun x => Efield x f0 ty).
(* rvalof *)
Kind. Rec H LV C (fun x => Evalof x ty).
destruct (type_is_volatile (typeof l)) eqn:?.
Base. rewrite H2; auto.
(* deref *)
Kind. Rec H RV C (fun x => Ederef x ty).
(* addrof *)
Kind. Rec H LV C (fun x => Eaddrof x ty).
(* unop *)
Kind. Rec H RV C (fun x => Eunop op x ty).
(* binop *)
Kind. Rec H RV C (fun x => Ebinop op x r2 ty). rewrite H3.
Rec H0 RV C (fun x => Ebinop op r1 x ty).
(* cast *)
Kind. Rec H RV C (fun x => Ecast x ty).
(* seqand *)
Kind. Rec H RV C (fun x => Eseqand x r2 ty). Base.
(* seqor *)
Kind. Rec H RV C (fun x => Eseqor x r2 ty). Base.
(* condition *)
Kind. Rec H RV C (fun x => Econdition x r2 r3 ty). Base.
(* assign *)
Kind. Rec H LV C (fun x => Eassign x r ty). Rec H0 RV C (fun x => Eassign l x ty). Base.
(* assignop *)
Kind. Rec H LV C (fun x => Eassignop op x r tyres ty). Rec H0 RV C (fun x => Eassignop op l x tyres ty). Base.
(* postincr *)
Kind. Rec H LV C (fun x => Epostincr id x ty). Base.
(* comma *)
Kind. Rec H RV C (fun x => Ecomma x r2 ty). Base.
(* call *)
Kind. Rec H RV C (fun x => Ecall x rargs ty).
destruct (H0 (fun x => C (Ecall r1 x ty))) as [A | [C' [a' [D [A B]]]]].
eapply contextlist'_call with (C := C) (rl0 := Enil). auto. auto.
Base.
right; exists (fun x => Ecall r1 (C' x) ty); exists a'. rewrite D; simpl; auto.
(* builtin *)
Kind.
destruct (H (fun x => C (Ebuiltin ef tyargs x ty))) as [A | [C' [a' [D [A B]]]]].
eapply contextlist'_builtin with (C := C) (rl0 := Enil). auto. auto.
Base.
right; exists (fun x => Ebuiltin ef tyargs (C' x) ty); exists a'. rewrite D; simpl; auto.
(* rparen *)
Kind. Rec H RV C (fun x => (Eparen x ty)). Base.
(* cons *)
destruct (H RV (fun x => C (Econs x rl))) as [A | [C' [a' [A [B D]]]]].
eapply contextlist'_head; eauto. auto.
destruct (H0 (fun x => C (Econs r1 x))) as [A' | [C' [a' [A' [B D]]]]].
eapply contextlist'_tail; eauto. auto.
rewrite A; rewrite A'; auto.
right; exists (fun x => Econs r1 (C' x)); exists a'. rewrite A'; eauto.
right; exists (fun x => Econs (C' x) rl); exists a'. rewrite A; eauto.
Qed.
Lemma decompose_topexpr:
forall a,
safe (ExprState f a k e m) ->
simple a = true
\/ exists C, exists a', a = C a' /\ simple_side_effect a' /\ leftcontext RV RV C.
Proof.
intros. eapply (proj1 decompose_expr). apply ctx_top. auto.
Qed.
End DECOMPOSITION.
(** Simulation for expressions. *)
Lemma estep_simulation:
forall S t S',
estep S t S' -> plus Csem.step ge S t S'.
Proof.
intros. inv H.
(* simple *)
exploit eval_simple_rvalue_steps; eauto. simpl; intros STEPS.
exploit star_inv; eauto. intros [[EQ1 EQ2] | A]; eauto.
inversion EQ1. rewrite <- H2 in H1; contradiction.
(* valof volatile *)
eapply plus_right.
eapply eval_simple_lvalue_steps with (C := fun x => C(Evalof x (typeof l))); eauto.
left. apply step_rred; eauto. econstructor; eauto. auto.
(* seqand true *)
eapply plus_right.
eapply eval_simple_rvalue_steps with (C := fun x => C(Eseqand x r2 ty)); eauto.
left. apply step_rred; eauto. apply red_seqand_true; auto. traceEq.
(* seqand false *)
eapply plus_right.
eapply eval_simple_rvalue_steps with (C := fun x => C(Eseqand x r2 ty)); eauto.
left. apply step_rred; eauto. apply red_seqand_false; auto. traceEq.
(* seqor true *)
eapply plus_right.
eapply eval_simple_rvalue_steps with (C := fun x => C(Eseqor x r2 ty)); eauto.
left. apply step_rred; eauto. apply red_seqor_true; auto. traceEq.
(* seqor false *)
eapply plus_right.
eapply eval_simple_rvalue_steps with (C := fun x => C(Eseqor x r2 ty)); eauto.
left. apply step_rred; eauto. apply red_seqor_false; auto. traceEq.
(* condition *)
eapply plus_right.
eapply eval_simple_rvalue_steps with (C := fun x => C(Econdition x r2 r3 ty)); eauto.
left; apply step_rred; eauto. constructor; auto. auto.
(* assign *)
eapply star_plus_trans.
eapply eval_simple_lvalue_steps with (C := fun x => C(Eassign x r (typeof l))); eauto.
eapply plus_right.
eapply eval_simple_rvalue_steps with (C := fun x => C(Eassign (Eloc b ofs (typeof l)) x (typeof l))); eauto.
left; apply step_rred; eauto. econstructor; eauto.
reflexivity. auto.
(* assignop *)
eapply star_plus_trans.
eapply eval_simple_lvalue_steps with (C := fun x => C(Eassignop op x r tyres (typeof l))); eauto.
eapply star_plus_trans.
eapply eval_simple_rvalue_steps with (C := fun x => C(Eassignop op (Eloc b ofs (typeof l)) x tyres (typeof l))); eauto.
eapply plus_left.
left; apply step_rred; auto. econstructor; eauto.
eapply star_left.
left; apply step_rred with (C := fun x => C(Eassign (Eloc b ofs (typeof l)) x (typeof l))); eauto. econstructor; eauto.
apply star_one.
left; apply step_rred; auto. econstructor; eauto.
reflexivity. reflexivity. reflexivity. traceEq.
(* assignop stuck *)
eapply star_plus_trans.
eapply eval_simple_lvalue_steps with (C := fun x => C(Eassignop op x r tyres (typeof l))); eauto.
eapply star_plus_trans.
eapply eval_simple_rvalue_steps with (C := fun x => C(Eassignop op (Eloc b ofs (typeof l)) x tyres (typeof l))); eauto.
eapply plus_left.
left; apply step_rred; auto. econstructor; eauto.
destruct (sem_binary_operation op v1 (typeof l) v2 (typeof r) m) as [v3|] eqn:?.
eapply star_left.
left; apply step_rred with (C := fun x => C(Eassign (Eloc b ofs (typeof l)) x (typeof l))); eauto. econstructor; eauto.
apply star_one.
left; eapply step_stuck; eauto.
red; intros. exploit imm_safe_inv; eauto. simpl. intros [v4' [m' [t' [A [B D]]]]].
rewrite B in H4. eelim H4; eauto.
reflexivity.
apply star_one.
left; eapply step_stuck with (C := fun x => C(Eassign (Eloc b ofs (typeof l)) x (typeof l))); eauto.
red; intros. exploit imm_safe_inv; eauto. simpl. intros [v3 A]. congruence.
reflexivity.
reflexivity. traceEq.
(* postincr *)
eapply star_plus_trans.
eapply eval_simple_lvalue_steps with (C := fun x => C(Epostincr id x (typeof l))); eauto.
eapply plus_left.
left; apply step_rred; auto. econstructor; eauto.
eapply star_left.
left; apply step_rred with (C := fun x => C (Ecomma (Eassign (Eloc b ofs (typeof l)) x (typeof l)) (Eval v1 (typeof l)) (typeof l))); eauto.
econstructor. instantiate (1 := v2). destruct id; assumption.
eapply star_left.
left; apply step_rred with (C := fun x => C (Ecomma x (Eval v1 (typeof l)) (typeof l))); eauto.
econstructor; eauto.
apply star_one.
left; apply step_rred; auto. econstructor; eauto.
reflexivity. reflexivity. reflexivity. traceEq.
(* postincr stuck *)
eapply star_plus_trans.
eapply eval_simple_lvalue_steps with (C := fun x => C(Epostincr id x (typeof l))); eauto.
eapply plus_left.
left; apply step_rred; auto. econstructor; eauto.
set (op := match id with Incr => Oadd | Decr => Osub end).
assert (SEM: sem_binary_operation op v1 (typeof l) (Vint Int.one) type_int32s m =
sem_incrdecr id v1 (typeof l)).
destruct id; auto.
destruct (sem_incrdecr id v1 (typeof l)) as [v2|].
eapply star_left.
left; apply step_rred with (C := fun x => C (Ecomma (Eassign (Eloc b ofs (typeof l)) x (typeof l)) (Eval v1 (typeof l)) (typeof l))); eauto.
econstructor; eauto.
apply star_one.
left; eapply step_stuck with (C := fun x => C (Ecomma x (Eval v1 (typeof l)) (typeof l))); eauto.
red; intros. exploit imm_safe_inv; eauto. simpl. intros [v3 [m' [t' [A [B D]]]]].
rewrite B in H3. eelim H3; eauto.
reflexivity.
apply star_one.
left; eapply step_stuck with (C := fun x => C (Ecomma (Eassign (Eloc b ofs (typeof l)) x (typeof l)) (Eval v1 (typeof l)) (typeof l))); eauto.
red; intros. exploit imm_safe_inv; eauto. simpl. intros [v2 A]. congruence.
reflexivity.
traceEq.
(* comma *)
eapply plus_right.
eapply eval_simple_rvalue_steps with (C := fun x => C(Ecomma x r2 (typeof r2))); eauto.
left; apply step_rred; eauto. econstructor; eauto. auto.
(* paren *)
eapply plus_right; eauto.
eapply eval_simple_rvalue_steps with (C := fun x => C(Eparen x ty)); eauto.
left; apply step_rred; eauto. econstructor; eauto. auto.
(* call *)
exploit eval_simple_list_implies; eauto. intros [vl' [A B]].
eapply star_plus_trans.
eapply eval_simple_rvalue_steps with (C := fun x => C(Ecall x rargs ty)); eauto.
eapply plus_right.
eapply eval_simple_list_steps with (C := fun x => C(Ecall (Eval vf (typeof rf)) x ty)); eauto.
eapply contextlist'_call with (rl0 := Enil); auto.
left; apply Csem.step_call; eauto. econstructor; eauto.
traceEq. auto.
(* builtin *)
exploit eval_simple_list_implies; eauto. intros [vl' [A B]].
eapply plus_right.
eapply eval_simple_list_steps with (C := fun x => C(Ebuiltin ef tyargs x ty)); eauto.
eapply contextlist'_builtin with (rl0 := Enil); auto.
left; apply Csem.step_rred; eauto. econstructor; eauto.
traceEq.
Qed.
Lemma can_estep:
forall f a k e m,
safe (ExprState f a k e m) ->
match a with Eval _ _ => False | _ => True end ->
exists t, exists S, estep (ExprState f a k e m) t S.
Proof.
intros. destruct (decompose_topexpr f k e m a H) as [A | [C [b [P [Q R]]]]].
(* simple expr *)
exploit (simple_can_eval f k e m a RV (fun x => x)); auto. intros [v P].
econstructor; econstructor; eapply step_expr; eauto.
(* side effect *)
clear H0. subst a. red in Q. destruct b; try contradiction.
(* valof volatile *)
destruct Q.
exploit (simple_can_eval_lval f k e m b (fun x => C(Evalof x ty))); eauto.
intros [b1 [ofs [E1 S1]]].
exploit safe_inv. eexact S1. eauto. simpl. intros [A [t [v B]]].
econstructor; econstructor; eapply step_rvalof_volatile; eauto. congruence.
(* seqand *)
exploit (simple_can_eval_rval f k e m b1 (fun x => C(Eseqand x b2 ty))); eauto.
intros [v1 [E1 S1]].
exploit safe_inv. eexact S1. eauto. simpl. intros [b BV].
destruct b.
econstructor; econstructor; eapply step_seqand_true; eauto.
econstructor; econstructor; eapply step_seqand_false; eauto.
(* seqor *)
exploit (simple_can_eval_rval f k e m b1 (fun x => C(Eseqor x b2 ty))); eauto.
intros [v1 [E1 S1]].
exploit safe_inv. eexact S1. eauto. simpl. intros [b BV].
destruct b.
econstructor; econstructor; eapply step_seqor_true; eauto.
econstructor; econstructor; eapply step_seqor_false; eauto.
(* condition *)
exploit (simple_can_eval_rval f k e m b1 (fun x => C(Econdition x b2 b3 ty))); eauto.
intros [v1 [E1 S1]].
exploit safe_inv. eexact S1. eauto. simpl. intros [b BV].
econstructor; econstructor. eapply step_condition; eauto.
(* assign *)
destruct Q.
exploit (simple_can_eval_lval f k e m b1 (fun x => C(Eassign x b2 ty))); eauto.
intros [b [ofs [E1 S1]]].
exploit (simple_can_eval_rval f k e m b2 (fun x => C(Eassign (Eloc b ofs (typeof b1)) x ty))); eauto.
intros [v [E2 S2]].
exploit safe_inv. eexact S2. eauto. simpl. intros [v' [m' [t [A [B D]]]]].
econstructor; econstructor; eapply step_assign; eauto.
(* assignop *)
destruct Q.
exploit (simple_can_eval_lval f k e m b1 (fun x => C(Eassignop op x b2 tyres ty))); eauto.
intros [b [ofs [E1 S1]]].
exploit (simple_can_eval_rval f k e m b2 (fun x => C(Eassignop op (Eloc b ofs (typeof b1)) x tyres ty))); eauto.
intros [v [E2 S2]].
exploit safe_inv. eexact S2. eauto. simpl. intros [t1 [v1 [A B]]].
destruct (sem_binary_operation op v1 (typeof b1) v (typeof b2) m) as [v3|] eqn:?.
destruct (sem_cast v3 tyres (typeof b1)) as [v4|] eqn:?.
destruct (classic (exists t2, exists m', assign_loc ge (typeof b1) m b ofs v4 t2 m')).
destruct H2 as [t2 [m' D]].
econstructor; econstructor; eapply step_assignop; eauto.
econstructor; econstructor; eapply step_assignop_stuck; eauto.
rewrite Heqo. rewrite Heqo0. intros; red; intros. elim H2. exists t2; exists m'; auto.
econstructor; econstructor; eapply step_assignop_stuck; eauto.
rewrite Heqo. rewrite Heqo0. auto.
econstructor; econstructor; eapply step_assignop_stuck; eauto.
rewrite Heqo. auto.
(* postincr *)
exploit (simple_can_eval_lval f k e m b (fun x => C(Epostincr id x ty))); eauto.
intros [b1 [ofs [E1 S1]]].
exploit safe_inv. eexact S1. eauto. simpl. intros [t [v1 [A B]]].
destruct (sem_incrdecr id v1 ty) as [v2|] eqn:?.
destruct (sem_cast v2 (typeconv ty) ty) as [v3|] eqn:?.
destruct (classic (exists t2, exists m', assign_loc ge ty m b1 ofs v3 t2 m')).
destruct H0 as [t2 [m' D]].
econstructor; econstructor; eapply step_postincr; eauto.
econstructor; econstructor; eapply step_postincr_stuck; eauto.
rewrite Heqo. rewrite Heqo0. intros; red; intros. elim H0. exists t2; exists m'; congruence.
econstructor; econstructor; eapply step_postincr_stuck; eauto.
rewrite Heqo. rewrite Heqo0. auto.
econstructor; econstructor; eapply step_postincr_stuck; eauto.
rewrite Heqo. auto.
(* comma *)
exploit (simple_can_eval_rval f k e m b1 (fun x => C(Ecomma x b2 ty))); eauto.
intros [v1 [E1 S1]].
exploit safe_inv. eexact S1. eauto. simpl. intros EQ.
econstructor; econstructor; eapply step_comma; eauto.
(* call *)
destruct Q.
exploit (simple_can_eval_rval f k e m b (fun x => C(Ecall x rargs ty))); eauto.
intros [vf [E1 S1]].
pose (C' := fun x => C(Ecall (Eval vf (typeof b)) x ty)).
assert (contextlist' C'). unfold C'; eapply contextlist'_call with (rl0 := Enil); auto.
exploit (simple_list_can_eval f k e m rargs C'); eauto.
intros [vl E2].
exploit safe_inv. 2: eapply leftcontext_context; eexact R.
eapply safe_steps. eexact S1.
apply (eval_simple_list_steps f k e m rargs vl E2 C'); auto.
simpl. intros X. exploit X. eapply rval_list_all_values.
intros [tyargs [tyres [fd [vargs [P [Q [U V]]]]]]].
econstructor; econstructor; eapply step_call; eauto. eapply can_eval_simple_list; eauto.
(* builtin *)
pose (C' := fun x => C(Ebuiltin ef tyargs x ty)).
assert (contextlist' C'). unfold C'; eapply contextlist'_builtin with (rl0 := Enil); auto.
exploit (simple_list_can_eval f k e m rargs C'); eauto.
intros [vl E].
exploit safe_inv. 2: eapply leftcontext_context; eexact R.
eapply safe_steps. eexact H.
apply (eval_simple_list_steps f k e m rargs vl E C'); auto.
simpl. intros X. exploit X. eapply rval_list_all_values.
intros [vargs [t [vres [m' [U V]]]]].
econstructor; econstructor; eapply step_builtin; eauto.
eapply can_eval_simple_list; eauto.
(* paren *)
exploit (simple_can_eval_rval f k e m b (fun x => C(Eparen x ty))); eauto.
intros [v1 [E1 S1]].
exploit safe_inv. eexact S1. eauto. simpl. intros [v CAST].
econstructor; econstructor; eapply step_paren; eauto.
Qed.
(** Simulation for all states *)
Theorem step_simulation:
forall S1 t S2,
step S1 t S2 -> plus Csem.step ge S1 t S2.
Proof.
intros. inv H.
apply estep_simulation; auto.
apply plus_one. right. auto.
Qed.
Theorem progress:
forall S,
safe S -> (exists r, final_state S r) \/ (exists t, exists S', step S t S').
Proof.
intros. exploit H. apply star_refl. intros [FIN | [t [S' STEP]]].
(* 1. Finished. *)
auto.
right. destruct STEP.
(* 2. Expression step. *)
assert (exists t, exists S', estep S t S').
inv H0.
(* lred *)
eapply can_estep; eauto. inv H2; auto.
(* rred *)
eapply can_estep; eauto. inv H2; auto. inv H1; auto.
(* callred *)
eapply can_estep; eauto. inv H2; auto. inv H1; auto.
(* stuck *)
exploit (H Stuckstate). apply star_one. left. econstructor; eauto.
intros [[r F] | [t [S' R]]]. inv F. inv R. inv H0. inv H0.
destruct H1 as [t' [S'' ESTEP]].
exists t'; exists S''; left; auto.
(* 3. Other step. *)
exists t; exists S'; right; auto.
Qed.
End STRATEGY.
(** The semantics that follows the strategy. *)
Definition semantics (p: program) :=
Semantics step (initial_state p) final_state (Genv.globalenv p).
(** This semantics is receptive to changes in events. *)
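(* The remarks below show that volatile loads and stores produce traces of
length at most one; together with [external_call_trace_length], these are
the facts used in the strong receptiveness proof that follows. *)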
Remark deref_loc_trace:
forall F V (ge: Genv.t F V) ty m b ofs t v,
deref_loc ge ty m b ofs t v ->
match t with nil => True | ev :: nil => True | _ => False end.
Proof.
intros. inv H; simpl; auto. inv H2; simpl; auto.
Qed.
Remark deref_loc_receptive:
forall F V (ge: Genv.t F V) ty m b ofs ev1 t1 v ev2,
deref_loc ge ty m b ofs (ev1 :: t1) v ->
match_traces ge (ev1 :: nil) (ev2 :: nil) ->
t1 = nil /\ exists v', deref_loc ge ty m b ofs (ev2 :: nil) v'.
Proof.
intros.
assert (t1 = nil). exploit deref_loc_trace; eauto. destruct t1; simpl; tauto.
inv H. exploit volatile_load_receptive; eauto. intros [v' A].
split; auto; exists v'; econstructor; eauto.
Qed.
Remark assign_loc_trace:
forall F V (ge: Genv.t F V) ty m b ofs t v m',
assign_loc ge ty m b ofs v t m' ->
match t with nil => True | ev :: nil => output_event ev | _ => False end.
Proof.
intros. inv H; simpl; auto. inv H2; simpl; auto.
Qed.
Remark assign_loc_receptive:
forall F V (ge: Genv.t F V) ty m b ofs ev1 t1 v m' ev2,
assign_loc ge ty m b ofs v (ev1 :: t1) m' ->
match_traces ge (ev1 :: nil) (ev2 :: nil) ->
ev1 :: t1 = ev2 :: nil.
Proof.
intros.
assert (t1 = nil). exploit assign_loc_trace; eauto. destruct t1; simpl; tauto.
inv H. eapply volatile_store_receptive; eauto.
Qed.
Lemma semantics_strongly_receptive:
forall p, strongly_receptive (semantics p).
Proof.
intros. constructor; simpl; intros.
(* receptiveness *)
inversion H; subst.
inv H1.
(* valof volatile *)
exploit deref_loc_receptive; eauto. intros [A [v' B]].
econstructor; econstructor. left; eapply step_rvalof_volatile; eauto.
(* assign *)
exploit assign_loc_receptive; eauto. intro EQ; rewrite EQ in H.
econstructor; econstructor; eauto.
(* assignop *)
destruct t0 as [ | ev0 t0]; simpl in H10.
subst t2. exploit assign_loc_receptive; eauto. intros EQ; rewrite EQ in H.
econstructor; econstructor; eauto.
inv H10. exploit deref_loc_receptive; eauto. intros [EQ [v1' A]]. subst t0.
destruct (sem_binary_operation op v1' (typeof l) v2 (typeof r) m) as [v3'|] eqn:?.
destruct (sem_cast v3' tyres (typeof l)) as [v4'|] eqn:?.
destruct (classic (exists t2', exists m'', assign_loc (Genv.globalenv p) (typeof l) m b ofs v4' t2' m'')).
destruct H1 as [t2' [m'' P]].
econstructor; econstructor. left; eapply step_assignop with (v1 := v1'); eauto. simpl; reflexivity.
econstructor; econstructor. left; eapply step_assignop_stuck with (v1 := v1'); eauto.
rewrite Heqo; rewrite Heqo0. intros; red; intros; elim H1. exists t0; exists m'0; auto.
econstructor; econstructor. left; eapply step_assignop_stuck with (v1 := v1'); eauto.
rewrite Heqo; rewrite Heqo0; auto.
econstructor; econstructor. left; eapply step_assignop_stuck with (v1 := v1'); eauto.
rewrite Heqo; auto.
(* assignop stuck *)
exploit deref_loc_receptive; eauto. intros [EQ [v1' A]]. subst t1.
destruct (sem_binary_operation op v1' (typeof l) v2 (typeof r) m) as [v3'|] eqn:?.
destruct (sem_cast v3' tyres (typeof l)) as [v4'|] eqn:?.
destruct (classic (exists t2', exists m'', assign_loc (Genv.globalenv p) (typeof l) m b ofs v4' t2' m'')).
destruct H1 as [t2' [m'' P]].
econstructor; econstructor. left; eapply step_assignop with (v1 := v1'); eauto. simpl; reflexivity.
econstructor; econstructor. left; eapply step_assignop_stuck with (v1 := v1'); eauto.
rewrite Heqo; rewrite Heqo0. intros; red; intros; elim H1. exists t2; exists m'; auto.
econstructor; econstructor. left; eapply step_assignop_stuck with (v1 := v1'); eauto.
rewrite Heqo; rewrite Heqo0; auto.
econstructor; econstructor. left; eapply step_assignop_stuck with (v1 := v1'); eauto.
rewrite Heqo; auto.
(* postincr *)
destruct t0 as [ | ev0 t0]; simpl in H9.
subst t2. exploit assign_loc_receptive; eauto. intros EQ; rewrite EQ in H.
econstructor; econstructor; eauto.
inv H9. exploit deref_loc_receptive; eauto. intros [EQ [v1' A]]. subst t0.
destruct (sem_incrdecr id v1' (typeof l)) as [v2'|] eqn:?.
destruct (sem_cast v2' (typeconv (typeof l)) (typeof l)) as [v3'|] eqn:?.
destruct (classic (exists t2', exists m'', assign_loc (Genv.globalenv p) (typeof l) m b ofs v3' t2' m'')).
destruct H1 as [t2' [m'' P]].
econstructor; econstructor. left; eapply step_postincr with (v1 := v1'); eauto. simpl; reflexivity.
econstructor; econstructor. left; eapply step_postincr_stuck with (v1 := v1'); eauto.
rewrite Heqo; rewrite Heqo0. intros; red; intros; elim H1. exists t0; exists m'0; auto.
econstructor; econstructor. left; eapply step_postincr_stuck with (v1 := v1'); eauto.
rewrite Heqo; rewrite Heqo0; auto.
econstructor; econstructor. left; eapply step_postincr_stuck with (v1 := v1'); eauto.
rewrite Heqo; auto.
(* postincr stuck *)
exploit deref_loc_receptive; eauto. intros [EQ [v1' A]]. subst t1.
destruct (sem_incrdecr id v1' (typeof l)) as [v2'|] eqn:?.
destruct (sem_cast v2' (typeconv (typeof l)) (typeof l)) as [v3'|] eqn:?.
destruct (classic (exists t2', exists m'', assign_loc (Genv.globalenv p) (typeof l) m b ofs v3' t2' m'')).
destruct H1 as [t2' [m'' P]].
econstructor; econstructor. left; eapply step_postincr with (v1 := v1'); eauto. simpl; reflexivity.
econstructor; econstructor. left; eapply step_postincr_stuck with (v1 := v1'); eauto.
rewrite Heqo; rewrite Heqo0. intros; red; intros; elim H1. exists t2; exists m'; auto.
econstructor; econstructor. left; eapply step_postincr_stuck with (v1 := v1'); eauto.
rewrite Heqo; rewrite Heqo0; auto.
econstructor; econstructor. left; eapply step_postincr_stuck with (v1 := v1'); eauto.
rewrite Heqo; auto.
(* builtin *)
exploit external_call_trace_length; eauto. destruct t1; simpl; intros.
exploit external_call_receptive; eauto. intros [vres2 [m2 EC2]].
econstructor; econstructor. left; eapply step_builtin; eauto.
omegaContradiction.
(* external calls *)
inv H1.
exploit external_call_trace_length; eauto. destruct t1; simpl; intros.
exploit external_call_receptive; eauto. intros [vres2 [m2 EC2]].
exists (Returnstate vres2 k m2); exists E0; right; econstructor; eauto.
omegaContradiction.
(* well-behaved traces *)
red; intros. inv H; inv H0; simpl; auto.
(* valof volatile *)
exploit deref_loc_trace; eauto. destruct t; auto. destruct t; tauto.
(* assign *)
exploit assign_loc_trace; eauto. destruct t; auto. destruct t; simpl; tauto.
(* assignop *)
exploit deref_loc_trace; eauto. exploit assign_loc_trace; eauto.
destruct t1. destruct t2. simpl; auto. destruct t2; simpl; tauto.
destruct t1. destruct t2. simpl; auto. destruct t2; simpl; tauto.
tauto.
(* assignop stuck *)
exploit deref_loc_trace; eauto. destruct t; auto. destruct t; tauto.
(* postincr *)
exploit deref_loc_trace; eauto. exploit assign_loc_trace; eauto.
destruct t1. destruct t2. simpl; auto. destruct t2; simpl; tauto.
destruct t1. destruct t2. simpl; auto. destruct t2; simpl; tauto.
tauto.
(* postincr stuck *)
exploit deref_loc_trace; eauto. destruct t; auto. destruct t; tauto.
(* builtins *)
exploit external_call_trace_length; eauto.
destruct t; simpl; auto. destruct t; simpl; auto. intros; omegaContradiction.
(* external calls *)
exploit external_call_trace_length; eauto.
destruct t; simpl; auto. destruct t; simpl; auto. intros; omegaContradiction.
Qed.
(** The main simulation result. *)
Theorem strategy_simulation:
forall p, backward_simulation (Csem.semantics p) (semantics p).
Proof.
intros.
apply backward_simulation_plus with (match_states := fun (S1 S2: state) => S1 = S2); simpl.
(* symbols *)
auto.
(* initial states exist *)
intros. exists s1; auto.
(* initial states match *)
intros. exists s2; auto.
(* final states match *)
intros. subst s2. auto.
(* progress *)
intros. subst s2. apply progress. auto.
(* simulation *)
intros. subst s1. exists s2'; split; auto. apply step_simulation; auto.
Qed.
(** * A big-step semantics for CompCert C implementing the reduction strategy. *)
Section BIGSTEP.
Variable ge: genv.
(** The execution of a statement produces an ``outcome'', indicating
how the execution terminated: either normally or prematurely
through the execution of a [break], [continue] or [return] statement. *)
Inductive outcome: Type :=
| Out_break: outcome (**r terminated by [break] *)
| Out_continue: outcome (**r terminated by [continue] *)
| Out_normal: outcome (**r terminated normally *)
| Out_return: option (val * type) -> outcome. (**r terminated by [return] *)
Inductive out_normal_or_continue : outcome -> Prop :=
| Out_normal_or_continue_N: out_normal_or_continue Out_normal
| Out_normal_or_continue_C: out_normal_or_continue Out_continue.
Inductive out_break_or_return : outcome -> outcome -> Prop :=
| Out_break_or_return_B: out_break_or_return Out_break Out_normal
| Out_break_or_return_R: forall ov,
out_break_or_return (Out_return ov) (Out_return ov).
Definition outcome_switch (out: outcome) : outcome :=
match out with
| Out_break => Out_normal
| o => o
end.
Definition outcome_result_value (out: outcome) (t: type) (v: val) : Prop :=
match out, t with
| Out_normal, Tvoid => v = Vundef
| Out_return None, Tvoid => v = Vundef
| Out_return (Some (v', ty')), ty => ty <> Tvoid /\ sem_cast v' ty' ty = Some v
| _, _ => False
end.
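(* As an illustration, not part of the original development: in a function
whose return type is [Tvoid], only the outcomes [Out_normal] and
[Out_return None] yield a (necessarily undefined) result value, while
[Out_break] and [Out_continue] are never valid results of a function body.
Two trivial sanity checks on [outcome_switch]: *)
Goal outcome_switch Out_break = Out_normal.
Proof. reflexivity. Qed.
Goal outcome_switch Out_continue = Out_continue.
Proof. reflexivity. Qed.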
(** [eval_expression ge e m1 a t m2 v] describes the evaluation of the
complex expression [a].  [v] is the resulting value, [m2] the final
memory state, and [t] the trace of input/output events performed
during this evaluation. *)
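(* Note that [eval_expr] does not return a value directly: it performs all
side effects of the expression and reduces it to a simple expression [a'],
whose value is then read off by [eval_simple_rvalue] or
[eval_simple_lvalue], in accordance with the reduction strategy. *)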
Inductive eval_expression: env -> mem -> expr -> trace -> mem -> val -> Prop :=
| eval_expression_intro: forall e m a t m' a' v,
eval_expr e m RV a t m' a' -> eval_simple_rvalue ge e m' a' v ->
eval_expression e m a t m' v
with eval_expr: env -> mem -> kind -> expr -> trace -> mem -> expr -> Prop :=
| eval_val: forall e m v ty,
eval_expr e m RV (Eval v ty) E0 m (Eval v ty)
| eval_var: forall e m x ty,
eval_expr e m LV (Evar x ty) E0 m (Evar x ty)
| eval_field: forall e m a t m' a' f ty,
eval_expr e m RV a t m' a' ->
eval_expr e m LV (Efield a f ty) t m' (Efield a' f ty)
| eval_valof: forall e m a t m' a' ty,
type_is_volatile (typeof a) = false ->
eval_expr e m LV a t m' a' ->
eval_expr e m RV (Evalof a ty) t m' (Evalof a' ty)
| eval_valof_volatile: forall e m a t1 m' a' ty b ofs t2 v,
type_is_volatile (typeof a) = true ->
eval_expr e m LV a t1 m' a' ->
eval_simple_lvalue ge e m' a' b ofs ->
deref_loc ge (typeof a) m' b ofs t2 v ->
ty = typeof a ->
eval_expr e m RV (Evalof a ty) (t1 ** t2) m' (Eval v ty)
| eval_deref: forall e m a t m' a' ty,
eval_expr e m RV a t m' a' ->
eval_expr e m LV (Ederef a ty) t m' (Ederef a' ty)
| eval_addrof: forall e m a t m' a' ty,
eval_expr e m LV a t m' a' ->
eval_expr e m RV (Eaddrof a ty) t m' (Eaddrof a' ty)
| eval_unop: forall e m a t m' a' op ty,
eval_expr e m RV a t m' a' ->
eval_expr e m RV (Eunop op a ty) t m' (Eunop op a' ty)
| eval_binop: forall e m a1 t1 m' a1' a2 t2 m'' a2' op ty,
eval_expr e m RV a1 t1 m' a1' -> eval_expr e m' RV a2 t2 m'' a2' ->
eval_expr e m RV (Ebinop op a1 a2 ty) (t1 ** t2) m'' (Ebinop op a1' a2' ty)
| eval_cast: forall e m a t m' a' ty,
eval_expr e m RV a t m' a' ->
eval_expr e m RV (Ecast a ty) t m' (Ecast a' ty)
| eval_seqand_true: forall e m a1 a2 ty t1 m' a1' v1 t2 m'' a2' v2 v' v,
eval_expr e m RV a1 t1 m' a1' -> eval_simple_rvalue ge e m' a1' v1 ->
bool_val v1 (typeof a1) = Some true ->
eval_expr e m' RV a2 t2 m'' a2' -> eval_simple_rvalue ge e m'' a2' v2 ->
sem_cast v2 (typeof a2) type_bool = Some v' ->
sem_cast v' type_bool ty = Some v ->
eval_expr e m RV (Eseqand a1 a2 ty) (t1**t2) m'' (Eval v ty)
| eval_seqand_false: forall e m a1 a2 ty t1 m' a1' v1,
eval_expr e m RV a1 t1 m' a1' -> eval_simple_rvalue ge e m' a1' v1 ->
bool_val v1 (typeof a1) = Some false ->
eval_expr e m RV (Eseqand a1 a2 ty) t1 m' (Eval (Vint Int.zero) ty)
| eval_seqor_false: forall e m a1 a2 ty t1 m' a1' v1 t2 m'' a2' v2 v' v,
eval_expr e m RV a1 t1 m' a1' -> eval_simple_rvalue ge e m' a1' v1 ->
bool_val v1 (typeof a1) = Some false ->
eval_expr e m' RV a2 t2 m'' a2' -> eval_simple_rvalue ge e m'' a2' v2 ->
sem_cast v2 (typeof a2) type_bool = Some v' ->
sem_cast v' type_bool ty = Some v ->
eval_expr e m RV (Eseqor a1 a2 ty) (t1**t2) m'' (Eval v ty)
| eval_seqor_true: forall e m a1 a2 ty t1 m' a1' v1,
eval_expr e m RV a1 t1 m' a1' -> eval_simple_rvalue ge e m' a1' v1 ->
bool_val v1 (typeof a1) = Some true ->
eval_expr e m RV (Eseqor a1 a2 ty) t1 m' (Eval (Vint Int.one) ty)
| eval_condition: forall e m a1 a2 a3 ty t1 m' a1' v1 t2 m'' a' v' b v,
eval_expr e m RV a1 t1 m' a1' -> eval_simple_rvalue ge e m' a1' v1 ->
bool_val v1 (typeof a1) = Some b ->
eval_expr e m' RV (if b then a2 else a3) t2 m'' a' -> eval_simple_rvalue ge e m'' a' v' ->
sem_cast v' (typeof (if b then a2 else a3)) ty = Some v ->
eval_expr e m RV (Econdition a1 a2 a3 ty) (t1**t2) m'' (Eval v ty)
| eval_sizeof: forall e m ty' ty,
eval_expr e m RV (Esizeof ty' ty) E0 m (Esizeof ty' ty)
| eval_alignof: forall e m ty' ty,
eval_expr e m RV (Ealignof ty' ty) E0 m (Ealignof ty' ty)
| eval_assign: forall e m l r ty t1 m1 l' t2 m2 r' b ofs v v' t3 m3,
eval_expr e m LV l t1 m1 l' -> eval_expr e m1 RV r t2 m2 r' ->
eval_simple_lvalue ge e m2 l' b ofs ->
eval_simple_rvalue ge e m2 r' v ->
sem_cast v (typeof r) (typeof l) = Some v' ->
assign_loc ge (typeof l) m2 b ofs v' t3 m3 ->
ty = typeof l ->
eval_expr e m RV (Eassign l r ty) (t1**t2**t3) m3 (Eval v' ty)
| eval_assignop: forall e m op l r tyres ty t1 m1 l' t2 m2 r' b ofs
v1 v2 v3 v4 t3 t4 m3,
eval_expr e m LV l t1 m1 l' -> eval_expr e m1 RV r t2 m2 r' ->
eval_simple_lvalue ge e m2 l' b ofs ->
deref_loc ge (typeof l) m2 b ofs t3 v1 ->
eval_simple_rvalue ge e m2 r' v2 ->
sem_binary_operation op v1 (typeof l) v2 (typeof r) m2 = Some v3 ->
sem_cast v3 tyres (typeof l) = Some v4 ->
assign_loc ge (typeof l) m2 b ofs v4 t4 m3 ->
ty = typeof l ->
eval_expr e m RV (Eassignop op l r tyres ty) (t1**t2**t3**t4) m3 (Eval v4 ty)
| eval_postincr: forall e m id l ty t1 m1 l' b ofs v1 v2 v3 m2 t2 t3,
eval_expr e m LV l t1 m1 l' ->
eval_simple_lvalue ge e m1 l' b ofs ->
deref_loc ge ty m1 b ofs t2 v1 ->
sem_incrdecr id v1 ty = Some v2 ->
sem_cast v2 (typeconv ty) ty = Some v3 ->
assign_loc ge ty m1 b ofs v3 t3 m2 ->
ty = typeof l ->
eval_expr e m RV (Epostincr id l ty) (t1**t2**t3) m2 (Eval v1 ty)
| eval_comma: forall e m r1 r2 ty t1 m1 r1' v1 t2 m2 r2',
eval_expr e m RV r1 t1 m1 r1' ->
eval_simple_rvalue ge e m1 r1' v1 ->
eval_expr e m1 RV r2 t2 m2 r2' ->
ty = typeof r2 ->
eval_expr e m RV (Ecomma r1 r2 ty) (t1**t2) m2 r2'
| eval_call: forall e m rf rargs ty t1 m1 rf' t2 m2 rargs' vf vargs
targs tres fd t3 m3 vres,
eval_expr e m RV rf t1 m1 rf' -> eval_exprlist e m1 rargs t2 m2 rargs' ->
eval_simple_rvalue ge e m2 rf' vf ->
eval_simple_list ge e m2 rargs' targs vargs ->
classify_fun (typeof rf) = fun_case_f targs tres ->
Genv.find_funct ge vf = Some fd ->
type_of_fundef fd = Tfunction targs tres ->
eval_funcall m2 fd vargs t3 m3 vres ->
eval_expr e m RV (Ecall rf rargs ty) (t1**t2**t3) m3 (Eval vres ty)
with eval_exprlist: env -> mem -> exprlist -> trace -> mem -> exprlist -> Prop :=
| eval_nil: forall e m,
eval_exprlist e m Enil E0 m Enil
| eval_cons: forall e m a1 al t1 m1 a1' t2 m2 al',
eval_expr e m RV a1 t1 m1 a1' -> eval_exprlist e m1 al t2 m2 al' ->
eval_exprlist e m (Econs a1 al) (t1**t2) m2 (Econs a1' al')
(** [exec_stmt ge e m1 s t m2 out] describes the execution of
the statement [s]. [out] is the outcome for this execution.
[m1] is the initial memory state, [m2] the final memory state.
[t] is the trace of input/output events performed during this
evaluation. *)
with exec_stmt: env -> mem -> statement -> trace -> mem -> outcome -> Prop :=
| exec_Sskip: forall e m,
exec_stmt e m Sskip
E0 m Out_normal
| exec_Sdo: forall e m a t m' v,
eval_expression e m a t m' v ->
exec_stmt e m (Sdo a)
t m' Out_normal
| exec_Sseq_1: forall e m s1 s2 t1 m1 t2 m2 out,
exec_stmt e m s1 t1 m1 Out_normal ->
exec_stmt e m1 s2 t2 m2 out ->
exec_stmt e m (Ssequence s1 s2)
(t1 ** t2) m2 out
| exec_Sseq_2: forall e m s1 s2 t1 m1 out,
exec_stmt e m s1 t1 m1 out ->
out <> Out_normal ->
exec_stmt e m (Ssequence s1 s2)
t1 m1 out
| exec_Sifthenelse: forall e m a s1 s2 t1 m1 v1 t2 m2 b out,
eval_expression e m a t1 m1 v1 ->
bool_val v1 (typeof a) = Some b ->
exec_stmt e m1 (if b then s1 else s2) t2 m2 out ->
exec_stmt e m (Sifthenelse a s1 s2)
(t1**t2) m2 out
| exec_Sreturn_none: forall e m,
exec_stmt e m (Sreturn None)
E0 m (Out_return None)
| exec_Sreturn_some: forall e m a t m' v,
eval_expression e m a t m' v ->
exec_stmt e m (Sreturn (Some a))
t m' (Out_return (Some(v, typeof a)))
| exec_Sbreak: forall e m,
exec_stmt e m Sbreak
E0 m Out_break
| exec_Scontinue: forall e m,
exec_stmt e m Scontinue
E0 m Out_continue
| exec_Swhile_false: forall e m a s t m' v,
eval_expression e m a t m' v ->
bool_val v (typeof a) = Some false ->
exec_stmt e m (Swhile a s)
t m' Out_normal
| exec_Swhile_stop: forall e m a s t1 m1 v t2 m2 out' out,
eval_expression e m a t1 m1 v ->
bool_val v (typeof a) = Some true ->
exec_stmt e m1 s t2 m2 out' ->
out_break_or_return out' out ->
exec_stmt e m (Swhile a s)
(t1**t2) m2 out
| exec_Swhile_loop: forall e m a s t1 m1 v t2 m2 out1 t3 m3 out,
eval_expression e m a t1 m1 v ->
bool_val v (typeof a) = Some true ->
exec_stmt e m1 s t2 m2 out1 ->
out_normal_or_continue out1 ->
exec_stmt e m2 (Swhile a s) t3 m3 out ->
exec_stmt e m (Swhile a s)
(t1 ** t2 ** t3) m3 out
| exec_Sdowhile_false: forall e m s a t1 m1 out1 t2 m2 v,
exec_stmt e m s t1 m1 out1 ->
out_normal_or_continue out1 ->
eval_expression e m1 a t2 m2 v ->
bool_val v (typeof a) = Some false ->
exec_stmt e m (Sdowhile a s)
(t1 ** t2) m2 Out_normal
| exec_Sdowhile_stop: forall e m s a t m1 out1 out,
exec_stmt e m s t m1 out1 ->
out_break_or_return out1 out ->
exec_stmt e m (Sdowhile a s)
t m1 out
| exec_Sdowhile_loop: forall e m s a t1 m1 out1 t2 m2 v t3 m3 out,
exec_stmt e m s t1 m1 out1 ->
out_normal_or_continue out1 ->
eval_expression e m1 a t2 m2 v ->
bool_val v (typeof a) = Some true ->
exec_stmt e m2 (Sdowhile a s) t3 m3 out ->
exec_stmt e m (Sdowhile a s)
(t1 ** t2 ** t3) m3 out
| exec_Sfor_start: forall e m s a1 a2 a3 out m1 m2 t1 t2,
exec_stmt e m a1 t1 m1 Out_normal ->
exec_stmt e m1 (Sfor Sskip a2 a3 s) t2 m2 out ->
exec_stmt e m (Sfor a1 a2 a3 s)
(t1 ** t2) m2 out
| exec_Sfor_false: forall e m s a2 a3 t m' v,
eval_expression e m a2 t m' v ->
bool_val v (typeof a2) = Some false ->
exec_stmt e m (Sfor Sskip a2 a3 s)
t m' Out_normal
| exec_Sfor_stop: forall e m s a2 a3 t1 m1 v t2 m2 out1 out,
eval_expression e m a2 t1 m1 v ->
bool_val v (typeof a2) = Some true ->
exec_stmt e m1 s t2 m2 out1 ->
out_break_or_return out1 out ->
exec_stmt e m (Sfor Sskip a2 a3 s)
(t1 ** t2) m2 out
| exec_Sfor_loop: forall e m s a2 a3 t1 m1 v t2 m2 out1 t3 m3 t4 m4 out,
eval_expression e m a2 t1 m1 v ->
bool_val v (typeof a2) = Some true ->
exec_stmt e m1 s t2 m2 out1 ->
out_normal_or_continue out1 ->
exec_stmt e m2 a3 t3 m3 Out_normal ->
exec_stmt e m3 (Sfor Sskip a2 a3 s) t4 m4 out ->
exec_stmt e m (Sfor Sskip a2 a3 s)
(t1 ** t2 ** t3 ** t4) m4 out
| exec_Sswitch: forall e m a sl t1 m1 n t2 m2 out,
eval_expression e m a t1 m1 (Vint n) ->
exec_stmt e m1 (seq_of_labeled_statement (select_switch n sl)) t2 m2 out ->
exec_stmt e m (Sswitch a sl)
(t1 ** t2) m2 (outcome_switch out)
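(* Observe that the loop rules unfold one iteration at a time:
[exec_Swhile_loop], [exec_Sdowhile_loop] and [exec_Sfor_loop] execute the
body once (with a normal or [continue] outcome) and then recurse on the
same loop statement, while [out_break_or_return] turns a [break] into
normal termination of the loop and propagates [return] outcomes. *)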
(** [eval_funcall m1 fd args t m2 res] describes the invocation of
function [fd] with arguments [args]. [res] is the value returned
by the call. *)
with eval_funcall: mem -> fundef -> list val -> trace -> mem -> val -> Prop :=
| eval_funcall_internal: forall m f vargs t e m1 m2 m3 out vres m4,
list_norepet (var_names f.(fn_params) ++ var_names f.(fn_vars)) ->
alloc_variables empty_env m (f.(fn_params) ++ f.(fn_vars)) e m1 ->
bind_parameters ge e m1 f.(fn_params) vargs m2 ->
exec_stmt e m2 f.(fn_body) t m3 out ->
outcome_result_value out f.(fn_return) vres ->
Mem.free_list m3 (blocks_of_env e) = Some m4 ->
eval_funcall m (Internal f) vargs t m4 vres
| eval_funcall_external: forall m ef targs tres vargs t vres m',
external_call ef ge vargs m t vres m' ->
eval_funcall m (External ef targs tres) vargs t m' vres.
Scheme eval_expression_ind5 := Minimality for eval_expression Sort Prop
with eval_expr_ind5 := Minimality for eval_expr Sort Prop
with eval_exprlist_ind5 := Minimality for eval_exprlist Sort Prop
with exec_stmt_ind5 := Minimality for exec_stmt Sort Prop
with eval_funcall_ind5 := Minimality for eval_funcall Sort Prop.
Combined Scheme bigstep_induction from
eval_expression_ind5, eval_expr_ind5, eval_exprlist_ind5,
exec_stmt_ind5, eval_funcall_ind5.
(** [evalinf_expr ge e m1 K a T] denotes the fact that expression [a]
diverges in initial state [m1]. [T] is the trace of input/output
events performed during this evaluation. *)
CoInductive evalinf_expr: env -> mem -> kind -> expr -> traceinf -> Prop :=
| evalinf_field: forall e m a t f ty,
evalinf_expr e m RV a t ->
evalinf_expr e m LV (Efield a f ty) t
| evalinf_valof: forall e m a t ty,
evalinf_expr e m LV a t ->
evalinf_expr e m RV (Evalof a ty) t
| evalinf_deref: forall e m a t ty,
evalinf_expr e m RV a t ->
evalinf_expr e m LV (Ederef a ty) t
| evalinf_addrof: forall e m a t ty,
evalinf_expr e m LV a t ->
evalinf_expr e m RV (Eaddrof a ty) t
| evalinf_unop: forall e m a t op ty,
evalinf_expr e m RV a t ->
evalinf_expr e m RV (Eunop op a ty) t
| evalinf_binop_left: forall e m a1 t1 a2 op ty,
evalinf_expr e m RV a1 t1 ->
evalinf_expr e m RV (Ebinop op a1 a2 ty) t1
| evalinf_binop_right: forall e m a1 t1 m' a1' a2 t2 op ty,
eval_expr e m RV a1 t1 m' a1' -> evalinf_expr e m' RV a2 t2 ->
evalinf_expr e m RV (Ebinop op a1 a2 ty) (t1 *** t2)
| evalinf_cast: forall e m a t ty,
evalinf_expr e m RV a t ->
evalinf_expr e m RV (Ecast a ty) t
| evalinf_seqand: forall e m a1 a2 ty t1,
evalinf_expr e m RV a1 t1 ->
evalinf_expr e m RV (Eseqand a1 a2 ty) t1
| evalinf_seqand_2: forall e m a1 a2 ty t1 m' a1' v1 t2,
eval_expr e m RV a1 t1 m' a1' -> eval_simple_rvalue ge e m' a1' v1 ->
bool_val v1 (typeof a1) = Some true ->
evalinf_expr e m' RV a2 t2 ->
evalinf_expr e m RV (Eseqand a1 a2 ty) (t1***t2)
| evalinf_seqor: forall e m a1 a2 ty t1,
evalinf_expr e m RV a1 t1 ->
evalinf_expr e m RV (Eseqor a1 a2 ty) t1
| evalinf_seqor_2: forall e m a1 a2 ty t1 m' a1' v1 t2,
eval_expr e m RV a1 t1 m' a1' -> eval_simple_rvalue ge e m' a1' v1 ->
bool_val v1 (typeof a1) = Some false ->
evalinf_expr e m' RV a2 t2 ->
evalinf_expr e m RV (Eseqor a1 a2 ty) (t1***t2)
| evalinf_condition: forall e m a1 a2 a3 ty t1,
evalinf_expr e m RV a1 t1 ->
evalinf_expr e m RV (Econdition a1 a2 a3 ty) t1
| evalinf_condition_2: forall e m a1 a2 a3 ty t1 m' a1' v1 t2 b,
eval_expr e m RV a1 t1 m' a1' -> eval_simple_rvalue ge e m' a1' v1 ->
bool_val v1 (typeof a1) = Some b ->
evalinf_expr e m' RV (if b then a2 else a3) t2 ->
evalinf_expr e m RV (Econdition a1 a2 a3 ty) (t1***t2)
| evalinf_assign_left: forall e m a1 t1 a2 ty,
evalinf_expr e m LV a1 t1 ->
evalinf_expr e m RV (Eassign a1 a2 ty) t1
| evalinf_assign_right: forall e m a1 t1 m' a1' a2 t2 ty,
eval_expr e m LV a1 t1 m' a1' -> evalinf_expr e m' RV a2 t2 ->
evalinf_expr e m RV (Eassign a1 a2 ty) (t1 *** t2)
| evalinf_assignop_left: forall e m a1 t1 a2 op tyres ty,
evalinf_expr e m LV a1 t1 ->
evalinf_expr e m RV (Eassignop op a1 a2 tyres ty) t1
| evalinf_assignop_right: forall e m a1 t1 m' a1' a2 t2 op tyres ty,
eval_expr e m LV a1 t1 m' a1' -> evalinf_expr e m' RV a2 t2 ->
evalinf_expr e m RV (Eassignop op a1 a2 tyres ty) (t1 *** t2)
| evalinf_postincr: forall e m a t id ty,
evalinf_expr e m LV a t ->
evalinf_expr e m RV (Epostincr id a ty) t
| evalinf_comma_left: forall e m a1 t1 a2 ty,
evalinf_expr e m RV a1 t1 ->
evalinf_expr e m RV (Ecomma a1 a2 ty) t1
| evalinf_comma_right: forall e m a1 t1 m1 a1' v1 a2 t2 ty,
eval_expr e m RV a1 t1 m1 a1' -> eval_simple_rvalue ge e m1 a1' v1 ->
ty = typeof a2 ->
evalinf_expr e m1 RV a2 t2 ->
evalinf_expr e m RV (Ecomma a1 a2 ty) (t1 *** t2)
| evalinf_call_left: forall e m a1 t1 a2 ty,
evalinf_expr e m RV a1 t1 ->
evalinf_expr e m RV (Ecall a1 a2 ty) t1
| evalinf_call_right: forall e m a1 t1 m1 a1' a2 t2 ty,
eval_expr e m RV a1 t1 m1 a1' ->
evalinf_exprlist e m1 a2 t2 ->
evalinf_expr e m RV (Ecall a1 a2 ty) (t1 *** t2)
| evalinf_call: forall e m rf rargs ty t1 m1 rf' t2 m2 rargs' vf vargs
targs tres fd t3,
eval_expr e m RV rf t1 m1 rf' -> eval_exprlist e m1 rargs t2 m2 rargs' ->
eval_simple_rvalue ge e m2 rf' vf ->
eval_simple_list ge e m2 rargs' targs vargs ->
classify_fun (typeof rf) = fun_case_f targs tres ->
Genv.find_funct ge vf = Some fd ->
type_of_fundef fd = Tfunction targs tres ->
evalinf_funcall m2 fd vargs t3 ->
evalinf_expr e m RV (Ecall rf rargs ty) (t1***t2***t3)
with evalinf_exprlist: env -> mem -> exprlist -> traceinf -> Prop :=
| evalinf_cons_left: forall e m a1 al t1,
evalinf_expr e m RV a1 t1 ->
evalinf_exprlist e m (Econs a1 al) t1
| evalinf_cons_right: forall e m a1 al t1 m1 a1' t2,
eval_expr e m RV a1 t1 m1 a1' -> evalinf_exprlist e m1 al t2 ->
evalinf_exprlist e m (Econs a1 al) (t1***t2)
(** [execinf_stmt ge e m1 s t] describes the diverging execution of
the statement [s]. *)
with execinf_stmt: env -> mem -> statement -> traceinf -> Prop :=
| execinf_Sdo: forall e m a t,
evalinf_expr e m RV a t ->
execinf_stmt e m (Sdo a) t
| execinf_Sseq_1: forall e m s1 s2 t1,
execinf_stmt e m s1 t1 ->
execinf_stmt e m (Ssequence s1 s2) t1
| execinf_Sseq_2: forall e m s1 s2 t1 m1 t2,
exec_stmt e m s1 t1 m1 Out_normal ->
execinf_stmt e m1 s2 t2 ->
execinf_stmt e m (Ssequence s1 s2) (t1***t2)
| execinf_Sifthenelse_test: forall e m a s1 s2 t1,
evalinf_expr e m RV a t1 ->
execinf_stmt e m (Sifthenelse a s1 s2) t1
| execinf_Sifthenelse: forall e m a s1 s2 t1 m1 v1 t2 b,
eval_expression e m a t1 m1 v1 ->
bool_val v1 (typeof a) = Some b ->
execinf_stmt e m1 (if b then s1 else s2) t2 ->
execinf_stmt e m (Sifthenelse a s1 s2) (t1***t2)
| execinf_Sreturn_some: forall e m a t,
evalinf_expr e m RV a t ->
execinf_stmt e m (Sreturn (Some a)) t
| execinf_Swhile_test: forall e m a s t1,
evalinf_expr e m RV a t1 ->
execinf_stmt e m (Swhile a s) t1
| execinf_Swhile_body: forall e m a s t1 m1 v t2,
eval_expression e m a t1 m1 v ->
bool_val v (typeof a) = Some true ->
execinf_stmt e m1 s t2 ->
execinf_stmt e m (Swhile a s) (t1***t2)
| execinf_Swhile_loop: forall e m a s t1 m1 v t2 m2 out1 t3,
eval_expression e m a t1 m1 v ->
bool_val v (typeof a) = Some true ->
exec_stmt e m1 s t2 m2 out1 ->
out_normal_or_continue out1 ->
execinf_stmt e m2 (Swhile a s) t3 ->
execinf_stmt e m (Swhile a s) (t1***t2***t3)
| execinf_Sdowhile_body: forall e m s a t1,
execinf_stmt e m s t1 ->
execinf_stmt e m (Sdowhile a s) t1
| execinf_Sdowhile_test: forall e m s a t1 m1 out1 t2,
exec_stmt e m s t1 m1 out1 ->
out_normal_or_continue out1 ->
evalinf_expr e m1 RV a t2 ->
execinf_stmt e m (Sdowhile a s) (t1***t2)
| execinf_Sdowhile_loop: forall e m s a t1 m1 out1 t2 m2 v t3,
exec_stmt e m s t1 m1 out1 ->
out_normal_or_continue out1 ->
eval_expression e m1 a t2 m2 v ->
bool_val v (typeof a) = Some true ->
execinf_stmt e m2 (Sdowhile a s) t3 ->
execinf_stmt e m (Sdowhile a s) (t1***t2***t3)
| execinf_Sfor_start_1: forall e m s a1 a2 a3 t1,
execinf_stmt e m a1 t1 ->
execinf_stmt e m (Sfor a1 a2 a3 s) t1
| execinf_Sfor_start_2: forall e m s a1 a2 a3 m1 t1 t2,
exec_stmt e m a1 t1 m1 Out_normal -> a1 <> Sskip ->
execinf_stmt e m1 (Sfor Sskip a2 a3 s) t2 ->
execinf_stmt e m (Sfor a1 a2 a3 s) (t1***t2)
| execinf_Sfor_test: forall e m s a2 a3 t,
evalinf_expr e m RV a2 t ->
execinf_stmt e m (Sfor Sskip a2 a3 s) t
| execinf_Sfor_body: forall e m s a2 a3 t1 m1 v t2,
eval_expression e m a2 t1 m1 v ->
bool_val v (typeof a2) = Some true ->
execinf_stmt e m1 s t2 ->
execinf_stmt e m (Sfor Sskip a2 a3 s) (t1***t2)
| execinf_Sfor_next: forall e m s a2 a3 t1 m1 v t2 m2 out1 t3,
eval_expression e m a2 t1 m1 v ->
bool_val v (typeof a2) = Some true ->
exec_stmt e m1 s t2 m2 out1 ->
out_normal_or_continue out1 ->
execinf_stmt e m2 a3 t3 ->
execinf_stmt e m (Sfor Sskip a2 a3 s) (t1***t2***t3)
| execinf_Sfor_loop: forall e m s a2 a3 t1 m1 v t2 m2 out1 t3 m3 t4,
eval_expression e m a2 t1 m1 v ->
bool_val v (typeof a2) = Some true ->
exec_stmt e m1 s t2 m2 out1 ->
out_normal_or_continue out1 ->
exec_stmt e m2 a3 t3 m3 Out_normal ->
execinf_stmt e m3 (Sfor Sskip a2 a3 s) t4 ->
execinf_stmt e m (Sfor Sskip a2 a3 s) (t1***t2***t3***t4)
| execinf_Sswitch_expr: forall e m a sl t1,
evalinf_expr e m RV a t1 ->
execinf_stmt e m (Sswitch a sl) t1
| execinf_Sswitch_body: forall e m a sl t1 m1 n t2,
eval_expression e m a t1 m1 (Vint n) ->
execinf_stmt e m1 (seq_of_labeled_statement (select_switch n sl)) t2 ->
execinf_stmt e m (Sswitch a sl) (t1***t2)
(** [evalinf_funcall m1 fd args t] describes a diverging
invocation of function [fd] with arguments [args]. *)
with evalinf_funcall: mem -> fundef -> list val -> traceinf -> Prop :=
| evalinf_funcall_internal: forall m f vargs t e m1 m2,
list_norepet (var_names f.(fn_params) ++ var_names f.(fn_vars)) ->
alloc_variables empty_env m (f.(fn_params) ++ f.(fn_vars)) e m1 ->
bind_parameters ge e m1 f.(fn_params) vargs m2 ->
execinf_stmt e m2 f.(fn_body) t ->
evalinf_funcall m (Internal f) vargs t.
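(* The coinductive predicates above mirror the inductive big-step rules,
but record only the (possibly infinite) trace of events: a diverging
evaluation has no final memory state and no result value. *)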
(** ** Implication from big-step semantics to transition semantics *)
Inductive outcome_state_match
(e: env) (m: mem) (f: function) (k: cont): outcome -> state -> Prop :=
| osm_normal:
outcome_state_match e m f k Out_normal (State f Sskip k e m)
| osm_break:
outcome_state_match e m f k Out_break (State f Sbreak k e m)
| osm_continue:
outcome_state_match e m f k Out_continue (State f Scontinue k e m)
| osm_return_none: forall k',
call_cont k' = call_cont k ->
outcome_state_match e m f k
(Out_return None) (State f (Sreturn None) k' e m)
| osm_return_some: forall v ty k',
call_cont k' = call_cont k ->
outcome_state_match e m f k
(Out_return (Some (v, ty))) (ExprState f (Eval v ty) (Kreturn k') e m).
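(* Intuitively, the two [return] cases are matched only up to the call part
of the continuation: the small-step execution may still have to discard
enclosing [Kseq], loop or switch continuations before the return actually
takes place, hence the premise [call_cont k' = call_cont k]. *)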
Lemma is_call_cont_call_cont:
forall k, is_call_cont k -> call_cont k = k.
Proof.
destruct k; simpl; intros; contradiction || auto.
Qed.
Lemma leftcontext_compose:
forall k2 k3 C2, leftcontext k2 k3 C2 ->
forall k1 C1, leftcontext k1 k2 C1 ->
leftcontext k1 k3 (fun x => C2(C1 x))
with leftcontextlist_compose:
forall k2 C2, leftcontextlist k2 C2 ->
forall k1 C1, leftcontext k1 k2 C1 ->
leftcontextlist k1 (fun x => C2(C1 x)).
Proof.
induction 1; intros; try (constructor; eauto).
replace (fun x => C1 x) with C1. auto. apply extensionality; auto.
induction 1; intros; constructor; eauto.
Qed.
Lemma exprlist_app_leftcontext:
forall rl1 rl2,
simplelist rl1 = true -> leftcontextlist RV (fun x => exprlist_app rl1 (Econs x rl2)).
Proof.
induction rl1; simpl; intros.
apply lctx_list_head. constructor.
destruct (andb_prop _ _ H). apply lctx_list_tail. auto. auto.
Qed.
Lemma exprlist_app_simple:
forall rl1 rl2,
simplelist (exprlist_app rl1 rl2) = simplelist rl1 && simplelist rl2.
Proof.
induction rl1; intros; simpl. auto. rewrite IHrl1. apply andb_assoc.
Qed.
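(* The following lemma packages five simulation statements (general
expressions, expressions in context, expression lists, statements and
function calls) into a single conjunction, so that they can be proved
simultaneously by the combined induction scheme [bigstep_induction]. *)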
Lemma bigstep_to_steps:
(forall e m a t m' v,
eval_expression e m a t m' v ->
forall f k,
star step ge (ExprState f a k e m) t (ExprState f (Eval v (typeof a)) k e m'))
/\(forall e m K a t m' a',
eval_expr e m K a t m' a' ->
forall C f k, leftcontext K RV C ->
simple a' = true /\ typeof a' = typeof a /\
star step ge (ExprState f (C a) k e m) t (ExprState f (C a') k e m'))
/\(forall e m al t m' al',
eval_exprlist e m al t m' al' ->
forall a1 al2 ty C f k, leftcontext RV RV C -> simple a1 = true -> simplelist al2 = true ->
simplelist al' = true /\
star step ge (ExprState f (C (Ecall a1 (exprlist_app al2 al) ty)) k e m)
t (ExprState f (C (Ecall a1 (exprlist_app al2 al') ty)) k e m'))
/\(forall e m s t m' out,
exec_stmt e m s t m' out ->
forall f k,
exists S,
star step ge (State f s k e m) t S /\ outcome_state_match e m' f k out S)
/\(forall m fd args t m' res,
eval_funcall m fd args t m' res ->
forall k,
is_call_cont k ->
star step ge (Callstate fd args k m) t (Returnstate res k m')).
Proof.
apply bigstep_induction; intros.
(* expression, general *)
exploit (H0 (fun x => x) f k). constructor. intros [A [B C]].
assert (match a' with Eval _ _ => False | _ => True end ->
star step ge (ExprState f a k e m) t (ExprState f (Eval v (typeof a)) k e m')).
intro. eapply star_right. eauto. left. eapply step_expr; eauto. traceEq.
destruct a'; auto.
simpl in B. rewrite B in C. inv H1. auto.
(* val *)
simpl; intuition. apply star_refl.
(* var *)
simpl; intuition. apply star_refl.
(* field *)
exploit (H0 (fun x => C(Efield x f ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition; eauto.
(* valof *)
exploit (H1 (fun x => C(Evalof x ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition; eauto. rewrite A; rewrite B; rewrite H; auto.
(* valof volatile *)
exploit (H1 (fun x => C(Evalof x ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition.
eapply star_right. eexact D.
left. eapply step_rvalof_volatile; eauto. rewrite H4; eauto. congruence. congruence.
traceEq.
(* deref *)
exploit (H0 (fun x => C(Ederef x ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition; eauto.
(* addrof *)
exploit (H0 (fun x => C(Eaddrof x ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition; eauto.
(* unop *)
exploit (H0 (fun x => C(Eunop op x ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition; eauto.
(* binop *)
exploit (H0 (fun x => C(Ebinop op x a2 ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
exploit (H2 (fun x => C(Ebinop op a1' x ty))).
eapply leftcontext_compose; eauto. repeat constructor. auto. intros [E [F G]].
simpl; intuition. eapply star_trans; eauto.
(* cast *)
exploit (H0 (fun x => C(Ecast x ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition; eauto.
(* seqand true *)
exploit (H0 (fun x => C(Eseqand x a2 ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
exploit (H4 (fun x => C(Eparen (Eparen x type_bool) ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [E [F G]].
simpl; intuition. eapply star_trans. eexact D.
eapply star_left. left; eapply step_seqand_true; eauto. rewrite B; auto.
eapply star_trans. eexact G.
set (C' := fun x => C (Eparen x ty)).
change (C (Eparen (Eparen a2' type_bool) ty)) with (C' (Eparen a2' type_bool)).
eapply star_two.
left; eapply step_paren; eauto. unfold C'; eapply leftcontext_compose; eauto. repeat constructor.
rewrite F; eauto.
unfold C'. left; eapply step_paren; eauto. constructor.
eauto. eauto. eauto. traceEq.
(* seqand false *)
exploit (H0 (fun x => C(Eseqand x a2 ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition. eapply star_right. eexact D.
left; eapply step_seqand_false; eauto. rewrite B; auto.
traceEq.
(* seqor false *)
exploit (H0 (fun x => C(Eseqor x a2 ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
exploit (H4 (fun x => C(Eparen (Eparen x type_bool) ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [E [F G]].
simpl; intuition. eapply star_trans. eexact D.
eapply star_left. left; eapply step_seqor_false; eauto. rewrite B; auto.
eapply star_trans. eexact G.
set (C' := fun x => C (Eparen x ty)).
change (C (Eparen (Eparen a2' type_bool) ty)) with (C' (Eparen a2' type_bool)).
eapply star_two.
left; eapply step_paren; eauto. unfold C'; eapply leftcontext_compose; eauto. repeat constructor.
rewrite F; eauto.
unfold C'. left; eapply step_paren; eauto. constructor.
eauto. eauto. eauto. traceEq.
(* seqor true *)
exploit (H0 (fun x => C(Eseqor x a2 ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition. eapply star_right. eexact D.
left; eapply step_seqor_true; eauto. rewrite B; auto.
traceEq.
(* condition *)
exploit (H0 (fun x => C(Econdition x a2 a3 ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
exploit (H4 (fun x => C(Eparen x ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [E [F G]].
simpl. split; auto. split; auto.
eapply star_trans. eexact D.
eapply star_left. left; eapply step_condition; eauto. rewrite B; eauto.
eapply star_right. eexact G. left; eapply step_paren; eauto. congruence.
reflexivity. reflexivity. traceEq.
(* sizeof *)
simpl; intuition. apply star_refl.
(* alignof *)
simpl; intuition. apply star_refl.
(* assign *)
exploit (H0 (fun x => C(Eassign x r ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
exploit (H2 (fun x => C(Eassign l' x ty))).
eapply leftcontext_compose; eauto. repeat constructor. auto. intros [E [F G]].
simpl; intuition.
eapply star_trans. eexact D.
eapply star_right. eexact G.
left. eapply step_assign; eauto. congruence. rewrite B; eauto. congruence.
reflexivity. traceEq.
(* assignop *)
exploit (H0 (fun x => C(Eassignop op x r tyres ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
exploit (H2 (fun x => C(Eassignop op l' x tyres ty))).
eapply leftcontext_compose; eauto. repeat constructor. auto. intros [E [F G]].
simpl; intuition.
eapply star_trans. eexact D.
eapply star_right. eexact G.
left. eapply step_assignop; eauto.
rewrite B; eauto. rewrite B; rewrite F; eauto. congruence. rewrite B; eauto. congruence.
reflexivity. traceEq.
(* postincr *)
exploit (H0 (fun x => C(Epostincr id x ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
simpl; intuition.
eapply star_right. eexact D.
left. eapply step_postincr; eauto. congruence.
traceEq.
(* comma *)
exploit (H0 (fun x => C(Ecomma x r2 ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
exploit (H3 C). auto. intros [E [F G]].
simpl; intuition. congruence.
eapply star_trans. eexact D.
eapply star_left. left; eapply step_comma; eauto.
eexact G.
reflexivity. traceEq.
(* call *)
exploit (H0 (fun x => C(Ecall x rargs ty))).
eapply leftcontext_compose; eauto. repeat constructor. intros [A [B D]].
exploit (H2 rf' Enil ty C); eauto. intros [E F].
simpl; intuition.
eapply star_trans. eexact D.
eapply star_trans. eexact F.
eapply star_left. left; eapply step_call; eauto. congruence.
eapply star_right. eapply H9. red; auto.
right; constructor.
reflexivity. reflexivity. reflexivity. traceEq.
(* nil *)
simpl; intuition. apply star_refl.
(* cons *)
exploit (H0 (fun x => C(Ecall a0 (exprlist_app al2 (Econs x al)) ty))).
eapply leftcontext_compose; eauto. repeat constructor. auto.
apply exprlist_app_leftcontext; auto. intros [A [B D]].
exploit (H2 a0 (exprlist_app al2 (Econs a1' Enil))); eauto.
rewrite exprlist_app_simple. simpl. rewrite H5; rewrite A; auto.
repeat rewrite exprlist_app_assoc. simpl.
intros [E F].
simpl; intuition.
eapply star_trans; eauto.
(* skip *)
econstructor; split. apply star_refl. constructor.
(* do *)
econstructor; split.
eapply star_left. right; constructor.
eapply star_right. apply H0. right; constructor.
reflexivity. traceEq.
constructor.
(* sequence 2 *)
destruct (H0 f (Kseq s2 k)) as [S1 [A1 B1]]; auto. inv B1.
destruct (H2 f k) as [S2 [A2 B2]]; auto.
econstructor; split.
eapply star_left. right; econstructor.
eapply star_trans. eexact A1.
eapply star_left. right; constructor. eexact A2.
reflexivity. reflexivity. traceEq.
auto.
(* sequence 1 *)
destruct (H0 f (Kseq s2 k)) as [S1 [A1 B1]]; auto.
set (S2 :=
match out with
| Out_break => State f Sbreak k e m1
| Out_continue => State f Scontinue k e m1
| _ => S1
end).
exists S2; split.
eapply star_left. right; econstructor.
eapply star_trans. eexact A1.
unfold S2; inv B1.
congruence.
apply star_one. right; apply step_break_seq.
apply star_one. right; apply step_continue_seq.
apply star_refl.
apply star_refl.
reflexivity. traceEq.
unfold S2; inv B1; congruence || econstructor; eauto.
(* ifthenelse *)
destruct (H3 f k) as [S1 [A1 B1]]; auto.
exists S1; split.
eapply star_left. right; apply step_ifthenelse_1.
eapply star_trans. eapply H0.
eapply star_left. 2: eexact A1. right; eapply step_ifthenelse_2; eauto.
reflexivity. reflexivity. traceEq.
auto.
(* return none *)
econstructor; split. apply star_refl. constructor. auto.
(* return some *)
econstructor; split.
eapply star_left. right; apply step_return_1.
eapply H0. traceEq.
econstructor; eauto.
(* break *)
econstructor; split. apply star_refl. constructor.
(* continue *)
econstructor; split. apply star_refl. constructor.
(* while false *)
econstructor; split.
eapply star_left. right; apply step_while.
eapply star_right. apply H0. right; eapply step_while_false; eauto.
reflexivity. traceEq.
constructor.
(* while stop *)
destruct (H3 f (Kwhile2 a s k)) as [S1 [A1 B1]].
set (S2 :=
match out' with
| Out_break => State f Sskip k e m2
| _ => S1
end).
exists S2; split.
eapply star_left. right; apply step_while.
eapply star_trans. apply H0.
eapply star_left. right; eapply step_while_true; eauto.
eapply star_trans. eexact A1.
unfold S2. inversion H4; subst.
inv B1. apply star_one. right; constructor.
apply star_refl.
reflexivity. reflexivity. reflexivity. traceEq.
unfold S2. inversion H4; subst. constructor. inv B1; econstructor; eauto.
(* while loop *)
destruct (H3 f (Kwhile2 a s k)) as [S1 [A1 B1]].
destruct (H6 f k) as [S2 [A2 B2]]; auto.
exists S2; split.
eapply star_left. right; apply step_while.
eapply star_trans. apply H0.
eapply star_left. right; eapply step_while_true; eauto.
eapply star_trans. eexact A1.
eapply star_left.
inv H4; inv B1; right; apply step_skip_or_continue_while; auto.
eexact A2.
reflexivity. reflexivity. reflexivity. reflexivity. traceEq.
auto.
(* dowhile false *)
destruct (H0 f (Kdowhile1 a s k)) as [S1 [A1 B1]].
exists (State f Sskip k e m2); split.
eapply star_left. right; constructor.
eapply star_trans. eexact A1.
eapply star_left.
inv H1; inv B1; right; eapply step_skip_or_continue_dowhile; eauto.
eapply star_right. apply H3.
right; eapply step_dowhile_false; eauto.
reflexivity. reflexivity. reflexivity. traceEq.
constructor.
(* dowhile stop *)
destruct (H0 f (Kdowhile1 a s k)) as [S1 [A1 B1]].
set (S2 :=
match out1 with
| Out_break => State f Sskip k e m1
| _ => S1
end).
exists S2; split.
eapply star_left. right; apply step_dowhile.
eapply star_trans. eexact A1.
unfold S2. inversion H1; subst.
inv B1. apply star_one. right; constructor.
apply star_refl.
reflexivity. traceEq.
unfold S2. inversion H1; subst. constructor. inv B1; econstructor; eauto.
(* dowhile loop *)
destruct (H0 f (Kdowhile1 a s k)) as [S1 [A1 B1]].
destruct (H6 f k) as [S2 [A2 B2]]; auto.
exists S2; split.
eapply star_left. right; constructor.
eapply star_trans. eexact A1.
eapply star_left.
inv H1; inv B1; right; eapply step_skip_or_continue_dowhile; eauto.
eapply star_trans. apply H3.
eapply star_left. right; eapply step_dowhile_true; eauto.
eexact A2.
reflexivity. reflexivity. reflexivity. reflexivity. traceEq.
auto.
(* for start *)
assert (a1 = Sskip \/ a1 <> Sskip). destruct a1; auto; right; congruence.
destruct H3.
subst a1. inv H. apply H2; auto.
destruct (H0 f (Kseq (Sfor Sskip a2 a3 s) k)) as [S1 [A1 B1]]; auto. inv B1.
destruct (H2 f k) as [S2 [A2 B2]]; auto.
exists S2; split.
eapply star_left. right; apply step_for_start; auto.
eapply star_trans. eexact A1.
eapply star_left. right; constructor. eexact A2.
reflexivity. reflexivity. traceEq.
auto.
(* for false *)
econstructor; split.
eapply star_left. right; apply step_for.
eapply star_right. apply H0. right; eapply step_for_false; eauto.
reflexivity. traceEq.
constructor.
(* for stop *)
destruct (H3 f (Kfor3 a2 a3 s k)) as [S1 [A1 B1]].
set (S2 :=
match out1 with
| Out_break => State f Sskip k e m2
| _ => S1
end).
exists S2; split.
eapply star_left. right; apply step_for.
eapply star_trans. apply H0.
eapply star_left. right; eapply step_for_true; eauto.
eapply star_trans. eexact A1.
unfold S2. inversion H4; subst.
inv B1. apply star_one. right; constructor.
apply star_refl.
reflexivity. reflexivity. reflexivity. traceEq.
unfold S2. inversion H4; subst. constructor. inv B1; econstructor; eauto.
(* for loop *)
destruct (H3 f (Kfor3 a2 a3 s k)) as [S1 [A1 B1]].
destruct (H6 f (Kfor4 a2 a3 s k)) as [S2 [A2 B2]]; auto. inv B2.
destruct (H8 f k) as [S3 [A3 B3]]; auto.
exists S3; split.
eapply star_left. right; apply step_for.
eapply star_trans. apply H0.
eapply star_left. right; eapply step_for_true; eauto.
eapply star_trans. eexact A1.
eapply star_trans with (s2 := State f a3 (Kfor4 a2 a3 s k) e m2).
inv H4; inv B1.
apply star_one. right; constructor; auto.
apply star_one. right; constructor; auto.
eapply star_trans. eexact A2.
eapply star_left. right; constructor.
eexact A3.
reflexivity. reflexivity. reflexivity. reflexivity.
reflexivity. reflexivity. traceEq.
auto.
(* switch *)
destruct (H2 f (Kswitch2 k)) as [S1 [A1 B1]].
set (S2 :=
match out with
| Out_normal => State f Sskip k e m2
| Out_break => State f Sskip k e m2
| Out_continue => State f Scontinue k e m2
| _ => S1
end).
exists S2; split.
eapply star_left. right; eapply step_switch.
eapply star_trans. apply H0.
eapply star_left. right; eapply step_expr_switch.
eapply star_trans. eexact A1.
unfold S2; inv B1.
apply star_one. right; constructor. auto.
apply star_one. right; constructor. auto.
apply star_one. right; constructor.
apply star_refl.
apply star_refl.
reflexivity. reflexivity. reflexivity. traceEq.
unfold S2. inv B1; simpl; econstructor; eauto.
(* call internal *)
destruct (H3 f k) as [S1 [A1 B1]].
eapply star_left. right; eapply step_internal_function; eauto.
eapply star_right. eexact A1.
inv B1; simpl in H4; try contradiction.
(* Out_normal *)
assert (fn_return f = Tvoid /\ vres = Vundef).
destruct (fn_return f); auto || contradiction.
destruct H7 as [P Q]. subst vres. right; eapply step_skip_call; eauto.
(* Out_return None *)
assert (fn_return f = Tvoid /\ vres = Vundef).
destruct (fn_return f); auto || contradiction.
destruct H8 as [P Q]. subst vres.
rewrite <- (is_call_cont_call_cont k H6). rewrite <- H7.
right; apply step_return_0; auto.
(* Out_return Some *)
destruct H4. rewrite <- (is_call_cont_call_cont k H6). rewrite <- H7.
right; eapply step_return_2; eauto.
reflexivity. traceEq.
(* call external *)
apply star_one. right; apply step_external_function; auto.
Qed.
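(* The five components of [bigstep_to_steps], restated as standalone
lemmas by projection. *)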
Lemma eval_expression_to_steps:
forall e m a t m' v,
eval_expression e m a t m' v ->
forall f k,
star step ge (ExprState f a k e m) t (ExprState f (Eval v (typeof a)) k e m').
Proof (proj1 bigstep_to_steps).
Lemma eval_expr_to_steps:
forall e m K a t m' a',
eval_expr e m K a t m' a' ->
forall C f k, leftcontext K RV C ->
simple a' = true /\ typeof a' = typeof a /\
star step ge (ExprState f (C a) k e m) t (ExprState f (C a') k e m').
Proof (proj1 (proj2 bigstep_to_steps)).
Lemma eval_exprlist_to_steps:
forall e m al t m' al',
eval_exprlist e m al t m' al' ->
forall a1 al2 ty C f k, leftcontext RV RV C -> simple a1 = true -> simplelist al2 = true ->
simplelist al' = true /\
star step ge (ExprState f (C (Ecall a1 (exprlist_app al2 al) ty)) k e m)
t (ExprState f (C (Ecall a1 (exprlist_app al2 al') ty)) k e m').
Proof (proj1 (proj2 (proj2 bigstep_to_steps))).
Lemma exec_stmt_to_steps:
forall e m s t m' out,
exec_stmt e m s t m' out ->
forall f k,
exists S,
star step ge (State f s k e m) t S /\ outcome_state_match e m' f k out S.
Proof (proj1 (proj2 (proj2 (proj2 bigstep_to_steps)))).
Lemma eval_funcall_to_steps:
forall m fd args t m' res,
eval_funcall m fd args t m' res ->
forall k,
is_call_cont k ->
star step ge (Callstate fd args k m) t (Returnstate res k m').
Proof (proj2 (proj2 (proj2 (proj2 bigstep_to_steps)))).
Fixpoint esize (a: expr) : nat :=
match a with
| Eloc _ _ _ => 1%nat
| Evar _ _ => 1%nat
| Ederef r1 _ => S(esize r1)
| Efield l1 _ _ => S(esize l1)
| Eval _ _ => O
| Evalof l1 _ => S(esize l1)
| Eaddrof l1 _ => S(esize l1)
| Eunop _ r1 _ => S(esize r1)
| Ebinop _ r1 r2 _ => S(esize r1 + esize r2)%nat
| Ecast r1 _ => S(esize r1)
| Eseqand r1 r2 _ => S(esize r1)
| Eseqor r1 r2 _ => S(esize r1)
| Econdition r1 _ _ _ => S(esize r1)
| Esizeof _ _ => 1%nat
| Ealignof _ _ => 1%nat
| Eassign l1 r2 _ => S(esize l1 + esize r2)%nat
| Eassignop _ l1 r2 _ _ => S(esize l1 + esize r2)%nat
| Epostincr _ l1 _ => S(esize l1)
| Ecomma r1 r2 _ => S(esize r1 + esize r2)%nat
| Ecall r1 rl2 _ => S(esize r1 + esizelist rl2)%nat
| Ebuiltin ef tyargs rl _ => S(esizelist rl)
| Eparen r1 _ => S(esize r1)
end
with esizelist (el: exprlist) : nat :=
match el with
| Enil => O
| Econs r1 rl2 => S(esize r1 + esizelist rl2)%nat
end.
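(* [esize] counts the expression constructors that remain to be reduced;
fully reduced values [Eval] have size 0.  It serves as the decreasing
measure for [forever_N] in [evalinf_funcall_steps] below, and
[leftcontext_size] shows that it is strictly monotone under left
contexts. *)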
Lemma leftcontext_size:
forall from to C,
leftcontext from to C ->
forall e1 e2,
(esize e1 < esize e2)%nat ->
(esize (C e1) < esize (C e2))%nat
with leftcontextlist_size:
forall from C,
leftcontextlist from C ->
forall e1 e2,
(esize e1 < esize e2)%nat ->
(esizelist (C e1) < esizelist (C e2))%nat.
Proof.
induction 1; intros; simpl; auto with arith.
exploit leftcontextlist_size; eauto. auto with arith.
exploit leftcontextlist_size; eauto. auto with arith.
induction 1; intros; simpl; auto with arith.
exploit leftcontext_size; eauto. auto with arith.
Qed.
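(* Divergence in the big-step semantics implies divergence in the
transition semantics.  The proof uses nested cofixpoints: [COF] handles
diverging function calls, [COS] diverging statements, [COE] diverging
expressions and [COEL] diverging expression lists. *)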
Lemma evalinf_funcall_steps:
forall m fd args t k,
evalinf_funcall m fd args t ->
forever_N step lt ge O (Callstate fd args k m) t.
Proof.
cofix COF.
assert (COS:
forall e m s t f k,
execinf_stmt e m s t ->
forever_N step lt ge O (State f s k e m) t).
cofix COS.
assert (COE:
forall e m K a t C f k,
evalinf_expr e m K a t ->
leftcontext K RV C ->
forever_N step lt ge (esize a) (ExprState f (C a) k e m) t).
cofix COE.
assert (COEL:
forall e m a t C f k a1 al ty,
evalinf_exprlist e m a t ->
leftcontext RV RV C -> simple a1 = true -> simplelist al = true ->
forever_N step lt ge (esizelist a)
(ExprState f (C (Ecall a1 (exprlist_app al a) ty)) k e m) t).
cofix COEL.
intros. inv H.
(* cons left *)
eapply forever_N_star with (a2 := (esize a0)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Ecall a1 (exprlist_app al (Econs x al0)) ty)).
eauto. eapply leftcontext_compose; eauto. constructor. auto.
apply exprlist_app_leftcontext; auto. traceEq.
(* cons right *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H3
(fun x => C(Ecall a1 (exprlist_app al (Econs x al0)) ty)) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor. auto.
apply exprlist_app_leftcontext; auto.
eapply forever_N_star with (a2 := (esizelist al0)).
eexact R. simpl; omega.
change (Econs a1' al0) with (exprlist_app (Econs a1' Enil) al0).
rewrite <- exprlist_app_assoc.
eapply COEL. eauto. auto. auto.
rewrite exprlist_app_simple. simpl. rewrite H2; rewrite P; auto.
auto.
intros. inv H.
(* field *)
eapply forever_N_star with (a2 := (esize a0)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Efield x f0 ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* valof *)
eapply forever_N_star with (a2 := (esize a0)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Evalof x ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* deref *)
eapply forever_N_star with (a2 := (esize a0)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Ederef x ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* addrof *)
eapply forever_N_star with (a2 := (esize a0)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Eaddrof x ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* unop *)
eapply forever_N_star with (a2 := (esize a0)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Eunop op x ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* binop left *)
eapply forever_N_star with (a2 := (esize a1)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Ebinop op x a2 ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* binop right *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H1 (fun x => C(Ebinop op x a2 ty)) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor.
eapply forever_N_star with (a2 := (esize a2)). eexact R. simpl; omega.
eapply COE with (C := fun x => C(Ebinop op a1' x ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. auto. traceEq.
(* cast *)
eapply forever_N_star with (a2 := (esize a0)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Ecast x ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* seqand left *)
eapply forever_N_star with (a2 := (esize a1)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Eseqand x a2 ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* seqand 2 *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H1 (fun x => C(Eseqand x a2 ty)) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor.
eapply forever_N_plus. eapply plus_right. eexact R.
left; eapply step_seqand_true; eauto. rewrite Q; eauto.
reflexivity.
eapply COE with (C := fun x => (C (Eparen (Eparen x type_bool) ty))). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* seqor left *)
eapply forever_N_star with (a2 := (esize a1)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Eseqor x a2 ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* seqor 2 *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H1 (fun x => C(Eseqor x a2 ty)) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor.
eapply forever_N_plus. eapply plus_right. eexact R.
left; eapply step_seqor_false; eauto. rewrite Q; eauto.
reflexivity.
eapply COE with (C := fun x => (C (Eparen (Eparen x type_bool) ty))). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* condition top *)
eapply forever_N_star with (a2 := (esize a1)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Econdition x a2 a3 ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* condition *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H1 (fun x => C(Econdition x a2 a3 ty)) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor.
eapply forever_N_plus. eapply plus_right. eexact R.
left; eapply step_condition; eauto. rewrite Q; eauto.
reflexivity.
eapply COE with (C := fun x => (C (Eparen x ty))). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* assign left *)
eapply forever_N_star with (a2 := (esize a1)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Eassign x a2 ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* assign right *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H1 (fun x => C(Eassign x a2 ty)) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor.
eapply forever_N_star with (a2 := (esize a2)). eexact R. simpl; omega.
eapply COE with (C := fun x => C(Eassign a1' x ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. auto. traceEq.
(* assignop left *)
eapply forever_N_star with (a2 := (esize a1)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Eassignop op x a2 tyres ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* assignop right *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H1 (fun x => C(Eassignop op x a2 tyres ty)) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor.
eapply forever_N_star with (a2 := (esize a2)). eexact R. simpl; omega.
eapply COE with (C := fun x => C(Eassignop op a1' x tyres ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. auto. traceEq.
(* postincr *)
eapply forever_N_star with (a2 := (esize a0)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Epostincr id x ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* comma left *)
eapply forever_N_star with (a2 := (esize a1)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Ecomma x a2 ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* comma right *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H1 (fun x => C(Ecomma x a2 (typeof a2))) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor.
eapply forever_N_plus. eapply plus_right. eexact R.
left; eapply step_comma; eauto. reflexivity.
eapply COE with (C := C); eauto. traceEq.
(* call left *)
eapply forever_N_star with (a2 := (esize a1)). apply star_refl. simpl; omega.
eapply COE with (C := fun x => C(Ecall x a2 ty)). eauto.
eapply leftcontext_compose; eauto. repeat constructor. traceEq.
(* call right *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H1 (fun x => C(Ecall x a2 ty)) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor.
eapply forever_N_star with (a2 := (esizelist a2)). eexact R. simpl; omega.
eapply COEL with (al := Enil). eauto. auto. auto. auto. traceEq.
(* call *)
destruct (eval_expr_to_steps _ _ _ _ _ _ _ H1 (fun x => C(Ecall x rargs ty)) f k)
as [P [Q R]].
eapply leftcontext_compose; eauto. repeat constructor.
destruct (eval_exprlist_to_steps _ _ _ _ _ _ H2 rf' Enil ty C f k)
as [S T]. auto. auto. simpl; auto.
eapply forever_N_plus. eapply plus_right.
eapply star_trans. eexact R. eexact T. reflexivity.
simpl. left; eapply step_call; eauto. congruence. reflexivity.
apply COF. eauto. traceEq.
(* statements *)
intros. inv H.
(* do *)
eapply forever_N_plus. apply plus_one; right; constructor.
eapply COE with (C := fun x => x); eauto. constructor. traceEq.
(* seq 1 *)
eapply forever_N_plus. apply plus_one; right; constructor.
eapply COS; eauto. traceEq.
(* seq 2 *)
destruct (exec_stmt_to_steps _ _ _ _ _ _ H0 f (Kseq s2 k)) as [S1 [A1 B1]]; auto. inv B1.
eapply forever_N_plus.
eapply plus_left. right; constructor.
eapply star_right. eauto. right; constructor.
reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* if test *)
eapply forever_N_plus. apply plus_one; right; constructor.
eapply COE with (C := fun x => x); eauto. constructor. traceEq.
(* if true/false *)
eapply forever_N_plus.
eapply plus_left. right; constructor.
eapply star_right. eapply eval_expression_to_steps; eauto.
right. eapply step_ifthenelse_2 with (b := b). auto.
reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* return some *)
eapply forever_N_plus. apply plus_one; right; constructor.
eapply COE with (C := fun x => x); eauto. constructor. traceEq.
(* while test *)
eapply forever_N_plus. apply plus_one; right; constructor.
eapply COE with (C := fun x => x); eauto. constructor. traceEq.
(* while body *)
eapply forever_N_plus.
eapply plus_left. right; constructor.
eapply star_right. eapply eval_expression_to_steps; eauto.
right; apply step_while_true; auto.
reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* while loop *)
destruct (exec_stmt_to_steps _ _ _ _ _ _ H2 f (Kwhile2 a s0 k)) as [S1 [A1 B1]]; auto.
eapply forever_N_plus.
eapply plus_left. right; constructor.
eapply star_trans. eapply eval_expression_to_steps; eauto.
eapply star_left. right; apply step_while_true; auto.
eapply star_trans. eexact A1.
inv H3; inv B1; apply star_one; right; apply step_skip_or_continue_while; auto.
reflexivity. reflexivity. reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* dowhile body *)
eapply forever_N_plus. apply plus_one; right; constructor.
eapply COS; eauto. traceEq.
(* dowhile test *)
destruct (exec_stmt_to_steps _ _ _ _ _ _ H0 f (Kdowhile1 a s0 k)) as [S1 [A1 B1]]; auto.
eapply forever_N_plus.
eapply plus_left. right; constructor.
eapply star_trans. eexact A1.
eapply star_one. right. inv H1; inv B1; apply step_skip_or_continue_dowhile; auto.
reflexivity. reflexivity.
eapply COE with (C := fun x => x); eauto. constructor. traceEq.
(* dowhile loop *)
destruct (exec_stmt_to_steps _ _ _ _ _ _ H0 f (Kdowhile1 a s0 k)) as [S1 [A1 B1]]; auto.
eapply forever_N_plus.
eapply plus_left. right; constructor.
eapply star_trans. eexact A1.
eapply star_left. right. inv H1; inv B1; apply step_skip_or_continue_dowhile; auto.
eapply star_right. eapply eval_expression_to_steps; eauto.
right; apply step_dowhile_true; auto.
reflexivity. reflexivity. reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* for start 1 *)
assert (a1 <> Sskip). red; intros; subst a1; inv H0.
eapply forever_N_plus. apply plus_one. right. constructor. auto.
eapply COS; eauto. traceEq.
(* for start 2 *)
destruct (exec_stmt_to_steps _ _ _ _ _ _ H0 f (Kseq (Sfor Sskip a2 a3 s0) k)) as [S1 [A1 B1]]; auto. inv B1.
eapply forever_N_plus.
eapply plus_left. right; constructor. auto.
eapply star_trans. eexact A1.
apply star_one. right; constructor.
reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* for test *)
eapply forever_N_plus. apply plus_one; right; apply step_for.
eapply COE with (C := fun x => x); eauto. constructor. traceEq.
(* for body *)
eapply forever_N_plus.
eapply plus_left. right; apply step_for.
eapply star_right. eapply eval_expression_to_steps; eauto.
right; apply step_for_true; auto.
reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* for next *)
destruct (exec_stmt_to_steps _ _ _ _ _ _ H2 f (Kfor3 a2 a3 s0 k)) as [S1 [A1 B1]]; auto.
eapply forever_N_plus.
eapply plus_left. right; apply step_for.
eapply star_trans. eapply eval_expression_to_steps; eauto.
eapply star_left. right; apply step_for_true; auto.
eapply star_trans. eexact A1.
inv H3; inv B1; apply star_one; right; apply step_skip_or_continue_for3; auto.
reflexivity. reflexivity. reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* for loop *)
destruct (exec_stmt_to_steps _ _ _ _ _ _ H2 f (Kfor3 a2 a3 s0 k)) as [S1 [A1 B1]]; auto.
destruct (exec_stmt_to_steps _ _ _ _ _ _ H4 f (Kfor4 a2 a3 s0 k)) as [S2 [A2 B2]]; auto. inv B2.
eapply forever_N_plus.
eapply plus_left. right; apply step_for.
eapply star_trans. eapply eval_expression_to_steps; eauto.
eapply star_left. right; apply step_for_true; auto.
eapply star_trans. eexact A1.
eapply star_left.
inv H3; inv B1; right; apply step_skip_or_continue_for3; auto.
eapply star_right. eexact A2.
right; constructor.
reflexivity. reflexivity. reflexivity. reflexivity. reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* switch expr *)
eapply forever_N_plus. apply plus_one; right; constructor.
eapply COE with (C := fun x => x); eauto. constructor. traceEq.
(* switch body *)
eapply forever_N_plus.
eapply plus_left. right; constructor.
eapply star_right. eapply eval_expression_to_steps; eauto.
right; constructor.
reflexivity. reflexivity.
eapply COS; eauto. traceEq.
(* funcalls *)
intros. inv H.
eapply forever_N_plus. apply plus_one. right; econstructor; eauto.
eapply COS; eauto. traceEq.
Qed.
End BIGSTEP.
(** ** Whole-program behaviors, big-step style. *)
Inductive bigstep_program_terminates (p: program): trace -> int -> Prop :=
| bigstep_program_terminates_intro: forall b f m0 m1 t r,
let ge := Genv.globalenv p in
Genv.init_mem p = Some m0 ->
Genv.find_symbol ge p.(prog_main) = Some b ->
Genv.find_funct_ptr ge b = Some f ->
type_of_fundef f = Tfunction Tnil type_int32s ->
eval_funcall ge m0 f nil t m1 (Vint r) ->
bigstep_program_terminates p t r.
Inductive bigstep_program_diverges (p: program): traceinf -> Prop :=
| bigstep_program_diverges_intro: forall b f m0 t,
let ge := Genv.globalenv p in
Genv.init_mem p = Some m0 ->
Genv.find_symbol ge p.(prog_main) = Some b ->
Genv.find_funct_ptr ge b = Some f ->
type_of_fundef f = Tfunction Tnil type_int32s ->
evalinf_funcall ge m0 f nil t ->
bigstep_program_diverges p t.
Definition bigstep_semantics (p: program) :=
Bigstep_semantics (bigstep_program_terminates p) (bigstep_program_diverges p).
Theorem bigstep_semantics_sound:
forall p, bigstep_sound (bigstep_semantics p) (semantics p).
Proof.
intros; constructor; intros.
(* termination *)
inv H. econstructor; econstructor.
split. econstructor; eauto.
split. apply eval_funcall_to_steps. eauto. red; auto.
econstructor.
(* divergence *)
inv H. econstructor.
split. econstructor; eauto.
eapply forever_N_forever with (order := lt).
apply lt_wf.
eapply evalinf_funcall_steps; eauto.
Qed.
|
lemma take_minus_one_conv_butlast:
  "n\<le>length l \<Longrightarrow> take (n - Suc 0) l = butlast (take n l)"
  by (simp add: butlast_take) |
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
Multisets.
-/
import logic.function order.boolean_algebra
data.list.basic data.list.perm data.list.sort data.quot data.string
algebra.order_functions algebra.group_power algebra.ordered_group
category.traversable.lemmas tactic.interactive
category.traversable.instances category.basic
open list subtype nat lattice
variables {α : Type*} {β : Type*} {γ : Type*}
local infix ` • ` := add_monoid.smul
instance list.perm.setoid (α : Type*) : setoid (list α) :=
setoid.mk perm ⟨perm.refl, @perm.symm _, @perm.trans _⟩
/-- `multiset α` is the quotient of `list α` by list permutation. The result
is a type of finite sets with duplicates allowed. -/
def {u} multiset (α : Type u) : Type u :=
quotient (list.perm.setoid α)
namespace multiset
instance : has_coe (list α) (multiset α) := ⟨quot.mk _⟩
@[simp] theorem quot_mk_to_coe (l : list α) : @eq (multiset α) ⟦l⟧ l := rfl
@[simp] theorem quot_mk_to_coe' (l : list α) : @eq (multiset α) (quot.mk (≈) l) l := rfl
@[simp] theorem quot_mk_to_coe'' (l : list α) : @eq (multiset α) (quot.mk setoid.r l) l := rfl
@[simp] theorem coe_eq_coe {l₁ l₂ : list α} : (l₁ : multiset α) = l₂ ↔ l₁ ~ l₂ := quotient.eq
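/- Illustrative sanity check: two lists that are permutations of one another are
   identified once coerced to `multiset`. It combines `coe_eq_coe` with the
   `perm.swap` constructor from `data.list.perm`. -/
example : (([1, 2] : list ℕ) : multiset ℕ) = ([2, 1] : list ℕ) :=
coe_eq_coe.2 (perm.swap 2 1 [])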
instance has_decidable_eq [decidable_eq α] : decidable_eq (multiset α)
| s₁ s₂ := quotient.rec_on_subsingleton₂ s₁ s₂ $ λ l₁ l₂,
decidable_of_iff' _ quotient.eq
/- empty multiset -/
/-- `0 : multiset α` is the empty set -/
protected def zero : multiset α := @nil α
instance : has_zero (multiset α) := ⟨multiset.zero⟩
instance : has_emptyc (multiset α) := ⟨0⟩
instance : inhabited (multiset α) := ⟨0⟩
@[simp] theorem coe_nil_eq_zero : (@nil α : multiset α) = 0 := rfl
@[simp] theorem empty_eq_zero : (∅ : multiset α) = 0 := rfl
theorem coe_eq_zero (l : list α) : (l : multiset α) = 0 ↔ l = [] :=
iff.trans coe_eq_coe perm_nil
/- cons -/
/-- `cons a s` is the multiset which contains `s` plus one more
instance of `a`. -/
def cons (a : α) (s : multiset α) : multiset α :=
quot.lift_on s (λ l, (a :: l : multiset α))
(λ l₁ l₂ p, quot.sound ((perm_cons a).2 p))
notation a :: b := cons a b
instance : has_insert α (multiset α) := ⟨cons⟩
@[simp] theorem insert_eq_cons (a : α) (s : multiset α) :
insert a s = a::s := rfl
@[simp] theorem cons_coe (a : α) (l : list α) :
(a::l : multiset α) = (a::l : list α) := rfl
theorem singleton_coe (a : α) : (a::0 : multiset α) = ([a] : list α) := rfl
@[simp] theorem cons_inj_left {a b : α} (s : multiset α) :
a::s = b::s ↔ a = b :=
⟨quot.induction_on s $ λ l e,
have [a] ++ l ~ [b] ++ l, from quotient.exact e,
eq_singleton_of_perm $ (perm_app_right_iff _).1 this, congr_arg _⟩
@[simp] theorem cons_inj_right (a : α) : ∀{s t : multiset α}, a::s = a::t ↔ s = t :=
by rintros ⟨l₁⟩ ⟨l₂⟩; simp [perm_cons]
@[recursor 5] protected theorem induction {p : multiset α → Prop}
(h₁ : p 0) (h₂ : ∀ ⦃a : α⦄ {s : multiset α}, p s → p (a :: s)) : ∀s, p s :=
by rintros ⟨l⟩; induction l with _ _ ih; [exact h₁, exact h₂ ih]
@[elab_as_eliminator] protected theorem induction_on {p : multiset α → Prop}
(s : multiset α) (h₁ : p 0) (h₂ : ∀ ⦃a : α⦄ {s : multiset α}, p s → p (a :: s)) : p s :=
multiset.induction h₁ h₂ s
theorem cons_swap (a b : α) (s : multiset α) : a :: b :: s = b :: a :: s :=
quot.induction_on s $ λ l, quotient.sound $ perm.swap _ _ _
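/- Illustrative sanity check: the order in which elements are `cons`ed is irrelevant. -/
example (s : multiset ℕ) : (1 :: 2 :: s : multiset ℕ) = 2 :: 1 :: s :=
cons_swap 1 2 s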
section rec
variables {C : multiset α → Sort*}
/-- Dependent recursor on multisets.
TODO: should be @[recursor 6], but then the definition of `multiset.pi` fails with a stack
overflow in `whnf`.
-/
protected def rec
(C_0 : C 0)
(C_cons : Πa m, C m → C (a::m))
(C_cons_heq : ∀a a' m b, C_cons a (a'::m) (C_cons a' m b) == C_cons a' (a::m) (C_cons a m b))
(m : multiset α) : C m :=
quotient.hrec_on m (@list.rec α (λl, C ⟦l⟧) C_0 (λa l b, C_cons a ⟦l⟧ b)) $
assume l l' h,
list.rec_heq_of_perm h
(assume a l l' b b' hl, have ⟦l⟧ = ⟦l'⟧, from quot.sound hl, by cc)
(assume a a' l, C_cons_heq a a' ⟦l⟧)
@[elab_as_eliminator]
protected def rec_on (m : multiset α)
(C_0 : C 0)
(C_cons : Πa m, C m → C (a::m))
(C_cons_heq : ∀a a' m b, C_cons a (a'::m) (C_cons a' m b) == C_cons a' (a::m) (C_cons a m b)) :
C m :=
multiset.rec C_0 C_cons C_cons_heq m
variables {C_0 : C 0} {C_cons : Πa m, C m → C (a::m)}
{C_cons_heq : ∀a a' m b, C_cons a (a'::m) (C_cons a' m b) == C_cons a' (a::m) (C_cons a m b)}
@[simp] lemma rec_on_0 : @multiset.rec_on α C (0:multiset α) C_0 C_cons C_cons_heq = C_0 :=
rfl
@[simp] lemma rec_on_cons (a : α) (m : multiset α) :
(a :: m).rec_on C_0 C_cons C_cons_heq = C_cons a m (m.rec_on C_0 C_cons C_cons_heq) :=
quotient.induction_on m $ assume l, rfl
end rec
section mem
/-- `a ∈ s` means that `a` has nonzero multiplicity in `s`. -/
def mem (a : α) (s : multiset α) : Prop :=
quot.lift_on s (λ l, a ∈ l) (λ l₁ l₂ (e : l₁ ~ l₂), propext $ mem_of_perm e)
instance : has_mem α (multiset α) := ⟨mem⟩
@[simp] lemma mem_coe {a : α} {l : list α} : a ∈ (l : multiset α) ↔ a ∈ l := iff.rfl
instance decidable_mem [decidable_eq α] (a : α) (s : multiset α) : decidable (a ∈ s) :=
quot.rec_on_subsingleton s $ list.decidable_mem a
@[simp] theorem mem_cons {a b : α} {s : multiset α} : a ∈ b :: s ↔ a = b ∨ a ∈ s :=
quot.induction_on s $ λ l, iff.rfl
lemma mem_cons_of_mem {a b : α} {s : multiset α} (h : a ∈ s) : a ∈ b :: s :=
mem_cons.2 $ or.inr h
@[simp] theorem mem_cons_self (a : α) (s : multiset α) : a ∈ a :: s :=
mem_cons.2 (or.inl rfl)
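/- Illustrative sanity check: membership is propagated through `cons`. -/
example (s : multiset ℕ) : (5 : ℕ) ∈ (3 :: 5 :: s : multiset ℕ) :=
mem_cons_of_mem (mem_cons_self 5 s)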
theorem exists_cons_of_mem {s : multiset α} {a : α} : a ∈ s → ∃ t, s = a :: t :=
quot.induction_on s $ λ l (h : a ∈ l),
let ⟨l₁, l₂, e⟩ := mem_split h in
e.symm ▸ ⟨(l₁++l₂ : list α), quot.sound perm_middle⟩
@[simp] theorem not_mem_zero (a : α) : a ∉ (0 : multiset α) := id
theorem eq_zero_of_forall_not_mem {s : multiset α} : (∀x, x ∉ s) → s = 0 :=
quot.induction_on s $ λ l H, by rw eq_nil_iff_forall_not_mem.mpr H; refl
theorem exists_mem_of_ne_zero {s : multiset α} : s ≠ 0 → ∃ a : α, a ∈ s :=
quot.induction_on s $ assume l hl,
match l, hl with
| [] := assume h, false.elim $ h rfl
| (a :: l) := assume _, ⟨a, by simp⟩
end
@[simp] lemma zero_ne_cons {a : α} {m : multiset α} : 0 ≠ a :: m :=
assume h, have a ∈ (0:multiset α), from h.symm ▸ mem_cons_self _ _, not_mem_zero _ this
@[simp] lemma cons_ne_zero {a : α} {m : multiset α} : a :: m ≠ 0 := zero_ne_cons.symm
lemma cons_eq_cons {a b : α} {as bs : multiset α} :
a :: as = b :: bs ↔ ((a = b ∧ as = bs) ∨ (a ≠ b ∧ ∃cs, as = b :: cs ∧ bs = a :: cs)) :=
begin
haveI : decidable_eq α := classical.dec_eq α,
split,
{ assume eq,
by_cases a = b,
{ subst h, simp * at * },
{ have : a ∈ b :: bs, from eq ▸ mem_cons_self _ _,
have : a ∈ bs, by simpa [h],
rcases exists_cons_of_mem this with ⟨cs, hcs⟩,
simp [h, hcs],
have : a :: as = b :: a :: cs, by simp [eq, hcs],
have : a :: as = a :: b :: cs, by rwa [cons_swap],
simpa using this } },
{ assume h,
rcases h with ⟨eq₁, eq₂⟩ | ⟨h, cs, eq₁, eq₂⟩,
{ simp * },
{ simp [*, cons_swap a b] } }
end
end mem
/- subset -/
section subset
/-- `s ⊆ t` is the lift of the list subset relation. It means that any
element with nonzero multiplicity in `s` has nonzero multiplicity in `t`,
but it does not imply that the multiplicity of `a` in `s` is less than or equal to its multiplicity in `t`;
see `s ≤ t` for this relation. -/
protected def subset (s t : multiset α) : Prop := ∀ ⦃a : α⦄, a ∈ s → a ∈ t
instance : has_subset (multiset α) := ⟨multiset.subset⟩
@[simp] theorem coe_subset {l₁ l₂ : list α} : (l₁ : multiset α) ⊆ l₂ ↔ l₁ ⊆ l₂ := iff.rfl
@[simp] theorem subset.refl (s : multiset α) : s ⊆ s := λ a h, h
theorem subset.trans {s t u : multiset α} : s ⊆ t → t ⊆ u → s ⊆ u :=
λ h₁ h₂ a m, h₂ (h₁ m)
theorem subset_iff {s t : multiset α} : s ⊆ t ↔ (∀⦃x⦄, x ∈ s → x ∈ t) := iff.rfl
theorem mem_of_subset {s t : multiset α} {a : α} (h : s ⊆ t) : a ∈ s → a ∈ t := @h _
@[simp] theorem zero_subset (s : multiset α) : 0 ⊆ s :=
λ a, (not_mem_nil a).elim
@[simp] theorem cons_subset {a : α} {s t : multiset α} : (a :: s) ⊆ t ↔ a ∈ t ∧ s ⊆ t :=
by simp [subset_iff, or_imp_distrib, forall_and_distrib]
theorem eq_zero_of_subset_zero {s : multiset α} (h : s ⊆ 0) : s = 0 :=
eq_zero_of_forall_not_mem h
theorem subset_zero {s : multiset α} : s ⊆ 0 ↔ s = 0 :=
⟨eq_zero_of_subset_zero, λ xeq, xeq.symm ▸ subset.refl 0⟩
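/- Illustrative sanity check: `⊆` disregards multiplicities, so a multiset with a
   repeated element can still be a subset of one without the repetition
   (even though it is not `≤` it). -/
example : (1 :: 1 :: 0 : multiset ℕ) ⊆ (1 :: 0 : multiset ℕ) :=
by simp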
end subset
/- multiset order -/
/-- `s ≤ t` means that `s` is a sublist of `t` (up to permutation).
Equivalently, `s ≤ t` means that `count a s ≤ count a t` for all `a`. -/
protected def le (s t : multiset α) : Prop :=
quotient.lift_on₂ s t (<+~) $ λ v₁ v₂ w₁ w₂ p₁ p₂,
propext (p₂.subperm_left.trans p₁.subperm_right)
instance : partial_order (multiset α) :=
{ le := multiset.le,
le_refl := by rintros ⟨l⟩; exact subperm.refl _,
le_trans := by rintros ⟨l₁⟩ ⟨l₂⟩ ⟨l₃⟩; exact @subperm.trans _ _ _ _,
le_antisymm := by rintros ⟨l₁⟩ ⟨l₂⟩ h₁ h₂; exact quot.sound (subperm.antisymm h₁ h₂) }
theorem subset_of_le {s t : multiset α} : s ≤ t → s ⊆ t :=
quotient.induction_on₂ s t $ λ l₁ l₂, subset_of_subperm
theorem mem_of_le {s t : multiset α} {a : α} (h : s ≤ t) : a ∈ s → a ∈ t :=
mem_of_subset (subset_of_le h)
@[simp] theorem coe_le {l₁ l₂ : list α} : (l₁ : multiset α) ≤ l₂ ↔ l₁ <+~ l₂ := iff.rfl
@[elab_as_eliminator] theorem le_induction_on {C : multiset α → multiset α → Prop}
{s t : multiset α} (h : s ≤ t)
(H : ∀ {l₁ l₂ : list α}, l₁ <+ l₂ → C l₁ l₂) : C s t :=
quotient.induction_on₂ s t (λ l₁ l₂ ⟨l, p, s⟩,
(show ⟦l⟧ = ⟦l₁⟧, from quot.sound p) ▸ H s) h
theorem zero_le (s : multiset α) : 0 ≤ s :=
quot.induction_on s $ λ l, subperm_of_sublist $ nil_sublist l
theorem le_zero {s : multiset α} : s ≤ 0 ↔ s = 0 :=
⟨λ h, le_antisymm h (zero_le _), le_of_eq⟩
theorem lt_cons_self (s : multiset α) (a : α) : s < a :: s :=
quot.induction_on s $ λ l,
suffices l <+~ a :: l ∧ (¬l ~ a :: l),
by simpa [lt_iff_le_and_ne],
⟨subperm_of_sublist (sublist_cons _ _),
λ p, ne_of_lt (lt_succ_self (length l)) (perm_length p)⟩
theorem le_cons_self (s : multiset α) (a : α) : s ≤ a :: s :=
le_of_lt $ lt_cons_self _ _
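/- Illustrative sanity check: `≤` allows enlarging the multiset on the right; by
   contrast `1 :: 1 :: 0 ≤ 1 :: 0` fails, since `≤` respects multiplicities. -/
example : (1 :: 0 : multiset ℕ) ≤ 2 :: 1 :: 0 :=
le_cons_self _ _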
theorem cons_le_cons_iff (a : α) {s t : multiset α} : a :: s ≤ a :: t ↔ s ≤ t :=
quotient.induction_on₂ s t $ λ l₁ l₂, subperm_cons a
theorem cons_le_cons (a : α) {s t : multiset α} : s ≤ t → a :: s ≤ a :: t :=
(cons_le_cons_iff a).2
theorem le_cons_of_not_mem {a : α} {s t : multiset α} (m : a ∉ s) : s ≤ a :: t ↔ s ≤ t :=
begin
refine ⟨_, λ h, le_trans h $ le_cons_self _ _⟩,
suffices : ∀ {t'} (_ : s ≤ t') (_ : a ∈ t'), a :: s ≤ t',
{ exact λ h, (cons_le_cons_iff a).1 (this h (mem_cons_self _ _)) },
introv h, revert m, refine le_induction_on h _,
introv s m₁ m₂,
rcases mem_split m₂ with ⟨r₁, r₂, rfl⟩,
exact perm_middle.subperm_left.2 ((subperm_cons _).2 $ subperm_of_sublist $
(sublist_or_mem_of_sublist s).resolve_right m₁)
end
/- cardinality -/
/-- The cardinality of a multiset is the sum of the multiplicities
of all its elements, or simply the length of the underlying list. -/
def card (s : multiset α) : ℕ :=
quot.lift_on s length $ λ l₁ l₂, perm_length
@[simp] theorem coe_card (l : list α) : card (l : multiset α) = length l := rfl
@[simp] theorem card_zero : @card α 0 = 0 := rfl
@[simp] theorem card_cons (a : α) (s : multiset α) : card (a :: s) = card s + 1 :=
quot.induction_on s $ λ l, rfl
@[simp] theorem card_singleton (a : α) : card (a::0) = 1 := by simp
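/- A small computational check: `card` counts elements with multiplicity. -/
example : card (1 :: 1 :: 2 :: 0 : multiset ℕ) = 3 := rfl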
theorem card_le_of_le {s t : multiset α} (h : s ≤ t) : card s ≤ card t :=
le_induction_on h $ λ l₁ l₂, length_le_of_sublist
theorem eq_of_le_of_card_le {s t : multiset α} (h : s ≤ t) : card t ≤ card s → s = t :=
le_induction_on h $ λ l₁ l₂ s h₂, congr_arg coe $ eq_of_sublist_of_length_le s h₂
theorem card_lt_of_lt {s t : multiset α} (h : s < t) : card s < card t :=
lt_of_not_ge $ λ h₂, ne_of_lt h $ eq_of_le_of_card_le (le_of_lt h) h₂
theorem lt_iff_cons_le {s t : multiset α} : s < t ↔ ∃ a, a :: s ≤ t :=
⟨quotient.induction_on₂ s t $ λ l₁ l₂ h,
subperm.exists_of_length_lt (le_of_lt h) (card_lt_of_lt h),
λ ⟨a, h⟩, lt_of_lt_of_le (lt_cons_self _ _) h⟩
@[simp] theorem card_eq_zero {s : multiset α} : card s = 0 ↔ s = 0 :=
⟨λ h, (eq_of_le_of_card_le (zero_le _) (le_of_eq h)).symm, λ e, by simp [e]⟩
theorem card_pos {s : multiset α} : 0 < card s ↔ s ≠ 0 :=
pos_iff_ne_zero.trans $ not_congr card_eq_zero
theorem card_pos_iff_exists_mem {s : multiset α} : 0 < card s ↔ ∃ a, a ∈ s :=
quot.induction_on s $ λ l, length_pos_iff_exists_mem
@[elab_as_eliminator] def strong_induction_on {p : multiset α → Sort*} :
∀ (s : multiset α), (∀ s, (∀t < s, p t) → p s) → p s
| s := λ ih, ih s $ λ t h,
have card t < card s, from card_lt_of_lt h,
strong_induction_on t ih
using_well_founded {rel_tac := λ _ _, `[exact ⟨_, measure_wf card⟩]}
theorem strong_induction_eq {p : multiset α → Sort*}
(s : multiset α) (H) : @strong_induction_on _ p s H =
H s (λ t h, @strong_induction_on _ p t H) :=
by rw [strong_induction_on]
@[elab_as_eliminator] lemma case_strong_induction_on {p : multiset α → Prop}
(s : multiset α) (h₀ : p 0) (h₁ : ∀ a s, (∀t ≤ s, p t) → p (a :: s)) : p s :=
multiset.strong_induction_on s $ assume s,
multiset.induction_on s (λ _, h₀) $ λ a s _ ih, h₁ _ _ $
λ t h, ih _ $ lt_of_le_of_lt h $ lt_cons_self _ _
/- singleton -/
@[simp] theorem singleton_eq_singleton (a : α) : singleton a = a::0 := rfl
@[simp] theorem mem_singleton {a b : α} : b ∈ a::0 ↔ b = a := by simp
theorem mem_singleton_self (a : α) : a ∈ (a::0 : multiset α) := mem_cons_self _ _
theorem singleton_inj {a b : α} : a::0 = b::0 ↔ a = b := cons_inj_left _
@[simp] theorem singleton_ne_zero (a : α) : a::0 ≠ 0 :=
ne_of_gt (lt_cons_self _ _)
@[simp] theorem singleton_le {a : α} {s : multiset α} : a::0 ≤ s ↔ a ∈ s :=
⟨λ h, mem_of_le h (mem_singleton_self _),
λ h, let ⟨t, e⟩ := exists_cons_of_mem h in e.symm ▸ cons_le_cons _ (zero_le _)⟩
theorem card_eq_one {s : multiset α} : card s = 1 ↔ ∃ a, s = a::0 :=
⟨quot.induction_on s $ λ l h,
(list.length_eq_one.1 h).imp $ λ a, congr_arg coe,
λ ⟨a, e⟩, e.symm ▸ rfl⟩
/- add -/
/-- The sum of two multisets is the lift of the list append operation.
This adds the multiplicities of each element,
i.e. `count a (s + t) = count a s + count a t`. -/
protected def add (s₁ s₂ : multiset α) : multiset α :=
quotient.lift_on₂ s₁ s₂ (λ l₁ l₂, ((l₁ ++ l₂ : list α) : multiset α)) $
λ v₁ v₂ w₁ w₂ p₁ p₂, quot.sound $ perm_app p₁ p₂
instance : has_add (multiset α) := ⟨multiset.add⟩
@[simp] theorem coe_add (s t : list α) : (s + t : multiset α) = (s ++ t : list α) := rfl
protected theorem add_comm (s t : multiset α) : s + t = t + s :=
quotient.induction_on₂ s t $ λ l₁ l₂, quot.sound perm_app_comm
protected theorem zero_add (s : multiset α) : 0 + s = s :=
quot.induction_on s $ λ l, rfl
theorem singleton_add (a : α) (s : multiset α) : ↑[a] + s = a::s := rfl
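/- A small computational check: `+` adds multiplicities, so `2` occurs twice in the sum below. -/
example : (1 :: 2 :: 0 : multiset ℕ) + (2 :: 0) = 1 :: 2 :: 2 :: 0 := rfl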
protected theorem add_le_add_left (s) {t u : multiset α} : s + t ≤ s + u ↔ t ≤ u :=
quotient.induction_on₃ s t u $ λ l₁ l₂ l₃, subperm_app_left _
protected theorem add_left_cancel (s) {t u : multiset α} (h : s + t = s + u) : t = u :=
le_antisymm ((multiset.add_le_add_left _).1 (le_of_eq h))
((multiset.add_le_add_left _).1 (le_of_eq h.symm))
instance : ordered_cancel_comm_monoid (multiset α) :=
{ zero := 0,
add := (+),
add_comm := multiset.add_comm,
add_assoc := λ s₁ s₂ s₃, quotient.induction_on₃ s₁ s₂ s₃ $ λ l₁ l₂ l₃,
congr_arg coe $ append_assoc l₁ l₂ l₃,
zero_add := multiset.zero_add,
add_zero := λ s, by rw [multiset.add_comm, multiset.zero_add],
add_left_cancel := multiset.add_left_cancel,
add_right_cancel := λ s₁ s₂ s₃ h, multiset.add_left_cancel s₂ $
by simpa [multiset.add_comm] using h,
add_le_add_left := λ s₁ s₂ h s₃, (multiset.add_le_add_left _).2 h,
le_of_add_le_add_left := λ s₁ s₂ s₃, (multiset.add_le_add_left _).1,
[email protected]_order α }
@[simp] theorem cons_add (a : α) (s t : multiset α) : a :: s + t = a :: (s + t) :=
by rw [← singleton_add, ← singleton_add, add_assoc]
@[simp] theorem add_cons (a : α) (s t : multiset α) : s + a :: t = a :: (s + t) :=
by rw [add_comm, cons_add, add_comm]
theorem le_add_right (s t : multiset α) : s ≤ s + t :=
by simpa using add_le_add_left (zero_le t) s
theorem le_add_left (s t : multiset α) : s ≤ t + s :=
by simpa using add_le_add_right (zero_le t) s
@[simp] theorem card_add (s t : multiset α) : card (s + t) = card s + card t :=
quotient.induction_on₂ s t length_append
lemma card_smul (s : multiset α) (n : ℕ) :
(n • s).card = n * s.card :=
by induction n; simp [succ_smul, *, nat.succ_mul]
@[simp] theorem mem_add {a : α} {s t : multiset α} : a ∈ s + t ↔ a ∈ s ∨ a ∈ t :=
quotient.induction_on₂ s t $ λ l₁ l₂, mem_append
theorem le_iff_exists_add {s t : multiset α} : s ≤ t ↔ ∃ u, t = s + u :=
⟨λ h, le_induction_on h $ λ l₁ l₂ s,
let ⟨l, p⟩ := exists_perm_append_of_sublist s in ⟨l, quot.sound p⟩,
λ⟨u, e⟩, e.symm ▸ le_add_right s u⟩
instance : canonically_ordered_monoid (multiset α) :=
{ lt_of_add_lt_add_left := @lt_of_add_lt_add_left _ _,
le_iff_exists_add := @le_iff_exists_add _,
bot := 0,
bot_le := multiset.zero_le,
..multiset.ordered_cancel_comm_monoid }
/- repeat -/
/-- `repeat a n` is the multiset containing only `a` with multiplicity `n`. -/
def repeat (a : α) (n : ℕ) : multiset α := repeat a n
@[simp] lemma repeat_zero (a : α) : repeat a 0 = 0 := rfl
@[simp] lemma repeat_succ (a : α) (n) : repeat a (n+1) = a :: repeat a n := by simp [repeat]
@[simp] lemma repeat_one (a : α) : repeat a 1 = a :: 0 := by simp
@[simp] lemma card_repeat : ∀ (a : α) n, card (repeat a n) = n := length_repeat
theorem eq_of_mem_repeat {a b : α} {n} : b ∈ repeat a n → b = a := eq_of_mem_repeat
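/- A small computational check of `repeat`, expected to reduce by `rfl`. -/
example : repeat (7 : ℕ) 3 = 7 :: 7 :: 7 :: 0 := rfl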
theorem eq_repeat' {a : α} {s : multiset α} : s = repeat a s.card ↔ ∀ b ∈ s, b = a :=
quot.induction_on s $ λ l, iff.trans ⟨λ h,
(perm_repeat.1 $ (quotient.exact h).symm).symm, congr_arg coe⟩ eq_repeat'
theorem eq_repeat_of_mem {a : α} {s : multiset α} : (∀ b ∈ s, b = a) → s = repeat a s.card :=
eq_repeat'.2
theorem eq_repeat {a : α} {n} {s : multiset α} : s = repeat a n ↔ card s = n ∧ ∀ b ∈ s, b = a :=
⟨λ h, h.symm ▸ ⟨card_repeat _ _, λ b, eq_of_mem_repeat⟩,
λ ⟨e, al⟩, e ▸ eq_repeat_of_mem al⟩
theorem repeat_subset_singleton : ∀ (a : α) n, repeat a n ⊆ a::0 := repeat_subset_singleton
theorem repeat_le_coe {a : α} {n} {l : list α} : repeat a n ≤ l ↔ list.repeat a n <+ l :=
⟨λ ⟨l', p, s⟩, (perm_repeat.1 p.symm).symm ▸ s, subperm_of_sublist⟩
/- range -/
/-- `range n` is the multiset lifted from the list `range n`,
that is, the set `{0, 1, ..., n-1}`. -/
def range (n : ℕ) : multiset ℕ := range n
@[simp] theorem range_zero : range 0 = 0 := rfl
@[simp] theorem range_succ (n : ℕ) : range (succ n) = n :: range n :=
by rw [range, range_concat, ← coe_add, add_comm]; refl
@[simp] theorem card_range (n : ℕ) : card (range n) = n := length_range _
theorem range_subset {m n : ℕ} : range m ⊆ range n ↔ m ≤ n := range_subset
@[simp] theorem mem_range {m n : ℕ} : m ∈ range n ↔ m < n := mem_range
@[simp] theorem not_mem_range_self {n : ℕ} : n ∉ range n := not_mem_range_self
/- erase -/
section erase
variables [decidable_eq α] {s t : multiset α} {a b : α}
/-- `erase s a` removes one occurrence of `a` from `s`, i.e. it subtracts 1 from the
multiplicity of `a` (when that multiplicity is positive). -/
def erase (s : multiset α) (a : α) : multiset α :=
quot.lift_on s (λ l, (l.erase a : multiset α))
(λ l₁ l₂ p, quot.sound (erase_perm_erase a p))
@[simp] theorem coe_erase (l : list α) (a : α) :
erase (l : multiset α) a = l.erase a := rfl
@[simp] theorem erase_zero (a : α) : (0 : multiset α).erase a = 0 := rfl
@[simp] theorem erase_cons_head (a : α) (s : multiset α) : (a :: s).erase a = s :=
quot.induction_on s $ λ l, congr_arg coe $ erase_cons_head a l
@[simp] theorem erase_cons_tail {a b : α} (s : multiset α) (h : b ≠ a) : (b::s).erase a = b :: s.erase a :=
quot.induction_on s $ λ l, congr_arg coe $ erase_cons_tail l h
@[simp] theorem erase_of_not_mem {a : α} {s : multiset α} : a ∉ s → s.erase a = s :=
quot.induction_on s $ λ l h, congr_arg coe $ erase_of_not_mem h
@[simp] theorem cons_erase {s : multiset α} {a : α} : a ∈ s → a :: s.erase a = s :=
quot.induction_on s $ λ l h, quot.sound (perm_erase h).symm
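/- A small computational check: `erase` removes exactly one occurrence,
   leaving the second `1` in place. -/
example : (1 :: 2 :: 1 :: 0 : multiset ℕ).erase 1 = 2 :: 1 :: 0 := rfl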
theorem le_cons_erase (s : multiset α) (a : α) : s ≤ a :: s.erase a :=
if h : a ∈ s then le_of_eq (cons_erase h).symm
else by rw erase_of_not_mem h; apply le_cons_self
@[simp] theorem card_erase_of_mem {a : α} {s : multiset α} : a ∈ s → card (s.erase a) = pred (card s) :=
quot.induction_on s $ λ l, length_erase_of_mem
theorem erase_add_left_pos {a : α} {s : multiset α} (t) : a ∈ s → (s + t).erase a = s.erase a + t :=
quotient.induction_on₂ s t $ λ l₁ l₂ h, congr_arg coe $ erase_append_left l₂ h
theorem erase_add_right_pos {a : α} (s) {t : multiset α} (h : a ∈ t) : (s + t).erase a = s + t.erase a :=
by rw [add_comm, erase_add_left_pos s h, add_comm]
theorem erase_add_right_neg {a : α} {s : multiset α} (t) : a ∉ s → (s + t).erase a = s + t.erase a :=
quotient.induction_on₂ s t $ λ l₁ l₂ h, congr_arg coe $ erase_append_right l₂ h
theorem erase_add_left_neg {a : α} (s) {t : multiset α} (h : a ∉ t) : (s + t).erase a = s.erase a + t :=
by rw [add_comm, erase_add_right_neg s h, add_comm]
theorem erase_le (a : α) (s : multiset α) : s.erase a ≤ s :=
quot.induction_on s $ λ l, subperm_of_sublist (erase_sublist a l)
@[simp] theorem erase_lt {a : α} {s : multiset α} : s.erase a < s ↔ a ∈ s :=
⟨λ h, not_imp_comm.1 erase_of_not_mem (ne_of_lt h),
λ h, by simpa [h] using lt_cons_self (s.erase a) a⟩
theorem erase_subset (a : α) (s : multiset α) : s.erase a ⊆ s :=
subset_of_le (erase_le a s)
theorem mem_erase_of_ne {a b : α} {s : multiset α} (ab : a ≠ b) : a ∈ s.erase b ↔ a ∈ s :=
quot.induction_on s $ λ l, list.mem_erase_of_ne ab
theorem mem_of_mem_erase {a b : α} {s : multiset α} : a ∈ s.erase b → a ∈ s :=
mem_of_subset (erase_subset _ _)
theorem erase_comm (s : multiset α) (a b : α) : (s.erase a).erase b = (s.erase b).erase a :=
quot.induction_on s $ λ l, congr_arg coe $ l.erase_comm a b
theorem erase_le_erase {s t : multiset α} (a : α) (h : s ≤ t) : s.erase a ≤ t.erase a :=
le_induction_on h $ λ l₁ l₂ h, subperm_of_sublist (erase_sublist_erase _ h)
theorem erase_le_iff_le_cons {s t : multiset α} {a : α} : s.erase a ≤ t ↔ s ≤ a :: t :=
⟨λ h, le_trans (le_cons_erase _ _) (cons_le_cons _ h),
λ h, if m : a ∈ s
then by rw ← cons_erase m at h; exact (cons_le_cons_iff _).1 h
else le_trans (erase_le _ _) ((le_cons_of_not_mem m).1 h)⟩
end erase
@[simp] theorem coe_reverse (l : list α) : (reverse l : multiset α) = l :=
quot.sound $ reverse_perm _
/- map -/
/-- `map f s` is the lift of the list `map` operation. The multiplicity
of `b` in `map f s` is the number of `a ∈ s` (counting multiplicity)
such that `f a = b`. -/
def map (f : α → β) (s : multiset α) : multiset β :=
quot.lift_on s (λ l : list α, (l.map f : multiset β))
(λ l₁ l₂ p, quot.sound (perm_map f p))
@[simp] theorem coe_map (f : α → β) (l : list α) : map f ↑l = l.map f := rfl
@[simp] theorem map_zero (f : α → β) : map f 0 = 0 := rfl
@[simp] theorem map_cons (f : α → β) (a s) : map f (a::s) = f a :: map f s :=
quot.induction_on s $ λ l, rfl
@[simp] lemma map_singleton (f : α → β) (a : α) : ({a} : multiset α).map f = {f a} := rfl
@[simp] theorem map_add (f : α → β) (s t) : map f (s + t) = map f s + map f t :=
quotient.induction_on₂ s t $ λ l₁ l₂, congr_arg coe $ map_append _ _ _
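/- A small computational check: `map` acts elementwise and keeps multiplicities. -/
example : map (λ n : ℕ, n + 1) (1 :: 1 :: 2 :: 0) = 2 :: 2 :: 3 :: 0 := rfl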
instance (f : α → β) : is_add_monoid_hom (map f) :=
by refine_struct {..}; simp
@[simp] theorem mem_map {f : α → β} {b : β} {s : multiset α} :
b ∈ map f s ↔ ∃ a, a ∈ s ∧ f a = b :=
quot.induction_on s $ λ l, mem_map
@[simp] theorem card_map (f : α → β) (s) : card (map f s) = card s :=
quot.induction_on s $ λ l, length_map _ _
theorem mem_map_of_mem (f : α → β) {a : α} {s : multiset α} (h : a ∈ s) : f a ∈ map f s :=
mem_map.2 ⟨_, h, rfl⟩
@[simp] theorem mem_map_of_inj {f : α → β} (H : function.injective f) {a : α} {s : multiset α} :
f a ∈ map f s ↔ a ∈ s :=
quot.induction_on s $ λ l, mem_map_of_inj H
@[simp] theorem map_map (g : β → γ) (f : α → β) (s : multiset α) : map g (map f s) = map (g ∘ f) s :=
quot.induction_on s $ λ l, congr_arg coe $ list.map_map _ _ _
@[simp] theorem map_id (s : multiset α) : map id s = s :=
quot.induction_on s $ λ l, congr_arg coe $ map_id _
@[simp] lemma map_id' (s : multiset α) : map (λx, x) s = s := map_id s
@[simp] theorem map_const (s : multiset α) (b : β) : map (function.const α b) s = repeat b s.card :=
quot.induction_on s $ λ l, congr_arg coe $ map_const _ _
@[congr] theorem map_congr {f g : α → β} {s : multiset α} : (∀ x ∈ s, f x = g x) → map f s = map g s :=
quot.induction_on s $ λ l H, congr_arg coe $ map_congr H
lemma map_hcongr {β' : Type*} {m : multiset α} {f : α → β} {f' : α → β'}
(h : β = β') (hf : ∀a∈m, f a == f' a) : map f m == map f' m :=
begin subst h, simp at hf, simp [map_congr hf] end
theorem eq_of_mem_map_const {b₁ b₂ : β} {l : list α} (h : b₁ ∈ map (function.const α b₂) l) : b₁ = b₂ :=
eq_of_mem_repeat $ by rwa map_const at h
@[simp] theorem map_le_map {f : α → β} {s t : multiset α} (h : s ≤ t) : map f s ≤ map f t :=
le_induction_on h $ λ l₁ l₂ h, subperm_of_sublist $ map_sublist_map f h
@[simp] theorem map_subset_map {f : α → β} {s t : multiset α} (H : s ⊆ t) : map f s ⊆ map f t :=
λ b m, let ⟨a, h, e⟩ := mem_map.1 m in mem_map.2 ⟨a, H h, e⟩
/- fold -/
/-- `foldl f H b s` is the lift of the list operation `foldl f b l`,
which folds `f` over the multiset. It is well defined when `f` is right-commutative,
that is, `f (f b a₁) a₂ = f (f b a₂) a₁`. -/
def foldl (f : β → α → β) (H : right_commutative f) (b : β) (s : multiset α) : β :=
quot.lift_on s (λ l, foldl f b l)
(λ l₁ l₂ p, foldl_eq_of_perm H p b)
@[simp] theorem foldl_zero (f : β → α → β) (H b) : foldl f H b 0 = b := rfl
@[simp] theorem foldl_cons (f : β → α → β) (H b a s) : foldl f H b (a :: s) = foldl f H (f b a) s :=
quot.induction_on s $ λ l, rfl
@[simp] theorem foldl_add (f : β → α → β) (H b s t) : foldl f H b (s + t) = foldl f H (foldl f H b s) t :=
quotient.induction_on₂ s t $ λ l₁ l₂, foldl_append _ _ _ _
/-- `foldr f H b s` is the lift of the list operation `foldr f b l`,
which folds `f` over the multiset. It is well defined when `f` is left-commutative,
that is, `f a₁ (f a₂ b) = f a₂ (f a₁ b)`. -/
def foldr (f : α → β → β) (H : left_commutative f) (b : β) (s : multiset α) : β :=
quot.lift_on s (λ l, foldr f b l)
(λ l₁ l₂ p, foldr_eq_of_perm H p b)
@[simp] theorem foldr_zero (f : α → β → β) (H b) : foldr f H b 0 = b := rfl
@[simp] theorem foldr_cons (f : α → β → β) (H b a s) : foldr f H b (a :: s) = f a (foldr f H b s) :=
quot.induction_on s $ λ l, rfl
@[simp] theorem foldr_add (f : α → β → β) (H b s t) : foldr f H b (s + t) = foldr f H (foldr f H b t) s :=
quotient.induction_on₂ s t $ λ l₁ l₂, foldr_append _ _ _ _
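/- Illustrative sanity check: folding `(+)` sums the elements; the
   left-commutativity witness here is the standard `add_left_comm` lemma,
   and the equality should reduce by `rfl`. -/
example : foldr (+) (λ a b c, add_left_comm a b c) 0 (1 :: 2 :: 3 :: 0 : multiset ℕ) = 6 :=
rfl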
@[simp] theorem coe_foldr (f : α → β → β) (H : left_commutative f) (b : β) (l : list α) :
foldr f H b l = l.foldr f b := rfl
@[simp] theorem coe_foldl (f : β → α → β) (H : right_commutative f) (b : β) (l : list α) :
foldl f H b l = l.foldl f b := rfl
theorem coe_foldr_swap (f : α → β → β) (H : left_commutative f) (b : β) (l : list α) :
foldr f H b l = l.foldl (λ x y, f y x) b :=
(congr_arg (foldr f H b) (coe_reverse l)).symm.trans $ foldr_reverse _ _ _
theorem foldr_swap (f : α → β → β) (H : left_commutative f) (b : β) (s : multiset α) :
foldr f H b s = foldl (λ x y, f y x) (λ x y z, (H _ _ _).symm) b s :=
quot.induction_on s $ λ l, coe_foldr_swap _ _ _ _
theorem foldl_swap (f : β → α → β) (H : right_commutative f) (b : β) (s : multiset α) :
foldl f H b s = foldr (λ x y, f y x) (λ x y z, (H _ _ _).symm) b s :=
(foldr_swap _ _ _ _).symm
/-- Product of a multiset given a commutative monoid structure on `α`.
`prod {a, b, c} = a * b * c` -/
def prod [comm_monoid α] : multiset α → α :=
foldr (*) (λ x y z, by simp [mul_left_comm]) 1
attribute [to_additive multiset.sum._proof_1] prod._proof_1
attribute [to_additive multiset.sum] prod
@[to_additive multiset.sum_eq_foldr]
theorem prod_eq_foldr [comm_monoid α] (s : multiset α) :
prod s = foldr (*) (λ x y z, by simp [mul_left_comm]) 1 s := rfl
@[to_additive multiset.sum_eq_foldl]
theorem prod_eq_foldl [comm_monoid α] (s : multiset α) :
prod s = foldl (*) (λ x y z, by simp [mul_right_comm]) 1 s :=
(foldr_swap _ _ _ _).trans (by simp [mul_comm])
@[simp, to_additive multiset.coe_sum]
theorem coe_prod [comm_monoid α] (l : list α) : prod ↑l = l.prod :=
prod_eq_foldl _
@[simp, to_additive multiset.sum_zero]
theorem prod_zero [comm_monoid α] : @prod α _ 0 = 1 := rfl
@[simp, to_additive multiset.sum_cons]
theorem prod_cons [comm_monoid α] (a : α) (s) : prod (a :: s) = a * prod s :=
foldr_cons _ _ _ _ _
@[to_additive multiset.sum_singleton]
theorem prod_singleton [comm_monoid α] (a : α) : prod (a :: 0) = a := by simp
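/- A small computational check of `prod` on a concrete multiset. -/
example : prod (2 :: 3 :: 4 :: 0 : multiset ℕ) = 24 := rfl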
@[simp, to_additive multiset.sum_add]
theorem prod_add [comm_monoid α] (s t : multiset α) : prod (s + t) = prod s * prod t :=
quotient.induction_on₂ s t $ λ l₁ l₂, by simp
instance sum.is_add_monoid_hom [add_comm_monoid α] : is_add_monoid_hom (sum : multiset α → α) :=
by refine_struct {..}; simp
lemma prod_smul {α : Type*} [comm_monoid α] (m : multiset α) :
∀n, (add_monoid.smul n m).prod = m.prod ^ n
| 0 := rfl
| (n + 1) :=
by rw [add_monoid.add_smul, add_monoid.one_smul, _root_.pow_add, _root_.pow_one, prod_add, prod_smul n]
@[simp] theorem prod_repeat [comm_monoid α] (a : α) (n : ℕ) : prod (multiset.repeat a n) = a ^ n :=
by simp [repeat, list.prod_repeat]
@[simp] theorem sum_repeat [add_comm_monoid α] : ∀ (a : α) (n : ℕ), sum (multiset.repeat a n) = n • a :=
@prod_repeat (multiplicative α) _
attribute [to_additive multiset.sum_repeat] prod_repeat
@[simp] lemma prod_map_one [comm_monoid γ] {m : multiset α} :
prod (m.map (λa, (1 : γ))) = (1 : γ) :=
multiset.induction_on m (by simp) (by simp)
@[simp] lemma sum_map_zero [add_comm_monoid γ] {m : multiset α} :
sum (m.map (λa, (0 : γ))) = (0 : γ) :=
multiset.induction_on m (by simp) (by simp)
attribute [to_additive multiset.sum_map_zero] prod_map_one
@[simp, to_additive multiset.sum_map_add]
lemma prod_map_mul [comm_monoid γ] {m : multiset α} {f g : α → γ} :
prod (m.map $ λa, f a * g a) = prod (m.map f) * prod (m.map g) :=
multiset.induction_on m (by simp) (assume a m ih, by simp [ih]; cc)
lemma prod_map_prod_map [comm_monoid γ] (m : multiset α) (n : multiset β) {f : α → β → γ} :
prod (m.map $ λa, prod $ n.map $ λb, f a b) = prod (n.map $ λb, prod $ m.map $ λa, f a b) :=
multiset.induction_on m (by simp) (assume a m ih, by simp [ih])
lemma sum_map_sum_map [add_comm_monoid γ] : ∀ (m : multiset α) (n : multiset β) {f : α → β → γ},
sum (m.map $ λa, sum $ n.map $ λb, f a b) = sum (n.map $ λb, sum $ m.map $ λa, f a b) :=
@prod_map_prod_map _ _ (multiplicative γ) _
attribute [to_additive multiset.sum_map_sum_map] prod_map_prod_map
lemma sum_map_mul_left [semiring β] {b : β} {s : multiset α} {f : α → β} :
sum (s.map (λa, b * f a)) = b * sum (s.map f) :=
multiset.induction_on s (by simp) (assume a s ih, by simp [ih, mul_add])
lemma sum_map_mul_right [semiring β] {b : β} {s : multiset α} {f : α → β} :
sum (s.map (λa, f a * b)) = sum (s.map f) * b :=
multiset.induction_on s (by simp) (assume a s ih, by simp [ih, add_mul])
lemma prod_hom [comm_monoid α] [comm_monoid β] (f : α → β) [is_monoid_hom f] (s : multiset α) :
(s.map f).prod = f s.prod :=
multiset.induction_on s (by simp [is_monoid_hom.map_one f])
(by simp [is_monoid_hom.map_mul f] {contextual := tt})
lemma dvd_prod [comm_semiring α] {a : α} {s : multiset α} : a ∈ s → a ∣ s.prod :=
quotient.induction_on s (λ l a h, by simpa using list.dvd_prod h) a
lemma sum_hom [add_comm_monoid α] [add_comm_monoid β] (f : α → β) [is_add_monoid_hom f] (s : multiset α) :
(s.map f).sum = f s.sum :=
multiset.induction_on s (by simp [is_add_monoid_hom.map_zero f])
(by simp [is_add_monoid_hom.map_add f] {contextual := tt})
attribute [to_additive multiset.sum_hom] multiset.prod_hom
lemma le_sum_of_subadditive [add_comm_monoid α] [ordered_comm_monoid β]
(f : α → β) (h_zero : f 0 = 0) (h_add : ∀x y, f (x + y) ≤ f x + f y) (s : multiset α) :
f s.sum ≤ (s.map f).sum :=
multiset.induction_on s (le_of_eq h_zero) $
assume a s ih, by rw [sum_cons, map_cons, sum_cons];
from le_trans (h_add a s.sum) (add_le_add_left' ih)
lemma abs_sum_le_sum_abs [discrete_linear_ordered_field α] {s : multiset α} :
abs s.sum ≤ (s.map abs).sum :=
le_sum_of_subadditive _ abs_zero abs_add s
/- join -/
/-- `join S`, where `S` is a multiset of multisets, is the lift of the list join
operation, that is, the multiset sum of all the members of `S` (multiplicities add up).
join {{1, 2}, {1, 2}, {0, 1}} = {0, 1, 1, 1, 2, 2} -/
def join : multiset (multiset α) → multiset α := sum
theorem coe_join : ∀ L : list (list α),
join (L.map (@coe _ (multiset α) _) : multiset (multiset α)) = L.join
| [] := rfl
| (l :: L) := congr_arg (λ s : multiset α, ↑l + s) (coe_join L)
@[simp] theorem join_zero : @join α 0 = 0 := rfl
@[simp] theorem join_cons (s S) : @join α (s :: S) = s + join S :=
sum_cons _ _
@[simp] theorem join_add (S T) : @join α (S + T) = join S + join T :=
sum_add _ _
@[simp] theorem mem_join {a S} : a ∈ @join α S ↔ ∃ s ∈ S, a ∈ s :=
multiset.induction_on S (by simp) $
by simp [or_and_distrib_right, exists_or_distrib] {contextual := tt}
@[simp] theorem card_join (S) : card (@join α S) = sum (map card S) :=
multiset.induction_on S (by simp) (by simp)
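/- A small computational check: `join` adds up the member multisets. -/
example : join ((1 :: 2 :: 0) :: (3 :: 0) :: 0 : multiset (multiset ℕ)) = 1 :: 2 :: 3 :: 0 :=
rfl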
/- bind -/
/-- `bind s f` is the monad bind operation, defined as `join (map f s)`.
It is the union of `f a` as `a` ranges over `s`. -/
def bind (s : multiset α) (f : α → multiset β) : multiset β :=
join (map f s)
@[simp] theorem coe_bind (l : list α) (f : α → list β) :
@bind α β l (λ a, f a) = l.bind f :=
by rw [list.bind, ← coe_join, list.map_map]; refl
@[simp] theorem zero_bind (f : α → multiset β) : bind 0 f = 0 := rfl
@[simp] theorem cons_bind (a s) (f : α → multiset β) : bind (a::s) f = f a + bind s f :=
by simp [bind]
@[simp] theorem add_bind (s t) (f : α → multiset β) : bind (s + t) f = bind s f + bind t f :=
by simp [bind]
@[simp] theorem bind_zero (s : multiset α) : bind s (λa, 0 : α → multiset β) = 0 :=
by simp [bind, -map_const, join]
@[simp] theorem bind_add (s : multiset α) (f g : α → multiset β) :
bind s (λa, f a + g a) = bind s f + bind s g :=
by simp [bind, join]
@[simp] theorem bind_cons (s : multiset α) (f : α → β) (g : α → multiset β) :
bind s (λa, f a :: g a) = map f s + bind s g :=
multiset.induction_on s (by simp) (by simp {contextual := tt})
@[simp] theorem mem_bind {b s} {f : α → multiset β} : b ∈ bind s f ↔ ∃ a ∈ s, b ∈ f a :=
by simp [bind]; simp [-exists_and_distrib_right, exists_and_distrib_right.symm];
rw exists_swap; simp [and_assoc]
@[simp] theorem card_bind (s) (f : α → multiset β) : card (bind s f) = sum (map (card ∘ f) s) :=
by simp [bind]
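/- Illustrative sanity check: `bind` maps and then joins, so each element of the
   source contributes its whole image. -/
example : bind (1 :: 2 :: 0 : multiset ℕ) (λ n, n :: n :: 0) = 1 :: 1 :: 2 :: 2 :: 0 := rfl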
lemma bind_congr {f g : α → multiset β} {m : multiset α} : (∀a∈m, f a = g a) → bind m f = bind m g :=
by simp [bind] {contextual := tt}
lemma bind_hcongr {β' : Type*} {m : multiset α} {f : α → multiset β} {f' : α → multiset β'}
(h : β = β') (hf : ∀a∈m, f a == f' a) : bind m f == bind m f' :=
begin subst h, simp at hf, simp [bind_congr hf] end
lemma map_bind (m : multiset α) (n : α → multiset β) (f : β → γ) :
map f (bind m n) = bind m (λa, map f (n a)) :=
multiset.induction_on m (by simp) (by simp {contextual := tt})
lemma bind_map (m : multiset α) (n : β → multiset γ) (f : α → β) :
bind (map f m) n = bind m (λa, n (f a)) :=
multiset.induction_on m (by simp) (by simp {contextual := tt})
lemma bind_assoc {s : multiset α} {f : α → multiset β} {g : β → multiset γ} :
(s.bind f).bind g = s.bind (λa, (f a).bind g) :=
multiset.induction_on s (by simp) (by simp {contextual := tt})
lemma bind_bind (m : multiset α) (n : multiset β) {f : α → β → multiset γ} :
(bind m $ λa, bind n $ λb, f a b) = (bind n $ λb, bind m $ λa, f a b) :=
multiset.induction_on m (by simp) (by simp {contextual := tt})
lemma bind_map_comm (m : multiset α) (n : multiset β) {f : α → β → γ} :
(bind m $ λa, n.map $ λb, f a b) = (bind n $ λb, m.map $ λa, f a b) :=
multiset.induction_on m (by simp) (by simp {contextual := tt})
@[simp, to_additive multiset.sum_bind]
lemma prod_bind [comm_monoid β] (s : multiset α) (t : α → multiset β) :
prod (bind s t) = prod (s.map $ λa, prod (t a)) :=
multiset.induction_on s (by simp) (assume a s ih, by simp [ih, cons_bind])
/- product -/
/-- The multiplicity of `(a, b)` in `product s t` is
the product of the multiplicity of `a` in `s` and the multiplicity of `b` in `t`. -/
def product (s : multiset α) (t : multiset β) : multiset (α × β) :=
s.bind $ λ a, t.map $ prod.mk a
@[simp] theorem coe_product (l₁ : list α) (l₂ : list β) :
@product α β l₁ l₂ = l₁.product l₂ :=
by rw [product, list.product, ← coe_bind]; simp
@[simp] theorem zero_product (t) : @product α β 0 t = 0 := rfl
@[simp] theorem cons_product (a : α) (s : multiset α) (t : multiset β) :
product (a :: s) t = map (prod.mk a) t + product s t :=
by simp [product]
@[simp] theorem product_singleton (a : α) (b : β) : product (a::0) (b::0) = (a,b)::0 := rfl
@[simp] theorem add_product (s t : multiset α) (u : multiset β) :
product (s + t) u = product s u + product t u :=
by simp [product]
@[simp] theorem product_add (s : multiset α) : ∀ t u : multiset β,
product s (t + u) = product s t + product s u :=
multiset.induction_on s (λ t u, rfl) $ λ a s IH t u,
by rw [cons_product, IH]; simp
@[simp] theorem mem_product {s t} : ∀ {p : α × β}, p ∈ @product α β s t ↔ p.1 ∈ s ∧ p.2 ∈ t
| (a, b) := by simp [product, and.left_comm]
@[simp] theorem card_product (s : multiset α) (t : multiset β) : card (product s t) = card s * card t :=
by simp [product, repeat, (∘), mul_comm]
/- sigma -/
section
variable {σ : α → Type*}
/-- `sigma s t` is the dependent version of `product`. It consists of the dependent
pairs `⟨a, b⟩` as `a` ranges over `s` and `b` ranges over `t a`. -/
protected def sigma (s : multiset α) (t : Π a, multiset (σ a)) : multiset (Σ a, σ a) :=
s.bind $ λ a, (t a).map $ sigma.mk a
@[simp] theorem coe_sigma (l₁ : list α) (l₂ : Π a, list (σ a)) :
@multiset.sigma α σ l₁ (λ a, l₂ a) = l₁.sigma l₂ :=
by rw [multiset.sigma, list.sigma, ← coe_bind]; simp
@[simp] theorem zero_sigma (t) : @multiset.sigma α σ 0 t = 0 := rfl
@[simp] theorem cons_sigma (a : α) (s : multiset α) (t : Π a, multiset (σ a)) :
(a :: s).sigma t = map (sigma.mk a) (t a) + s.sigma t :=
by simp [multiset.sigma]
@[simp] theorem sigma_singleton (a : α) (b : α → β) :
(a::0).sigma (λ a, b a::0) = ⟨a, b a⟩::0 := rfl
@[simp] theorem add_sigma (s t : multiset α) (u : Π a, multiset (σ a)) :
(s + t).sigma u = s.sigma u + t.sigma u :=
by simp [multiset.sigma]
@[simp] theorem sigma_add (s : multiset α) : ∀ t u : Π a, multiset (σ a),
s.sigma (λ a, t a + u a) = s.sigma t + s.sigma u :=
multiset.induction_on s (λ t u, rfl) $ λ a s IH t u,
by rw [cons_sigma, IH]; simp
@[simp] theorem mem_sigma {s t} : ∀ {p : Σ a, σ a},
p ∈ @multiset.sigma α σ s t ↔ p.1 ∈ s ∧ p.2 ∈ t p.1
| ⟨a, b⟩ := by simp [multiset.sigma, and_assoc, and.left_comm]
@[simp] theorem card_sigma (s : multiset α) (t : Π a, multiset (σ a)) :
card (s.sigma t) = sum (map (λ a, card (t a)) s) :=
by simp [multiset.sigma, (∘)]
end
/- map for partial functions -/
/-- Lift of the list `pmap` operation. Map a partial function `f` over a multiset
`s` whose elements are all in the domain of `f`. -/
def pmap {p : α → Prop} (f : Π a, p a → β) (s : multiset α) : (∀ a ∈ s, p a) → multiset β :=
quot.rec_on s (λ l H, ↑(pmap f l H)) $ λ l₁ l₂ (pp : l₁ ~ l₂),
funext $ λ (H₂ : ∀ a ∈ l₂, p a),
have H₁ : ∀ a ∈ l₁, p a, from λ a h, H₂ a ((mem_of_perm pp).1 h),
have ∀ {s₂ e H}, @eq.rec (multiset α) l₁
(λ s, (∀ a ∈ s, p a) → multiset β) (λ _, ↑(pmap f l₁ H₁))
s₂ e H = ↑(pmap f l₁ H₁), by intros s₂ e _; subst e,
this.trans $ quot.sound $ perm_pmap f pp
@[simp] theorem coe_pmap {p : α → Prop} (f : Π a, p a → β)
(l : list α) (H : ∀ a ∈ l, p a) : pmap f l H = l.pmap f H := rfl
@[simp] lemma pmap_zero {p : α → Prop} (f : Π a, p a → β) (h : ∀a∈(0:multiset α), p a) :
pmap f 0 h = 0 := rfl
@[simp] lemma pmap_cons {p : α → Prop} (f : Π a, p a → β) (a : α) (m : multiset α) :
∀(h : ∀b∈a::m, p b), pmap f (a :: m) h =
f a (h a (mem_cons_self a m)) :: pmap f m (λa ha, h a $ mem_cons_of_mem ha) :=
quotient.induction_on m $ assume l h, rfl
/-- "Attach" a proof that `a ∈ s` to each element `a` in `s` to produce
a multiset on `{x // x ∈ s}`. -/
def attach (s : multiset α) : multiset {x // x ∈ s} := pmap subtype.mk s (λ a, id)
@[simp] theorem coe_attach (l : list α) :
@eq (multiset {x // x ∈ l}) (@attach α l) l.attach := rfl
theorem pmap_eq_map (p : α → Prop) (f : α → β) (s : multiset α) :
∀ H, @pmap _ _ p (λ a _, f a) s H = map f s :=
quot.induction_on s $ λ l H, congr_arg coe $ pmap_eq_map p f l H
theorem pmap_congr {p q : α → Prop} {f : Π a, p a → β} {g : Π a, q a → β}
(s : multiset α) {H₁ H₂} (h : ∀ a h₁ h₂, f a h₁ = g a h₂) :
pmap f s H₁ = pmap g s H₂ :=
quot.induction_on s (λ l H₁ H₂, congr_arg coe $ pmap_congr l h) H₁ H₂
theorem map_pmap {p : α → Prop} (g : β → γ) (f : Π a, p a → β)
(s) : ∀ H, map g (pmap f s H) = pmap (λ a h, g (f a h)) s H :=
quot.induction_on s $ λ l H, congr_arg coe $ map_pmap g f l H
theorem pmap_eq_map_attach {p : α → Prop} (f : Π a, p a → β)
(s) : ∀ H, pmap f s H = s.attach.map (λ x, f x.1 (H _ x.2)) :=
quot.induction_on s $ λ l H, congr_arg coe $ pmap_eq_map_attach f l H
theorem attach_map_val (s : multiset α) : s.attach.map subtype.val = s :=
quot.induction_on s $ λ l, congr_arg coe $ attach_map_val l
@[simp] theorem mem_attach (s : multiset α) : ∀ x, x ∈ s.attach :=
quot.induction_on s $ λ l, mem_attach _
@[simp] theorem mem_pmap {p : α → Prop} {f : Π a, p a → β}
{s H b} : b ∈ pmap f s H ↔ ∃ a (h : a ∈ s), f a (H a h) = b :=
quot.induction_on s (λ l H, mem_pmap) H
@[simp] theorem card_pmap {p : α → Prop} (f : Π a, p a → β)
(s H) : card (pmap f s H) = card s :=
quot.induction_on s (λ l H, length_pmap) H
@[simp] theorem card_attach {m : multiset α} : card (attach m) = card m := card_pmap _ _ _
@[simp] lemma attach_zero : (0 : multiset α).attach = 0 := rfl
lemma attach_cons (a : α) (m : multiset α) :
(a :: m).attach = ⟨a, mem_cons_self a m⟩ :: (m.attach.map $ λp, ⟨p.1, mem_cons_of_mem p.2⟩) :=
quotient.induction_on m $ assume l, congr_arg coe $ congr_arg (list.cons _) $
by rw [list.map_pmap]; exact list.pmap_congr _ (assume a' h₁ h₂, subtype.eq rfl)
section decidable_pi_exists
variables {m : multiset α}
protected def decidable_forall_multiset {p : α → Prop} [hp : ∀a, decidable (p a)] :
decidable (∀a∈m, p a) :=
quotient.rec_on_subsingleton m (λl, decidable_of_iff (∀a∈l, p a) $ by simp)
instance decidable_dforall_multiset {p : Πa∈m, Prop} [hp : ∀a (h : a ∈ m), decidable (p a h)] :
decidable (∀a (h : a ∈ m), p a h) :=
decidable_of_decidable_of_iff
(@multiset.decidable_forall_multiset {a // a ∈ m} m.attach (λa, p a.1 a.2) _)
(iff.intro (assume h a ha, h ⟨a, ha⟩ (mem_attach _ _)) (assume h ⟨a, ha⟩ _, h _ _))
/-- decidable equality for functions whose domain is bounded by multisets -/
instance decidable_eq_pi_multiset {β : α → Type*} [h : ∀a, decidable_eq (β a)] :
decidable_eq (Πa∈m, β a) :=
assume f g, decidable_of_iff (∀a (h : a ∈ m), f a h = g a h) (by simp [function.funext_iff])
def decidable_exists_multiset {p : α → Prop} [decidable_pred p] :
decidable (∃ x ∈ m, p x) :=
quotient.rec_on_subsingleton m list.decidable_exists_mem
instance decidable_dexists_multiset {p : Πa∈m, Prop} [hp : ∀a (h : a ∈ m), decidable (p a h)] :
decidable (∃a (h : a ∈ m), p a h) :=
decidable_of_decidable_of_iff
(@multiset.decidable_exists_multiset {a // a ∈ m} m.attach (λa, p a.1 a.2) _)
(iff.intro (λ ⟨⟨a, ha₁⟩, _, ha₂⟩, ⟨a, ha₁, ha₂⟩)
(λ ⟨a, ha₁, ha₂⟩, ⟨⟨a, ha₁⟩, mem_attach _ _, ha₂⟩))
end decidable_pi_exists
/- subtraction -/
section
variables [decidable_eq α] {s t u : multiset α} {a b : α}
/-- `s - t` is the multiset such that
`count a (s - t) = count a s - count a t` for all `a`
(where `-` on the right is truncated subtraction on `ℕ`). -/
protected def sub (s t : multiset α) : multiset α :=
quotient.lift_on₂ s t (λ l₁ l₂, (l₁.diff l₂ : multiset α)) $ λ v₁ v₂ w₁ w₂ p₁ p₂,
quot.sound $ perm_diff_right w₁ p₂ ▸ perm_diff_left _ p₁
instance : has_sub (multiset α) := ⟨multiset.sub⟩
@[simp] theorem coe_sub (s t : list α) : (s - t : multiset α) = (s.diff t : list α) := rfl
theorem sub_eq_fold_erase (s t : multiset α) : s - t = foldl erase erase_comm s t :=
quotient.induction_on₂ s t $ λ l₁ l₂,
show ↑(l₁.diff l₂) = foldl erase erase_comm ↑l₁ ↑l₂,
by rw diff_eq_foldl l₁ l₂; exact foldl_hom _ _ _ _ (λ x y, rfl) _
@[simp] theorem sub_zero (s : multiset α) : s - 0 = s :=
quot.induction_on s $ λ l, rfl
@[simp] theorem sub_cons (a : α) (s t : multiset α) : s - a::t = s.erase a - t :=
quotient.induction_on₂ s t $ λ l₁ l₂, congr_arg coe $ diff_cons _ _ _
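/- Illustrative sanity check: subtraction removes occurrences only up to the
   available multiplicity, so the absent `3` is ignored. -/
example : (1 :: 1 :: 2 :: 0 : multiset ℕ) - (1 :: 3 :: 0) = 1 :: 2 :: 0 := rfl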
theorem add_sub_of_le (h : s ≤ t) : s + (t - s) = t :=
begin
revert t,
refine multiset.induction_on s (by simp) (λ a s IH t h, _),
have := cons_erase (mem_of_le h (mem_cons_self _ _)),
rw [cons_add, sub_cons, IH, this],
exact (cons_le_cons_iff a).1 (this.symm ▸ h)
end
theorem sub_add' : s - (t + u) = s - t - u :=
quotient.induction_on₃ s t u $
λ l₁ l₂ l₃, congr_arg coe $ diff_append _ _ _
theorem sub_add_cancel (h : t ≤ s) : s - t + t = s :=
by rw [add_comm, add_sub_of_le h]
@[simp] theorem add_sub_cancel_left (s : multiset α) : ∀ t, s + t - s = t :=
multiset.induction_on s (by simp)
(λ a s IH t, by rw [cons_add, sub_cons, erase_cons_head, IH])
@[simp] theorem add_sub_cancel (s t : multiset α) : s + t - t = s :=
by rw [add_comm, add_sub_cancel_left]
theorem sub_le_sub_right (h : s ≤ t) (u) : s - u ≤ t - u :=
by revert s t h; exact
multiset.induction_on u (by simp {contextual := tt})
(λ a u IH s t h, by simp [IH, erase_le_erase a h])
theorem sub_le_sub_left (h : s ≤ t) : ∀ u, u - t ≤ u - s :=
le_induction_on h $ λ l₁ l₂ h, begin
induction h with l₁ l₂ a s IH l₁ l₂ a s IH; intro u,
{ refl },
{ rw [← cons_coe, sub_cons],
exact le_trans (sub_le_sub_right (erase_le _ _) _) (IH u) },
{ rw [← cons_coe, sub_cons, ← cons_coe, sub_cons],
exact IH _ }
end
theorem sub_le_iff_le_add : s - t ≤ u ↔ s ≤ u + t :=
by revert s; exact
multiset.induction_on t (by simp)
(λ a t IH s, by simp [IH, erase_le_iff_le_cons])
theorem le_sub_add (s t : multiset α) : s ≤ s - t + t :=
sub_le_iff_le_add.1 (le_refl _)
theorem sub_le_self (s t : multiset α) : s - t ≤ s :=
sub_le_iff_le_add.2 (le_add_right _ _)
@[simp] theorem card_sub {s t : multiset α} (h : t ≤ s) : card (s - t) = card s - card t :=
(nat.sub_eq_of_eq_add $ by rw [add_comm, ← card_add, sub_add_cancel h]).symm
/- union -/
/-- `s ∪ t` is the lattice join operation with respect to the
multiset `≤`. The multiplicity of `a` in `s ∪ t` is the maximum
of the multiplicities in `s` and `t`. -/
def union (s t : multiset α) : multiset α := s - t + t
instance : has_union (multiset α) := ⟨union⟩
theorem union_def (s t : multiset α) : s ∪ t = s - t + t := rfl
theorem le_union_left (s t : multiset α) : s ≤ s ∪ t := le_sub_add _ _
theorem le_union_right (s t : multiset α) : t ≤ s ∪ t := le_add_left _ _
theorem eq_union_left : t ≤ s → s ∪ t = s := sub_add_cancel
theorem union_le_union_right (h : s ≤ t) (u) : s ∪ u ≤ t ∪ u :=
add_le_add_right (sub_le_sub_right h _) u
theorem union_le (h₁ : s ≤ u) (h₂ : t ≤ u) : s ∪ t ≤ u :=
by rw ← eq_union_left h₂; exact union_le_union_right h₁ t
@[simp] theorem mem_union : a ∈ s ∪ t ↔ a ∈ s ∨ a ∈ t :=
⟨λ h, (mem_add.1 h).imp_left (mem_of_le $ sub_le_self _ _),
or.rec (mem_of_le $ le_union_left _ _) (mem_of_le $ le_union_right _ _)⟩
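-- Quick illustration (sketch): `eq_union_left` specialises to idempotence of `∪`.
example (s : multiset α) : s ∪ s = s := eq_union_left (le_refl s)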
@[simp] theorem map_union [decidable_eq β] {f : α → β} (finj : function.injective f) {s t : multiset α} :
map f (s ∪ t) = map f s ∪ map f t :=
quotient.induction_on₂ s t $ λ l₁ l₂,
congr_arg coe (by rw [list.map_append f, list.map_diff finj])
/- inter -/
/-- `s ∩ t` is the lattice meet operation with respect to the
multiset `≤`. The multiplicity of `a` in `s ∩ t` is the minimum
of the multiplicities in `s` and `t`. -/
def inter (s t : multiset α) : multiset α :=
quotient.lift_on₂ s t (λ l₁ l₂, (l₁.bag_inter l₂ : multiset α)) $ λ v₁ v₂ w₁ w₂ p₁ p₂,
quot.sound $ perm_bag_inter_right w₁ p₂ ▸ perm_bag_inter_left _ p₁
instance : has_inter (multiset α) := ⟨inter⟩
@[simp] theorem inter_zero (s : multiset α) : s ∩ 0 = 0 :=
quot.induction_on s $ λ l, congr_arg coe l.bag_inter_nil
@[simp] theorem zero_inter (s : multiset α) : 0 ∩ s = 0 :=
quot.induction_on s $ λ l, congr_arg coe l.nil_bag_inter
@[simp] theorem cons_inter_of_pos {a} (s : multiset α) {t} :
a ∈ t → (a :: s) ∩ t = a :: s ∩ t.erase a :=
quotient.induction_on₂ s t $ λ l₁ l₂ h,
congr_arg coe $ cons_bag_inter_of_pos _ h
@[simp] theorem cons_inter_of_neg {a} (s : multiset α) {t} :
a ∉ t → (a :: s) ∩ t = s ∩ t :=
quotient.induction_on₂ s t $ λ l₁ l₂ h,
congr_arg coe $ cons_bag_inter_of_neg _ h
theorem inter_le_left (s t : multiset α) : s ∩ t ≤ s :=
quotient.induction_on₂ s t $ λ l₁ l₂,
subperm_of_sublist $ bag_inter_sublist_left _ _
theorem inter_le_right (s : multiset α) : ∀ t, s ∩ t ≤ t :=
multiset.induction_on s (λ t, (zero_inter t).symm ▸ zero_le _) $
λ a s IH t, if h : a ∈ t
then by simpa [h] using cons_le_cons a (IH (t.erase a))
else by simp [h, IH]
theorem le_inter (h₁ : s ≤ t) (h₂ : s ≤ u) : s ≤ t ∩ u :=
begin
revert s u, refine multiset.induction_on t _ (λ a t IH, _); intros,
{ simp [h₁] },
by_cases a ∈ u,
{ rw [cons_inter_of_pos _ h, ← erase_le_iff_le_cons],
exact IH (erase_le_iff_le_cons.2 h₁) (erase_le_erase _ h₂) },
{ rw cons_inter_of_neg _ h,
exact IH ((le_cons_of_not_mem $ mt (mem_of_le h₂) h).1 h₁) h₂ }
end
@[simp] theorem mem_inter : a ∈ s ∩ t ↔ a ∈ s ∧ a ∈ t :=
⟨λ h, ⟨mem_of_le (inter_le_left _ _) h, mem_of_le (inter_le_right _ _) h⟩,
λ ⟨h₁, h₂⟩, by rw [← cons_erase h₁, cons_inter_of_pos _ h₂]; apply mem_cons_self⟩
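-- Quick illustration (sketch): chaining `inter_le_left` with `le_union_left`,
-- the intersection always lies below the union.
example (s t : multiset α) : s ∩ t ≤ s ∪ t :=
le_trans (inter_le_left _ _) (le_union_left _ _)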
instance : lattice (multiset α) :=
{ sup := (∪),
sup_le := @union_le _ _,
le_sup_left := le_union_left,
le_sup_right := le_union_right,
inf := (∩),
le_inf := @le_inter _ _,
inf_le_left := inter_le_left,
inf_le_right := inter_le_right,
..@multiset.partial_order α }
@[simp] theorem sup_eq_union (s t : multiset α) : s ⊔ t = s ∪ t := rfl
@[simp] theorem inf_eq_inter (s t : multiset α) : s ⊓ t = s ∩ t := rfl
@[simp] theorem le_inter_iff : s ≤ t ∩ u ↔ s ≤ t ∧ s ≤ u := le_inf_iff
@[simp] theorem union_le_iff : s ∪ t ≤ u ↔ s ≤ u ∧ t ≤ u := sup_le_iff
instance : semilattice_inf_bot (multiset α) :=
{ bot := 0, bot_le := zero_le, ..multiset.lattice.lattice }
theorem union_comm (s t : multiset α) : s ∪ t = t ∪ s := sup_comm
theorem inter_comm (s t : multiset α) : s ∩ t = t ∩ s := inf_comm
theorem eq_union_right (h : s ≤ t) : s ∪ t = t :=
by rw [union_comm, eq_union_left h]
theorem union_le_union_left (h : s ≤ t) (u) : u ∪ s ≤ u ∪ t :=
sup_le_sup_left h _
theorem union_le_add (s t : multiset α) : s ∪ t ≤ s + t :=
union_le (le_add_right _ _) (le_add_left _ _)
theorem union_add_distrib (s t u : multiset α) : (s ∪ t) + u = (s + u) ∪ (t + u) :=
by simpa [(∪), union, eq_comm] using show s + u - (t + u) = s - t,
by rw [add_comm t, sub_add', add_sub_cancel]
theorem add_union_distrib (s t u : multiset α) : s + (t ∪ u) = (s + t) ∪ (s + u) :=
by rw [add_comm, union_add_distrib, add_comm s, add_comm s]
theorem cons_union_distrib (a : α) (s t : multiset α) : a :: (s ∪ t) = (a :: s) ∪ (a :: t) :=
by simpa using add_union_distrib (a::0) s t
theorem inter_add_distrib (s t u : multiset α) : (s ∩ t) + u = (s + u) ∩ (t + u) :=
begin
by_contra h,
cases lt_iff_cons_le.1 (lt_of_le_of_ne (le_inter
(add_le_add_right (inter_le_left s t) u)
(add_le_add_right (inter_le_right s t) u)) h) with a hl,
rw ← cons_add at hl,
exact not_le_of_lt (lt_cons_self (s ∩ t) a) (le_inter
(le_of_add_le_add_right (le_trans hl (inter_le_left _ _)))
(le_of_add_le_add_right (le_trans hl (inter_le_right _ _))))
end
theorem add_inter_distrib (s t u : multiset α) : s + (t ∩ u) = (s + t) ∩ (s + u) :=
by rw [add_comm, inter_add_distrib, add_comm s, add_comm s]
theorem cons_inter_distrib (a : α) (s t : multiset α) : a :: (s ∩ t) = (a :: s) ∩ (a :: t) :=
by simp
theorem union_add_inter (s t : multiset α) : s ∪ t + s ∩ t = s + t :=
begin
apply le_antisymm,
{ rw union_add_distrib,
refine union_le (add_le_add_left (inter_le_right _ _) _) _,
rw add_comm, exact add_le_add_right (inter_le_left _ _) _ },
{ rw [add_comm, add_inter_distrib],
refine le_inter (add_le_add_right (le_union_right _ _) _) _,
rw add_comm, exact add_le_add_right (le_union_left _ _) _ }
end
theorem sub_add_inter (s t : multiset α) : s - t + s ∩ t = s :=
begin
rw [inter_comm],
revert s, refine multiset.induction_on t (by simp) (λ a t IH s, _),
by_cases a ∈ s,
{ rw [cons_inter_of_pos _ h, sub_cons, add_cons, IH, cons_erase h] },
{ rw [cons_inter_of_neg _ h, sub_cons, erase_of_not_mem h, IH] }
end
theorem sub_inter (s t : multiset α) : s - (s ∩ t) = s - t :=
add_right_cancel $
by rw [sub_add_inter s t, sub_add_cancel (inter_le_left _ _)]
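-- Quick illustration (sketch): `union_add_inter` yields the inclusion-exclusion
-- identity for cardinalities.
example (s t : multiset α) : card (s ∪ t) + card (s ∩ t) = card s + card t :=
by rw [← card_add, union_add_inter, card_add]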
end
/- filter -/
section
variables {p : α → Prop} [decidable_pred p]
/-- `filter p s` returns the elements in `s` (with the same multiplicities)
which satisfy `p`, and removes the rest. -/
def filter (p : α → Prop) [h : decidable_pred p] (s : multiset α) : multiset α :=
quot.lift_on s (λ l, (filter p l : multiset α))
(λ l₁ l₂ h, quot.sound $ perm_filter p h)
@[simp] theorem coe_filter (p : α → Prop) [h : decidable_pred p]
(l : list α) : filter p (↑l) = l.filter p := rfl
@[simp] theorem filter_zero (p : α → Prop) [h : decidable_pred p] : filter p 0 = 0 := rfl
@[simp] theorem filter_cons_of_pos {a : α} (s) : p a → filter p (a::s) = a :: filter p s :=
quot.induction_on s $ λ l h, congr_arg coe $ filter_cons_of_pos l h
@[simp] theorem filter_cons_of_neg {a : α} (s) : ¬ p a → filter p (a::s) = filter p s :=
quot.induction_on s $ λ l h, @congr_arg _ _ _ _ coe $ filter_cons_of_neg l h
lemma filter_congr {p q : α → Prop} [decidable_pred p] [decidable_pred q]
{s : multiset α} : (∀ x ∈ s, p x ↔ q x) → filter p s = filter q s :=
quot.induction_on s $ λ l h, congr_arg coe $ filter_congr h
@[simp] theorem filter_add (s t : multiset α) :
filter p (s + t) = filter p s + filter p t :=
quotient.induction_on₂ s t $ λ l₁ l₂, congr_arg coe $ filter_append _ _
@[simp] theorem filter_le (s : multiset α) : filter p s ≤ s :=
quot.induction_on s $ λ l, subperm_of_sublist $ filter_sublist _
@[simp] theorem filter_subset (s : multiset α) : filter p s ⊆ s :=
subset_of_le $ filter_le _
@[simp] theorem mem_filter {a : α} {s} : a ∈ filter p s ↔ a ∈ s ∧ p a :=
quot.induction_on s $ λ l, mem_filter
theorem of_mem_filter {a : α} {s} (h : a ∈ filter p s) : p a :=
(mem_filter.1 h).2
theorem mem_of_mem_filter {a : α} {s} (h : a ∈ filter p s) : a ∈ s :=
(mem_filter.1 h).1
theorem mem_filter_of_mem {a : α} {l} (m : a ∈ l) (h : p a) : a ∈ filter p l :=
mem_filter.2 ⟨m, h⟩
theorem filter_eq_self {s} : filter p s = s ↔ ∀ a ∈ s, p a :=
quot.induction_on s $ λ l, iff.trans ⟨λ h,
eq_of_sublist_of_length_eq (filter_sublist _) (@congr_arg _ _ _ _ card h),
congr_arg coe⟩ filter_eq_self
theorem filter_eq_nil {s} : filter p s = 0 ↔ ∀ a ∈ s, ¬p a :=
quot.induction_on s $ λ l, iff.trans ⟨λ h,
eq_nil_of_length_eq_zero (@congr_arg _ _ _ _ card h),
congr_arg coe⟩ filter_eq_nil
theorem filter_le_filter {s t} (h : s ≤ t) : filter p s ≤ filter p t :=
le_induction_on h $ λ l₁ l₂ h, subperm_of_sublist $ filter_sublist_filter h
theorem le_filter {s t} : s ≤ filter p t ↔ s ≤ t ∧ ∀ a ∈ s, p a :=
⟨λ h, ⟨le_trans h (filter_le _), λ a m, of_mem_filter (mem_of_le h m)⟩,
λ ⟨h, al⟩, filter_eq_self.2 al ▸ filter_le_filter h⟩
@[simp] theorem filter_sub [decidable_eq α] (s t : multiset α) :
filter p (s - t) = filter p s - filter p t :=
begin
revert s, refine multiset.induction_on t (by simp) (λ a t IH s, _),
rw [sub_cons, IH],
by_cases p a,
{ rw [filter_cons_of_pos _ h, sub_cons], congr,
by_cases m : a ∈ s,
{ rw [← cons_inj_right a, ← filter_cons_of_pos _ h,
cons_erase (mem_filter_of_mem m h), cons_erase m] },
{ rw [erase_of_not_mem m, erase_of_not_mem (mt mem_of_mem_filter m)] } },
{ rw [filter_cons_of_neg _ h],
by_cases m : a ∈ s,
{ rw [(by rw filter_cons_of_neg _ h : filter p (erase s a) = filter p (a :: erase s a)),
cons_erase m] },
{ rw [erase_of_not_mem m] } }
end
@[simp] theorem filter_union [decidable_eq α] (s t : multiset α) :
filter p (s ∪ t) = filter p s ∪ filter p t :=
by simp [(∪), union]
@[simp] theorem filter_inter [decidable_eq α] (s t : multiset α) :
filter p (s ∩ t) = filter p s ∩ filter p t :=
le_antisymm (le_inter
(filter_le_filter $ inter_le_left _ _)
(filter_le_filter $ inter_le_right _ _)) $ le_filter.2
⟨inf_le_inf (filter_le _) (filter_le _),
λ a h, of_mem_filter (mem_of_le (inter_le_left _ _) h)⟩
@[simp] theorem filter_filter {q} [decidable_pred q] (s : multiset α) :
filter p (filter q s) = filter (λ a, p a ∧ q a) s :=
quot.induction_on s $ λ l, congr_arg coe $ filter_filter l
theorem filter_add_filter {q} [decidable_pred q] (s : multiset α) :
filter p s + filter q s = filter (λ a, p a ∨ q a) s + filter (λ a, p a ∧ q a) s :=
multiset.induction_on s rfl $ λ a s IH,
by by_cases p a; by_cases q a; simp *
theorem filter_add_not (s : multiset α) :
filter p s + filter (λ a, ¬ p a) s = s :=
by rw [filter_add_filter, filter_eq_self.2, filter_eq_nil.2]; simp [decidable.em]
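-- Quick illustration (sketch): by `filter_add_not`, a multiset splits along a
-- predicate, so the two parts account for the whole cardinality.
example (s : multiset α) :
  card (filter p s) + card (filter (λ a, ¬ p a) s) = card s :=
by rw [← card_add, filter_add_not]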
/- filter_map -/
/-- `filter_map f s` is a combination filter/map operation on `s`.
The function `f : α → option β` is applied to each element of `s`;
if `f a` is `some b` then `b` is added to the result, otherwise
`a` is removed from the resulting multiset. -/
def filter_map (f : α → option β) (s : multiset α) : multiset β :=
quot.lift_on s (λ l, (filter_map f l : multiset β))
(λ l₁ l₂ h, quot.sound $ perm_filter_map f h)
@[simp] theorem coe_filter_map (f : α → option β) (l : list α) : filter_map f l = l.filter_map f := rfl
@[simp] theorem filter_map_zero (f : α → option β) : filter_map f 0 = 0 := rfl
@[simp] theorem filter_map_cons_none {f : α → option β} (a : α) (s : multiset α) (h : f a = none) :
filter_map f (a :: s) = filter_map f s :=
quot.induction_on s $ λ l, @congr_arg _ _ _ _ coe $ filter_map_cons_none a l h
@[simp] theorem filter_map_cons_some (f : α → option β)
(a : α) (s : multiset α) {b : β} (h : f a = some b) :
filter_map f (a :: s) = b :: filter_map f s :=
quot.induction_on s $ λ l, @congr_arg _ _ _ _ coe $ filter_map_cons_some f a l h
theorem filter_map_eq_map (f : α → β) : filter_map (some ∘ f) = map f :=
funext $ λ s, quot.induction_on s $ λ l,
@congr_arg _ _ _ _ coe $ congr_fun (filter_map_eq_map f) l
theorem filter_map_eq_filter (p : α → Prop) [decidable_pred p] :
filter_map (option.guard p) = filter p :=
funext $ λ s, quot.induction_on s $ λ l,
@congr_arg _ _ _ _ coe $ congr_fun (filter_map_eq_filter p) l
theorem filter_map_filter_map (f : α → option β) (g : β → option γ) (s : multiset α) :
filter_map g (filter_map f s) = filter_map (λ x, (f x).bind g) s :=
quot.induction_on s $ λ l, congr_arg coe $ filter_map_filter_map f g l
theorem map_filter_map (f : α → option β) (g : β → γ) (s : multiset α) :
map g (filter_map f s) = filter_map (λ x, (f x).map g) s :=
quot.induction_on s $ λ l, congr_arg coe $ map_filter_map f g l
theorem filter_map_map (f : α → β) (g : β → option γ) (s : multiset α) :
filter_map g (map f s) = filter_map (g ∘ f) s :=
quot.induction_on s $ λ l, congr_arg coe $ filter_map_map f g l
theorem filter_filter_map (f : α → option β) (p : β → Prop) [decidable_pred p] (s : multiset α) :
filter p (filter_map f s) = filter_map (λ x, (f x).filter p) s :=
quot.induction_on s $ λ l, congr_arg coe $ filter_filter_map f p l
theorem filter_map_filter (p : α → Prop) [decidable_pred p] (f : α → option β) (s : multiset α) :
filter_map f (filter p s) = filter_map (λ x, if p x then f x else none) s :=
quot.induction_on s $ λ l, congr_arg coe $ filter_map_filter p f l
@[simp] theorem filter_map_some (s : multiset α) : filter_map some s = s :=
quot.induction_on s $ λ l, congr_arg coe $ filter_map_some l
@[simp] theorem mem_filter_map (f : α → option β) (s : multiset α) {b : β} :
b ∈ filter_map f s ↔ ∃ a, a ∈ s ∧ f a = some b :=
quot.induction_on s $ λ l, mem_filter_map f l
theorem map_filter_map_of_inv (f : α → option β) (g : β → α)
(H : ∀ x : α, (f x).map g = some x) (s : multiset α) :
map g (filter_map f s) = s :=
quot.induction_on s $ λ l, congr_arg coe $ map_filter_map_of_inv f g H l
theorem filter_map_le_filter_map (f : α → option β) {s t : multiset α}
(h : s ≤ t) : filter_map f s ≤ filter_map f t :=
le_induction_on h $ λ l₁ l₂ h,
subperm_of_sublist $ filter_map_sublist_filter_map _ h
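-- Quick illustration (sketch): `filter_map` with the total function `some ∘ f`
-- keeps every element, i.e. it is just `map f` (the pointwise form of
-- `filter_map_eq_map`).
example (f : α → β) (s : multiset α) : filter_map (some ∘ f) s = map f s :=
congr_fun (filter_map_eq_map f) s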
/- powerset -/
def powerset_aux (l : list α) : list (multiset α) :=
0 :: sublists_aux l (λ x y, x :: y)
theorem powerset_aux_eq_map_coe {l : list α} :
powerset_aux l = (sublists l).map coe :=
by simp [powerset_aux, sublists];
rw [← show @sublists_aux₁ α (multiset α) l (λ x, [↑x]) =
sublists_aux l (λ x, list.cons ↑x),
from sublists_aux₁_eq_sublists_aux _ _,
sublists_aux_cons_eq_sublists_aux₁,
← bind_ret_eq_map, sublists_aux₁_bind]; refl
@[simp] theorem mem_powerset_aux {l : list α} {s} :
s ∈ powerset_aux l ↔ s ≤ ↑l :=
quotient.induction_on s $
by simp [powerset_aux_eq_map_coe, subperm, and.comm]
def powerset_aux' (l : list α) : list (multiset α) := (sublists' l).map coe
theorem powerset_aux_perm_powerset_aux' {l : list α} :
powerset_aux l ~ powerset_aux' l :=
by rw powerset_aux_eq_map_coe; exact
perm_map _ (sublists_perm_sublists' _)
@[simp] theorem powerset_aux'_nil : powerset_aux' (@nil α) = [0] := rfl
@[simp] theorem powerset_aux'_cons (a : α) (l : list α) :
powerset_aux' (a::l) = powerset_aux' l ++ list.map (cons a) (powerset_aux' l) :=
by simp [powerset_aux']; refl
theorem powerset_aux'_perm {l₁ l₂ : list α} (p : l₁ ~ l₂) :
powerset_aux' l₁ ~ powerset_aux' l₂ :=
begin
induction p with a l₁ l₂ p IH a b l l₁ l₂ l₃ p₁ p₂ IH₁ IH₂, {simp},
{ simp, exact perm_app IH (perm_map _ IH) },
{ simp, apply perm_app_right,
rw [← append_assoc, ← append_assoc,
(by funext s; simp [cons_swap] : cons b ∘ cons a = cons a ∘ cons b)],
exact perm_app_left _ perm_app_comm },
{ exact IH₁.trans IH₂ }
end
theorem powerset_aux_perm {l₁ l₂ : list α} (p : l₁ ~ l₂) :
powerset_aux l₁ ~ powerset_aux l₂ :=
powerset_aux_perm_powerset_aux'.trans $
(powerset_aux'_perm p).trans powerset_aux_perm_powerset_aux'.symm
def powerset (s : multiset α) : multiset (multiset α) :=
quot.lift_on s
(λ l, (powerset_aux l : multiset (multiset α)))
(λ l₁ l₂ h, quot.sound (powerset_aux_perm h))
theorem powerset_coe (l : list α) :
@powerset α l = ((sublists l).map coe : list (multiset α)) :=
congr_arg coe powerset_aux_eq_map_coe
@[simp] theorem powerset_coe' (l : list α) :
@powerset α l = ((sublists' l).map coe : list (multiset α)) :=
quot.sound powerset_aux_perm_powerset_aux'
@[simp] theorem powerset_zero : @powerset α 0 = 0::0 := rfl
@[simp] theorem powerset_cons (a : α) (s) :
powerset (a::s) = powerset s + map (cons a) (powerset s) :=
quotient.induction_on s $ λ l, by simp; refl
@[simp] theorem mem_powerset {s t : multiset α} :
s ∈ powerset t ↔ s ≤ t :=
quotient.induction_on₂ s t $ by simp [subperm, and.comm]
theorem map_single_le_powerset (s : multiset α) :
s.map (λ a, a::0) ≤ powerset s :=
quotient.induction_on s $ λ l, begin
simp [powerset_coe],
show l.map (coe ∘ list.ret) <+~ (sublists l).map coe,
rw ← list.map_map,
exact subperm_of_sublist
(map_sublist_map _ (map_ret_sublist_sublists _))
end
@[simp] theorem card_powerset (s : multiset α) :
card (powerset s) = 2 ^ card s :=
quotient.induction_on s $ by simp
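-- Quick illustration (sketch): by `mem_powerset`, the empty multiset and `s`
-- itself always belong to `powerset s`.
example (s : multiset α) : 0 ∈ powerset s ∧ s ∈ powerset s :=
⟨mem_powerset.2 (zero_le s), mem_powerset.2 (le_refl s)⟩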
/- diagonal -/
theorem revzip_powerset_aux {l : list α} ⦃s t⦄
(h : (s, t) ∈ revzip (powerset_aux l)) : s + t = ↑l :=
begin
rw [revzip, powerset_aux_eq_map_coe, ← map_reverse, zip_map, ← revzip] at h,
simp at h, rcases h with ⟨l₁, l₂, h, rfl, rfl⟩,
exact quot.sound (revzip_sublists _ _ _ h)
end
theorem revzip_powerset_aux' {l : list α} ⦃s t⦄
(h : (s, t) ∈ revzip (powerset_aux' l)) : s + t = ↑l :=
begin
rw [revzip, powerset_aux', ← map_reverse, zip_map, ← revzip] at h,
simp at h, rcases h with ⟨l₁, l₂, h, rfl, rfl⟩,
exact quot.sound (revzip_sublists' _ _ _ h)
end
theorem revzip_powerset_aux_lemma [decidable_eq α] (l : list α)
{l' : list (multiset α)} (H : ∀ ⦃s t⦄, (s, t) ∈ revzip l' → s + t = ↑l) :
revzip l' = l'.map (λ x, (x, ↑l - x)) :=
begin
have : forall₂ (λ (p : multiset α × multiset α) (s : multiset α), p = (s, ↑l - s))
(revzip l') ((revzip l').map prod.fst),
{ rw forall₂_map_right_iff,
apply forall₂_same, rintro ⟨s, t⟩ h,
dsimp, rw [← H h, add_sub_cancel_left] },
rw [← forall₂_eq_eq_eq, forall₂_map_right_iff], simpa
end
theorem revzip_powerset_aux_perm_aux' {l : list α} :
revzip (powerset_aux l) ~ revzip (powerset_aux' l) :=
begin
haveI := classical.dec_eq α,
rw [revzip_powerset_aux_lemma l revzip_powerset_aux,
revzip_powerset_aux_lemma l revzip_powerset_aux'],
exact perm_map _ powerset_aux_perm_powerset_aux',
end
theorem revzip_powerset_aux_perm {l₁ l₂ : list α} (p : l₁ ~ l₂) :
revzip (powerset_aux l₁) ~ revzip (powerset_aux l₂) :=
begin
haveI := classical.dec_eq α,
simp [λ l:list α, revzip_powerset_aux_lemma l revzip_powerset_aux, coe_eq_coe.2 p],
exact perm_map _ (powerset_aux_perm p)
end
def diagonal (s : multiset α) : multiset (multiset α × multiset α) :=
quot.lift_on s
(λ l, (revzip (powerset_aux l) : multiset (multiset α × multiset α)))
(λ l₁ l₂ h, quot.sound (revzip_powerset_aux_perm h))
theorem diagonal_coe (l : list α) :
@diagonal α l = revzip (powerset_aux l) := rfl
@[simp] theorem diagonal_coe' (l : list α) :
@diagonal α l = revzip (powerset_aux' l) :=
quot.sound revzip_powerset_aux_perm_aux'
@[simp] theorem mem_diagonal {s₁ s₂ t : multiset α} :
(s₁, s₂) ∈ diagonal t ↔ s₁ + s₂ = t :=
quotient.induction_on t $ λ l, begin
simp [diagonal_coe], refine ⟨λ h, revzip_powerset_aux h, λ h, _⟩,
haveI := classical.dec_eq α,
simp [revzip_powerset_aux_lemma l revzip_powerset_aux, h.symm],
exact ⟨_, le_add_right _ _, rfl, add_sub_cancel_left _ _⟩
end
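-- Quick illustration (sketch): by `mem_diagonal`, the trivial splitting
-- `(0, s)` is always an element of `diagonal s`.
example (s : multiset α) : (0, s) ∈ diagonal s := mem_diagonal.2 (zero_add s)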
@[simp] theorem diagonal_map_fst (s : multiset α) :
(diagonal s).map prod.fst = powerset s :=
quotient.induction_on s $ λ l,
by simp [powerset_aux']
@[simp] theorem diagonal_map_snd (s : multiset α) :
(diagonal s).map prod.snd = powerset s :=
quotient.induction_on s $ λ l,
by simp [powerset_aux']
@[simp] theorem diagonal_zero : @diagonal α 0 = (0, 0)::0 := rfl
@[simp] theorem diagonal_cons (a : α) (s) : diagonal (a::s) =
map (prod.map id (cons a)) (diagonal s) +
map (prod.map (cons a) id) (diagonal s) :=
quotient.induction_on s $ λ l, begin
simp [revzip, reverse_append],
rw [← zip_map, ← zip_map, zip_append, (_ : _++_=_)],
{congr; simp}, {simp}
end
@[simp] theorem card_diagonal (s : multiset α) :
card (diagonal s) = 2 ^ card s :=
by have := card_powerset s;
rwa [← diagonal_map_fst, card_map] at this
lemma prod_map_add [comm_semiring β] {s : multiset α} {f g : α → β} :
prod (s.map (λa, f a + g a)) = sum ((diagonal s).map (λp, (p.1.map f).prod * (p.2.map g).prod)) :=
begin
refine s.induction_on _ _,
{ simp },
{ assume a s ih, simp [ih, add_mul, mul_comm, mul_left_comm, mul_assoc, sum_map_mul_left.symm] },
end
/- countp -/
/-- `countp p s` counts the number of elements of `s` (with multiplicity) that
satisfy `p`. -/
def countp (p : α → Prop) [decidable_pred p] (s : multiset α) : ℕ :=
quot.lift_on s (countp p) (λ l₁ l₂, perm_countp p)
@[simp] theorem coe_countp (l : list α) : countp p l = l.countp p := rfl
@[simp] theorem countp_zero (p : α → Prop) [decidable_pred p] : countp p 0 = 0 := rfl
@[simp] theorem countp_cons_of_pos {a : α} (s) : p a → countp p (a::s) = countp p s + 1 :=
quot.induction_on s countp_cons_of_pos
@[simp] theorem countp_cons_of_neg {a : α} (s) : ¬ p a → countp p (a::s) = countp p s :=
quot.induction_on s countp_cons_of_neg
theorem countp_eq_card_filter (s) : countp p s = card (filter p s) :=
quot.induction_on s $ λ l, countp_eq_length_filter _
@[simp] theorem countp_add (s t) : countp p (s + t) = countp p s + countp p t :=
by simp [countp_eq_card_filter]
instance countp.is_add_monoid_hom : is_add_monoid_hom (countp p : multiset α → ℕ) :=
by refine_struct {..}; simp
theorem countp_pos {s} : 0 < countp p s ↔ ∃ a ∈ s, p a :=
by simp [countp_eq_card_filter, card_pos_iff_exists_mem]
@[simp] theorem countp_sub [decidable_eq α] {s t : multiset α} (h : t ≤ s) :
countp p (s - t) = countp p s - countp p t :=
by simp [countp_eq_card_filter, h, filter_le_filter]
theorem countp_pos_of_mem {s a} (h : a ∈ s) (pa : p a) : 0 < countp p s :=
countp_pos.2 ⟨_, h, pa⟩
theorem countp_le_of_le {s t} (h : s ≤ t) : countp p s ≤ countp p t :=
by simpa [countp_eq_card_filter] using card_le_of_le (filter_le_filter h)
@[simp] theorem countp_filter {q} [decidable_pred q] (s : multiset α) :
countp p (filter q s) = countp (λ a, p a ∧ q a) s :=
by simp [countp_eq_card_filter]
end
/- count -/
section
variable [decidable_eq α]
/-- `count a s` is the multiplicity of `a` in `s`. -/
def count (a : α) : multiset α → ℕ := countp (eq a)
@[simp] theorem coe_count (a : α) (l : list α) : count a (↑l) = l.count a := coe_countp _
@[simp] theorem count_zero (a : α) : count a 0 = 0 := rfl
@[simp] theorem count_cons_self (a : α) (s : multiset α) : count a (a::s) = succ (count a s) :=
countp_cons_of_pos _ rfl
@[simp] theorem count_cons_of_ne {a b : α} (h : a ≠ b) (s : multiset α) : count a (b::s) = count a s :=
countp_cons_of_neg _ h
theorem count_le_of_le (a : α) {s t} : s ≤ t → count a s ≤ count a t :=
countp_le_of_le
theorem count_le_count_cons (a b : α) (s : multiset α) : count a s ≤ count a (b :: s) :=
count_le_of_le _ (le_cons_self _ _)
theorem count_singleton (a : α) : count a (a::0) = 1 :=
by simp
@[simp] theorem count_add (a : α) : ∀ s t, count a (s + t) = count a s + count a t :=
countp_add
instance count.is_add_monoid_hom (a : α) : is_add_monoid_hom (count a : multiset α → ℕ) :=
countp.is_add_monoid_hom
@[simp] theorem count_smul (a : α) (n s) : count a (n • s) = n * count a s :=
by induction n; simp [*, succ_smul', succ_mul]
theorem count_pos {a : α} {s : multiset α} : 0 < count a s ↔ a ∈ s :=
by simp [count, countp_pos]
@[simp] theorem count_eq_zero_of_not_mem {a : α} {s : multiset α} (h : a ∉ s) : count a s = 0 :=
by_contradiction $ λ h', h $ count_pos.1 (nat.pos_of_ne_zero h')
theorem count_eq_zero {a : α} {s : multiset α} : count a s = 0 ↔ a ∉ s :=
iff_not_comm.1 $ count_pos.symm.trans pos_iff_ne_zero
@[simp] theorem count_repeat (a : α) (n : ℕ) : count a (repeat a n) = n :=
by simp [repeat]
@[simp] theorem count_erase_self (a : α) (s : multiset α) : count a (erase s a) = pred (count a s) :=
begin
by_cases a ∈ s,
{ rw [(by rw cons_erase h : count a s = count a (a::erase s a)),
count_cons_self]; refl },
{ rw [erase_of_not_mem h, count_eq_zero.2 h]; refl }
end
@[simp] theorem count_erase_of_ne {a b : α} (ab : a ≠ b) (s : multiset α) : count a (erase s b) = count a s :=
begin
by_cases b ∈ s,
{ rw [← count_cons_of_ne ab, cons_erase h] },
{ rw [erase_of_not_mem h] }
end
@[simp] theorem count_sub (a : α) (s t : multiset α) : count a (s - t) = count a s - count a t :=
begin
revert s, refine multiset.induction_on t (by simp) (λ b t IH s, _),
rw [sub_cons, IH],
by_cases ab : a = b,
{ subst b, rw [count_erase_self, count_cons_self, sub_succ, pred_sub] },
{ rw [count_erase_of_ne ab, count_cons_of_ne ab] }
end
@[simp] theorem count_union (a : α) (s t : multiset α) : count a (s ∪ t) = max (count a s) (count a t) :=
by simp [(∪), union, sub_add_eq_max, -add_comm]
@[simp] theorem count_inter (a : α) (s t : multiset α) : count a (s ∩ t) = min (count a s) (count a t) :=
begin
apply @nat.add_left_cancel (count a (s - t)),
rw [← count_add, sub_add_inter, count_sub, sub_add_min],
end
lemma count_bind {m : multiset β} {f : β → multiset α} {a : α} :
count a (bind m f) = sum (m.map $ λb, count a $ f b) :=
multiset.induction_on m (by simp) (by simp)
theorem le_count_iff_repeat_le {a : α} {s : multiset α} {n : ℕ} : n ≤ count a s ↔ repeat a n ≤ s :=
quot.induction_on s $ λ l, le_count_iff_repeat_sublist.trans repeat_le_coe.symm
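-- Quick illustration (sketch): specialising `le_count_iff_repeat_le`, every
-- multiset contains at least `count a s` copies of `a`.
example (a : α) (s : multiset α) : repeat a (count a s) ≤ s :=
le_count_iff_repeat_le.1 (le_refl _)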
@[simp] theorem count_filter {p} [decidable_pred p]
{a} {s : multiset α} (h : p a) : count a (filter p s) = count a s :=
quot.induction_on s $ λ l, count_filter h
theorem ext {s t : multiset α} : s = t ↔ ∀ a, count a s = count a t :=
quotient.induction_on₂ s t $ λ l₁ l₂, quotient.eq.trans perm_iff_count
@[extensionality]
theorem ext' {s t : multiset α} : (∀ a, count a s = count a t) → s = t :=
ext.2
@[simp] theorem coe_inter (s t : list α) : (s ∩ t : multiset α) = (s.bag_inter t : list α) :=
by ext; simp
theorem le_iff_count {s t : multiset α} : s ≤ t ↔ ∀ a, count a s ≤ count a t :=
⟨λ h a, count_le_of_le a h, λ al,
by rw ← (ext.2 (λ a, by simp [max_eq_right (al a)]) : s ∪ t = t);
apply le_union_left⟩
instance : distrib_lattice (multiset α) :=
{ le_sup_inf := λ s t u, le_of_eq $ eq.symm $
ext.2 $ λ a, by simp [max_min_distrib_left],
..multiset.lattice.lattice }
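-- Quick illustration (sketch): with the `distrib_lattice` instance available,
-- general lattice lemmas such as `sup_inf_left` apply directly to `∪` and `∩`.
example (s t u : multiset α) : s ∪ t ∩ u = (s ∪ t) ∩ (s ∪ u) := sup_inf_left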
instance : semilattice_sup_bot (multiset α) :=
{ bot := 0,
bot_le := zero_le,
..multiset.lattice.lattice }
end
/- relator -/
section rel
/-- `rel r s t` -- lift the relation `r` between two elements to a relation between `s` and `t`,
such that there is a one-to-one mapping between elements in `s` and `t` following `r`. -/
inductive rel (r : α → β → Prop) : multiset α → multiset β → Prop
| zero {} : rel 0 0
| cons {a b as bs} : r a b → rel as bs → rel (a :: as) (b :: bs)
run_cmd tactic.mk_iff_of_inductive_prop `multiset.rel `multiset.rel_iff
variables {δ : Type*} {r : α → β → Prop} {p : γ → δ → Prop}
private lemma rel_flip_aux {s t} (h : rel r s t) : rel (flip r) t s :=
rel.rec_on h rel.zero (assume _ _ _ _ h₀ h₁ ih, rel.cons h₀ ih)
lemma rel_flip {s t} : rel (flip r) s t ↔ rel r t s :=
⟨rel_flip_aux, rel_flip_aux⟩
lemma rel_eq_refl {s : multiset α} : rel (=) s s :=
multiset.induction_on s rel.zero (assume a s, rel.cons rfl)
lemma rel_eq {s t : multiset α} : rel (=) s t ↔ s = t :=
begin
split,
{ assume h, induction h; simp * },
{ assume h, subst h, exact rel_eq_refl }
end
lemma rel.mono {p : α → β → Prop} {s t} (h : ∀a b, r a b → p a b) (hst : rel r s t) : rel p s t :=
begin
induction hst,
case rel.zero { exact rel.zero },
case rel.cons : a b s t hab hst ih { exact ih.cons (h a b hab) }
end
lemma rel.add {s t u v} (hst : rel r s t) (huv : rel r u v) : rel r (s + u) (t + v) :=
begin
induction hst,
case rel.zero { simpa using huv },
case rel.cons : a b s t hab hst ih { simpa using ih.cons hab }
end
lemma rel_flip_eq {s t : multiset α} : rel (λa b, b = a) s t ↔ s = t :=
show rel (flip (=)) s t ↔ s = t, by rw [rel_flip, rel_eq, eq_comm]
@[simp] lemma rel_zero_left {b : multiset β} : rel r 0 b ↔ b = 0 :=
by rw [rel_iff]; simp
@[simp] lemma rel_zero_right {a : multiset α} : rel r a 0 ↔ a = 0 :=
by rw [rel_iff]; simp
lemma rel_cons_left {a as bs} :
rel r (a :: as) bs ↔ (∃b bs', r a b ∧ rel r as bs' ∧ bs = b :: bs') :=
begin
split,
{ generalize hm : a :: as = m,
assume h,
induction h generalizing as,
case rel.zero { simp at hm, contradiction },
case rel.cons : a' b as' bs ha'b h ih {
rcases cons_eq_cons.1 hm with ⟨eq₁, eq₂⟩ | ⟨h, cs, eq₁, eq₂⟩,
{ subst eq₁, subst eq₂, exact ⟨b, bs, ha'b, h, rfl⟩ },
{ rcases ih eq₂.symm with ⟨b', bs', h₁, h₂, eq⟩,
exact ⟨b', b::bs', h₁, eq₁.symm ▸ rel.cons ha'b h₂, eq.symm ▸ cons_swap _ _ _⟩ }
} },
{ exact assume ⟨b, bs', hab, h, eq⟩, eq.symm ▸ rel.cons hab h }
end
lemma rel_cons_right {as b bs} :
rel r as (b :: bs) ↔ (∃a as', r a b ∧ rel r as' bs ∧ as = a :: as') :=
begin
rw [← rel_flip, rel_cons_left],
apply exists_congr, assume a,
apply exists_congr, assume as',
rw [rel_flip, flip]
end
lemma rel_add_left {as₀ as₁} :
∀{bs}, rel r (as₀ + as₁) bs ↔ (∃bs₀ bs₁, rel r as₀ bs₀ ∧ rel r as₁ bs₁ ∧ bs = bs₀ + bs₁) :=
multiset.induction_on as₀ (by simp)
begin
assume a s ih bs,
simp only [ih, cons_add, rel_cons_left],
split,
{ assume h,
rcases h with ⟨b, bs', hab, h, rfl⟩,
rcases h with ⟨bs₀, bs₁, h₀, h₁, rfl⟩,
exact ⟨b :: bs₀, bs₁, ⟨b, bs₀, hab, h₀, rfl⟩, h₁, by simp⟩ },
{ assume h,
rcases h with ⟨bs₀, bs₁, h, h₁, rfl⟩,
rcases h with ⟨b, bs, hab, h₀, rfl⟩,
exact ⟨b, bs + bs₁, hab, ⟨bs, bs₁, h₀, h₁, rfl⟩, by simp⟩ }
end
lemma rel_add_right {as bs₀ bs₁} :
rel r as (bs₀ + bs₁) ↔ (∃as₀ as₁, rel r as₀ bs₀ ∧ rel r as₁ bs₁ ∧ as = as₀ + as₁) :=
by rw [← rel_flip, rel_add_left]; simp [rel_flip]
lemma rel_map_left {s : multiset γ} {f : γ → α} :
∀{t}, rel r (s.map f) t ↔ rel (λa b, r (f a) b) s t :=
multiset.induction_on s (by simp) (by simp [rel_cons_left] {contextual := tt})
lemma rel_map_right {s : multiset α} {t : multiset γ} {f : γ → β} :
rel r s (t.map f) ↔ rel (λa b, r a (f b)) s t :=
by rw [← rel_flip, rel_map_left, ← rel_flip]; refl
lemma rel_join {s t} (h : rel (rel r) s t) : rel r s.join t.join :=
begin
induction h,
case rel.zero { simp },
case rel.cons : a b s t hab hst ih { simpa using hab.add ih }
end
lemma rel_map {p : γ → δ → Prop} {s t} {f : α → γ} {g : β → δ} (h : (r ⇒ p) f g) (hst : rel r s t) :
rel p (s.map f) (t.map g) :=
by rw [rel_map_left, rel_map_right]; exact hst.mono (assume a b, h)
lemma rel_bind {p : γ → δ → Prop} {s t} {f : α → multiset γ} {g : β → multiset δ}
(h : (r ⇒ rel p) f g) (hst : rel r s t) :
rel p (s.bind f) (t.bind g) :=
by apply rel_join; apply rel_map; assumption
lemma card_eq_card_of_rel {r : α → β → Prop} {s : multiset α} {t : multiset β} (h : rel r s t) :
card s = card t :=
by induction h; simp [*]
lemma exists_mem_of_rel_of_mem {r : α → β → Prop} {s : multiset α} {t : multiset β} (h : rel r s t) :
∀ {a : α} (ha : a ∈ s), ∃ b ∈ t, r a b :=
begin
induction h with x y s t hxy hst ih,
{ simp },
{ assume a ha,
cases mem_cons.1 ha with ha ha,
{ exact ⟨y, mem_cons_self _ _, ha.symm ▸ hxy⟩ },
{ rcases ih ha with ⟨b, hbt, hab⟩,
exact ⟨b, mem_cons.2 (or.inr hbt), hab⟩ } }
end
end rel
section map
theorem map_eq_map {f : α → β} (hf : function.injective f) {s t : multiset α} :
s.map f = t.map f ↔ s = t :=
by rw [← rel_eq, ← rel_eq, rel_map_left, rel_map_right]; simp [hf.eq_iff]
theorem injective_map {f : α → β} (hf : function.injective f) :
function.injective (multiset.map f) :=
assume x y, (map_eq_map hf).1
end map
section quot
theorem map_mk_eq_map_mk_of_rel {r : α → α → Prop} {s t : multiset α} (hst : s.rel r t) :
s.map (quot.mk r) = t.map (quot.mk r) :=
rel.rec_on hst rfl $ assume a b s t hab hst ih, by simp [ih, quot.sound hab]
theorem exists_multiset_eq_map_quot_mk {r : α → α → Prop} (s : multiset (quot r)) :
∃t:multiset α, s = t.map (quot.mk r) :=
multiset.induction_on s ⟨0, rfl⟩ $
assume a s ⟨t, ht⟩, quot.induction_on a $ assume a, ht.symm ▸ ⟨a::t, (map_cons _ _ _).symm⟩
theorem induction_on_multiset_quot
{r : α → α → Prop} {p : multiset (quot r) → Prop} (s : multiset (quot r)) :
(∀s:multiset α, p (s.map (quot.mk r))) → p s :=
match s, exists_multiset_eq_map_quot_mk s with _, ⟨t, rfl⟩ := assume h, h _ end
end quot
/- disjoint -/
/-- `disjoint s t` means that `s` and `t` have no elements in common. -/
def disjoint (s t : multiset α) : Prop := ∀ ⦃a⦄, a ∈ s → a ∈ t → false
@[simp] theorem coe_disjoint (l₁ l₂ : list α) : @disjoint α l₁ l₂ ↔ l₁.disjoint l₂ := iff.rfl
theorem disjoint.symm {s t : multiset α} (d : disjoint s t) : disjoint t s
| a i₂ i₁ := d i₁ i₂
@[simp] theorem disjoint_comm {s t : multiset α} : disjoint s t ↔ disjoint t s :=
⟨disjoint.symm, disjoint.symm⟩
theorem disjoint_left {s t : multiset α} : disjoint s t ↔ ∀ {a}, a ∈ s → a ∉ t := iff.rfl
theorem disjoint_right {s t : multiset α} : disjoint s t ↔ ∀ {a}, a ∈ t → a ∉ s :=
disjoint_comm
theorem disjoint_iff_ne {s t : multiset α} : disjoint s t ↔ ∀ a ∈ s, ∀ b ∈ t, a ≠ b :=
by simp [disjoint_left, imp_not_comm]
theorem disjoint_of_subset_left {s t u : multiset α} (h : s ⊆ u) (d : disjoint u t) : disjoint s t
| x m₁ := d (h m₁)
theorem disjoint_of_subset_right {s t u : multiset α} (h : t ⊆ u) (d : disjoint s u) : disjoint s t
| x m m₁ := d m (h m₁)
theorem disjoint_of_le_left {s t u : multiset α} (h : s ≤ u) : disjoint u t → disjoint s t :=
disjoint_of_subset_left (subset_of_le h)
theorem disjoint_of_le_right {s t u : multiset α} (h : t ≤ u) : disjoint s u → disjoint s t :=
disjoint_of_subset_right (subset_of_le h)
@[simp] theorem zero_disjoint (l : multiset α) : disjoint 0 l
| a := (not_mem_nil a).elim
@[simp] theorem singleton_disjoint {l : multiset α} {a : α} : disjoint (a::0) l ↔ a ∉ l :=
by simp [disjoint]; refl
@[simp] theorem disjoint_singleton {l : multiset α} {a : α} : disjoint l (a::0) ↔ a ∉ l :=
by rw disjoint_comm; simp
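-- Quick illustration (sketch): two singletons on distinct elements are disjoint.
example {a b : α} (h : a ≠ b) : disjoint (a::0) (b::0) :=
singleton_disjoint.2 (by simp [h])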
@[simp] theorem disjoint_add_left {s t u : multiset α} :
disjoint (s + t) u ↔ disjoint s u ∧ disjoint t u :=
by simp [disjoint, or_imp_distrib, forall_and_distrib]
@[simp] theorem disjoint_add_right {s t u : multiset α} :
disjoint s (t + u) ↔ disjoint s t ∧ disjoint s u :=
disjoint_comm.trans $ by simp [disjoint_append_left]
@[simp] theorem disjoint_cons_left {a : α} {s t : multiset α} :
disjoint (a::s) t ↔ a ∉ t ∧ disjoint s t :=
(@disjoint_add_left _ (a::0) s t).trans $ by simp
@[simp] theorem disjoint_cons_right {a : α} {s t : multiset α} :
disjoint s (a::t) ↔ a ∉ s ∧ disjoint s t :=
disjoint_comm.trans $ by simp [disjoint_cons_left]
theorem inter_eq_zero_iff_disjoint [decidable_eq α] {s t : multiset α} : s ∩ t = 0 ↔ disjoint s t :=
by rw ← subset_zero; simp [subset_iff, disjoint]
@[simp] theorem disjoint_union_left [decidable_eq α] {s t u : multiset α} :
disjoint (s ∪ t) u ↔ disjoint s u ∧ disjoint t u :=
by simp [disjoint, or_imp_distrib, forall_and_distrib]
@[simp] theorem disjoint_union_right [decidable_eq α] {s t u : multiset α} :
disjoint s (t ∪ u) ↔ disjoint s t ∧ disjoint s u :=
by simp [disjoint, or_imp_distrib, forall_and_distrib]
lemma disjoint_map_map {f : α → γ} {g : β → γ} {s : multiset α} {t : multiset β} :
disjoint (s.map f) (t.map g) ↔ (∀a∈s, ∀b∈t, f a ≠ g b) :=
begin
simp [disjoint],
split,
from assume h a ha b hb eq, h _ ha rfl _ hb eq.symm,
from assume h c a ha eq₁ b hb eq₂, h _ ha _ hb (eq₂.symm ▸ eq₁)
end
/-- `pairwise r m` states that there exists a list of the elements of `m` such that `r` holds pairwise on this list. -/
def pairwise (r : α → α → Prop) (m : multiset α) : Prop :=
∃l:list α, m = l ∧ l.pairwise r
lemma pairwise_coe_iff_pairwise {r : α → α → Prop} (hr : symmetric r) {l : list α} :
multiset.pairwise r l ↔ l.pairwise r :=
iff.intro
(assume ⟨l', eq, h⟩, (list.perm_pairwise hr (quotient.exact eq)).2 h)
(assume h, ⟨l, rfl, h⟩)
/- nodup -/
/-- `nodup s` means that `s` has no duplicates, i.e. the multiplicity of
any element is at most 1. -/
def nodup (s : multiset α) : Prop :=
quot.lift_on s nodup (λ s t p, propext $ perm_nodup p)
@[simp] theorem coe_nodup {l : list α} : @nodup α l ↔ l.nodup := iff.rfl
@[simp] theorem forall_mem_ne {a : α} {l : list α} : (∀ (a' : α), a' ∈ l → ¬a = a') ↔ a ∉ l :=
⟨λ h m, h _ m rfl, λ h a' m e, h (e.symm ▸ m)⟩
@[simp] theorem nodup_zero : @nodup α 0 := pairwise.nil
@[simp] theorem nodup_cons {a : α} {s : multiset α} : nodup (a::s) ↔ a ∉ s ∧ nodup s :=
quot.induction_on s $ λ l, nodup_cons
theorem nodup_cons_of_nodup {a : α} {s : multiset α} (m : a ∉ s) (n : nodup s) : nodup (a::s) :=
nodup_cons.2 ⟨m, n⟩
theorem nodup_singleton : ∀ a : α, nodup (a::0) := nodup_singleton
theorem nodup_of_nodup_cons {a : α} {s : multiset α} (h : nodup (a::s)) : nodup s :=
(nodup_cons.1 h).2
theorem not_mem_of_nodup_cons {a : α} {s : multiset α} (h : nodup (a::s)) : a ∉ s :=
(nodup_cons.1 h).1
theorem nodup_of_le {s t : multiset α} (h : s ≤ t) : nodup t → nodup s :=
le_induction_on h $ λ l₁ l₂, nodup_of_sublist
theorem not_nodup_pair : ∀ a : α, ¬ nodup (a::a::0) := not_nodup_pair
theorem nodup_iff_le {s : multiset α} : nodup s ↔ ∀ a : α, ¬ a::a::0 ≤ s :=
quot.induction_on s $ λ l, nodup_iff_sublist.trans $ forall_congr $ λ a,
not_congr (@repeat_le_coe _ a 2 _).symm
theorem nodup_iff_count_le_one [decidable_eq α] {s : multiset α} : nodup s ↔ ∀ a, count a s ≤ 1 :=
quot.induction_on s $ λ l, nodup_iff_count_le_one
@[simp] theorem count_eq_one_of_mem [decidable_eq α] {a : α} {s : multiset α}
(d : nodup s) (h : a ∈ s) : count a s = 1 :=
le_antisymm (nodup_iff_count_le_one.1 d a) (count_pos.2 h)
lemma pairwise_of_nodup {r : α → α → Prop} {s : multiset α} :
(∀a∈s, ∀b∈s, a ≠ b → r a b) → nodup s → pairwise r s :=
quotient.induction_on s $ assume l h hl, ⟨l, rfl, hl.imp_of_mem $ assume a b ha hb, h a ha b hb⟩
lemma forall_of_pairwise {r : α → α → Prop} (H : symmetric r) {s : multiset α}
(hs : pairwise r s) : (∀a∈s, ∀b∈s, a ≠ b → r a b) :=
let ⟨l, hl₁, hl₂⟩ := hs in hl₁.symm ▸ list.forall_of_pairwise H hl₂
theorem nodup_add {s t : multiset α} : nodup (s + t) ↔ nodup s ∧ nodup t ∧ disjoint s t :=
quotient.induction_on₂ s t $ λ l₁ l₂, nodup_append
theorem disjoint_of_nodup_add {s t : multiset α} (d : nodup (s + t)) : disjoint s t :=
(nodup_add.1 d).2.2
theorem nodup_add_of_nodup {s t : multiset α} (d₁ : nodup s) (d₂ : nodup t) : nodup (s + t) ↔ disjoint s t :=
by simp [nodup_add, d₁, d₂]
theorem nodup_of_nodup_map (f : α → β) {s : multiset α} : nodup (map f s) → nodup s :=
quot.induction_on s $ λ l, nodup_of_nodup_map f
theorem nodup_map_on {f : α → β} {s : multiset α} : (∀x∈s, ∀y∈s, f x = f y → x = y) →
nodup s → nodup (map f s) :=
quot.induction_on s $ λ l, nodup_map_on
theorem nodup_map {f : α → β} {s : multiset α} (hf : function.injective f) : nodup s → nodup (map f s) :=
nodup_map_on (λ x _ y _ h, hf h)
theorem nodup_filter (p : α → Prop) [decidable_pred p] {s} : nodup s → nodup (filter p s) :=
quot.induction_on s $ λ l, nodup_filter p
@[simp] theorem nodup_attach {s : multiset α} : nodup (attach s) ↔ nodup s :=
quot.induction_on s $ λ l, nodup_attach
theorem nodup_pmap {p : α → Prop} {f : Π a, p a → β} {s : multiset α} {H}
(hf : ∀ a ha b hb, f a ha = f b hb → a = b) : nodup s → nodup (pmap f s H) :=
quot.induction_on s (λ l H, nodup_pmap hf) H
instance nodup_decidable [decidable_eq α] (s : multiset α) : decidable (nodup s) :=
quotient.rec_on_subsingleton s $ λ l, l.nodup_decidable
theorem nodup_erase_eq_filter [decidable_eq α] (a : α) {s} : nodup s → s.erase a = filter (≠ a) s :=
quot.induction_on s $ λ l d, congr_arg coe $ nodup_erase_eq_filter a d
theorem nodup_erase_of_nodup [decidable_eq α] (a : α) {l} : nodup l → nodup (l.erase a) :=
nodup_of_le (erase_le _ _)
theorem mem_erase_iff_of_nodup [decidable_eq α] {a b : α} {l} (d : nodup l) :
a ∈ l.erase b ↔ a ≠ b ∧ a ∈ l :=
by rw nodup_erase_eq_filter b d; simp [and_comm]
theorem mem_erase_of_nodup [decidable_eq α] {a : α} {l} (h : nodup l) : a ∉ l.erase a :=
by rw mem_erase_iff_of_nodup h; simp
theorem nodup_product {s : multiset α} {t : multiset β} : nodup s → nodup t → nodup (product s t) :=
quotient.induction_on₂ s t $ λ l₁ l₂ d₁ d₂, by simp [nodup_product d₁ d₂]
theorem nodup_sigma {σ : α → Type*} {s : multiset α} {t : Π a, multiset (σ a)} :
nodup s → (∀ a, nodup (t a)) → nodup (s.sigma t) :=
quot.induction_on s $ assume l₁,
begin
choose f hf using assume a, quotient.exists_rep (t a),
rw show t = λ a, f a, from (eq.symm $ funext $ λ a, hf a),
simpa using nodup_sigma
end
theorem nodup_filter_map (f : α → option β) {s : multiset α}
(H : ∀ (a a' : α) (b : β), b ∈ f a → b ∈ f a' → a = a') :
nodup s → nodup (filter_map f s) :=
quot.induction_on s $ λ l, nodup_filter_map H
theorem nodup_range (n : ℕ) : nodup (range n) := nodup_range _
theorem nodup_inter_left [decidable_eq α] {s : multiset α} (t) : nodup s → nodup (s ∩ t) :=
nodup_of_le $ inter_le_left _ _
theorem nodup_inter_right [decidable_eq α] (s) {t : multiset α} : nodup t → nodup (s ∩ t) :=
nodup_of_le $ inter_le_right _ _
@[simp] theorem nodup_union [decidable_eq α] {s t : multiset α} : nodup (s ∪ t) ↔ nodup s ∧ nodup t :=
⟨λ h, ⟨nodup_of_le (le_union_left _ _) h, nodup_of_le (le_union_right _ _) h⟩,
λ ⟨h₁, h₂⟩, nodup_iff_count_le_one.2 $ λ a, by rw [count_union]; exact
max_le (nodup_iff_count_le_one.1 h₁ a) (nodup_iff_count_le_one.1 h₂ a)⟩
@[simp] theorem nodup_powerset {s : multiset α} : nodup (powerset s) ↔ nodup s :=
⟨λ h, nodup_of_nodup_map _ (nodup_of_le (map_single_le_powerset _) h),
quotient.induction_on s $ λ l h,
by simp; refine list.nodup_map_on _ (nodup_sublists'.2 h); exact
λ x sx y sy e,
(perm_ext_sublist_nodup h (mem_sublists'.1 sx) (mem_sublists'.1 sy)).1
(quotient.exact e)⟩
@[simp] lemma nodup_bind {s : multiset α} {t : α → multiset β} :
nodup (bind s t) ↔ ((∀a∈s, nodup (t a)) ∧ (s.pairwise (λa b, disjoint (t a) (t b)))) :=
have h₁ : ∀a, ∃l:list β, t a = l, from
assume a, quot.induction_on (t a) $ assume l, ⟨l, rfl⟩,
let ⟨t', h'⟩ := classical.axiom_of_choice h₁ in
have t = λa, t' a, from funext h',
have hd : symmetric (λa b, list.disjoint (t' a) (t' b)), from assume a b h, h.symm,
quot.induction_on s $ by simp [this, list.nodup_bind, pairwise_coe_iff_pairwise hd]
theorem nodup_ext {s t : multiset α} : nodup s → nodup t → (s = t ↔ ∀ a, a ∈ s ↔ a ∈ t) :=
quotient.induction_on₂ s t $ λ l₁ l₂ d₁ d₂, quotient.eq.trans $ perm_ext d₁ d₂
theorem le_iff_subset {s t : multiset α} : nodup s → (s ≤ t ↔ s ⊆ t) :=
quotient.induction_on₂ s t $ λ l₁ l₂ d, ⟨subset_of_le, subperm_of_subset_nodup d⟩
theorem range_le {m n : ℕ} : range m ≤ range n ↔ m ≤ n :=
(le_iff_subset (nodup_range _)).trans range_subset
theorem mem_sub_of_nodup [decidable_eq α] {a : α} {s t : multiset α} (d : nodup s) :
a ∈ s - t ↔ a ∈ s ∧ a ∉ t :=
⟨λ h, ⟨mem_of_le (sub_le_self _ _) h, λ h',
by refine count_eq_zero.1 _ h; rw [count_sub a s t, nat.sub_eq_zero_iff_le];
exact le_trans (nodup_iff_count_le_one.1 d _) (count_pos.2 h')⟩,
λ ⟨h₁, h₂⟩, or.resolve_right (mem_add.1 $ mem_of_le (le_sub_add _ _) h₁) h₂⟩
section
variable [decidable_eq α]
/- erase_dup -/
/-- `erase_dup s` removes duplicates from `s`, yielding a `nodup` multiset. -/
def erase_dup (s : multiset α) : multiset α :=
quot.lift_on s (λ l, (l.erase_dup : multiset α))
(λ s t p, quot.sound (perm_erase_dup_of_perm p))
@[simp] theorem coe_erase_dup (l : list α) : @erase_dup α _ l = l.erase_dup := rfl
@[simp] theorem erase_dup_zero : @erase_dup α _ 0 = 0 := rfl
@[simp] theorem mem_erase_dup {a : α} {s : multiset α} : a ∈ erase_dup s ↔ a ∈ s :=
quot.induction_on s $ λ l, mem_erase_dup
@[simp] theorem erase_dup_cons_of_mem {a : α} {s : multiset α} : a ∈ s →
erase_dup (a::s) = erase_dup s :=
quot.induction_on s $ λ l m, @congr_arg _ _ _ _ coe $ erase_dup_cons_of_mem m
@[simp] theorem erase_dup_cons_of_not_mem {a : α} {s : multiset α} : a ∉ s →
erase_dup (a::s) = a :: erase_dup s :=
quot.induction_on s $ λ l m, congr_arg coe $ erase_dup_cons_of_not_mem m
theorem erase_dup_le (s : multiset α) : erase_dup s ≤ s :=
quot.induction_on s $ λ l, subperm_of_sublist $ erase_dup_sublist _
theorem erase_dup_subset (s : multiset α) : erase_dup s ⊆ s :=
subset_of_le $ erase_dup_le _
theorem subset_erase_dup (s : multiset α) : s ⊆ erase_dup s :=
λ a, mem_erase_dup.2
@[simp] theorem erase_dup_subset' {s t : multiset α} : erase_dup s ⊆ t ↔ s ⊆ t :=
⟨subset.trans (subset_erase_dup _), subset.trans (erase_dup_subset _)⟩
@[simp] theorem subset_erase_dup' {s t : multiset α} : s ⊆ erase_dup t ↔ s ⊆ t :=
⟨λ h, subset.trans h (erase_dup_subset _), λ h, subset.trans h (subset_erase_dup _)⟩
@[simp] theorem nodup_erase_dup (s : multiset α) : nodup (erase_dup s) :=
quot.induction_on s nodup_erase_dup
theorem erase_dup_eq_self {s : multiset α} : erase_dup s = s ↔ nodup s :=
⟨λ e, e ▸ nodup_erase_dup s,
quot.induction_on s $ λ l h, congr_arg coe $ erase_dup_eq_self.2 h⟩
theorem erase_dup_eq_zero {s : multiset α} : erase_dup s = 0 ↔ s = 0 :=
⟨λ h, eq_zero_of_subset_zero $ h ▸ subset_erase_dup _,
λ h, h.symm ▸ erase_dup_zero⟩
@[simp] theorem erase_dup_singleton {a : α} : erase_dup (a :: 0) = a :: 0 :=
erase_dup_eq_self.2 $ nodup_singleton _
theorem le_erase_dup {s t : multiset α} : s ≤ erase_dup t ↔ s ≤ t ∧ nodup s :=
⟨λ h, ⟨le_trans h (erase_dup_le _), nodup_of_le h (nodup_erase_dup _)⟩,
λ ⟨l, d⟩, (le_iff_subset d).2 $ subset.trans (subset_of_le l) (subset_erase_dup _)⟩
theorem erase_dup_ext {s t : multiset α} : erase_dup s = erase_dup t ↔ ∀ a, a ∈ s ↔ a ∈ t :=
by simp [nodup_ext]
theorem erase_dup_map_erase_dup_eq [decidable_eq β] (f : α → β) (s : multiset α) :
erase_dup (map f (erase_dup s)) = erase_dup (map f s) := by simp [erase_dup_ext]
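-- Quick illustration (sketch): since `erase_dup s ≤ s`, deduplication can only
-- decrease the cardinality.
example (s : multiset α) : card (erase_dup s) ≤ card s :=
card_le_of_le (erase_dup_le s)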
/- finset insert -/
/-- `ndinsert a s` is the lift of the list `insert` operation. This operation
does not respect multiplicities, unlike `cons`, but it is suitable as
an insert operation on `finset`. -/
def ndinsert (a : α) (s : multiset α) : multiset α :=
quot.lift_on s (λ l, (l.insert a : multiset α))
(λ s t p, quot.sound (perm_insert a p))
@[simp] theorem coe_ndinsert (a : α) (l : list α) : ndinsert a l = (insert a l : list α) := rfl
@[simp] theorem ndinsert_zero (a : α) : ndinsert a 0 = a::0 := rfl
@[simp] theorem ndinsert_of_mem {a : α} {s : multiset α} : a ∈ s → ndinsert a s = s :=
quot.induction_on s $ λ l h, congr_arg coe $ insert_of_mem h
@[simp] theorem ndinsert_of_not_mem {a : α} {s : multiset α} : a ∉ s → ndinsert a s = a :: s :=
quot.induction_on s $ λ l h, congr_arg coe $ insert_of_not_mem h
@[simp] theorem mem_ndinsert {a b : α} {s : multiset α} : a ∈ ndinsert b s ↔ a = b ∨ a ∈ s :=
quot.induction_on s $ λ l, mem_insert_iff
@[simp] theorem le_ndinsert_self (a : α) (s : multiset α) : s ≤ ndinsert a s :=
quot.induction_on s $ λ l, subperm_of_sublist $ sublist_of_suffix $ suffix_insert _ _
@[simp] theorem mem_ndinsert_self (a : α) (s : multiset α) : a ∈ ndinsert a s :=
mem_ndinsert.2 (or.inl rfl)
@[simp] theorem mem_ndinsert_of_mem {a b : α} {s : multiset α} (h : a ∈ s) : a ∈ ndinsert b s :=
mem_ndinsert.2 (or.inr h)
@[simp] theorem length_ndinsert_of_mem {a : α} [decidable_eq α] {s : multiset α} (h : a ∈ s) :
card (ndinsert a s) = card s :=
by simp [h]
@[simp] theorem length_ndinsert_of_not_mem {a : α} [decidable_eq α] {s : multiset α} (h : a ∉ s) :
card (ndinsert a s) = card s + 1 :=
by simp [h]
theorem erase_dup_cons {a : α} {s : multiset α} :
erase_dup (a::s) = ndinsert a (erase_dup s) :=
by by_cases a ∈ s; simp [h]
theorem nodup_ndinsert (a : α) {s : multiset α} : nodup s → nodup (ndinsert a s) :=
quot.induction_on s $ λ l, nodup_insert
theorem ndinsert_le {a : α} {s t : multiset α} : ndinsert a s ≤ t ↔ s ≤ t ∧ a ∈ t :=
⟨λ h, ⟨le_trans (le_ndinsert_self _ _) h, mem_of_le h (mem_ndinsert_self _ _)⟩,
λ ⟨l, m⟩, if h : a ∈ s then by simp [h, l] else
by rw [ndinsert_of_not_mem h, ← cons_erase m, cons_le_cons_iff,
← le_cons_of_not_mem h, cons_erase m]; exact l⟩
lemma attach_ndinsert (a : α) (s : multiset α) :
(s.ndinsert a).attach =
ndinsert ⟨a, mem_ndinsert_self a s⟩ (s.attach.map $ λp, ⟨p.1, mem_ndinsert_of_mem p.2⟩) :=
have eq : ∀h : ∀(p : {x // x ∈ s}), p.1 ∈ s,
(λ (p : {x // x ∈ s}), ⟨p.val, h p⟩ : {x // x ∈ s} → {x // x ∈ s}) = id, from
assume h, funext $ assume p, subtype.eq rfl,
have ∀t (eq : s.ndinsert a = t), t.attach = ndinsert ⟨a, eq ▸ mem_ndinsert_self a s⟩
(s.attach.map $ λp, ⟨p.1, eq ▸ mem_ndinsert_of_mem p.2⟩),
begin
intros t ht,
by_cases a ∈ s,
{ rw [ndinsert_of_mem h] at ht,
subst ht,
rw [eq, map_id, ndinsert_of_mem (mem_attach _ _)] },
{ rw [ndinsert_of_not_mem h] at ht,
subst ht,
simp [attach_cons, h] }
end,
this _ rfl
@[simp] theorem disjoint_ndinsert_left {a : α} {s t : multiset α} :
disjoint (ndinsert a s) t ↔ a ∉ t ∧ disjoint s t :=
iff.trans (by simp [disjoint]) disjoint_cons_left
@[simp] theorem disjoint_ndinsert_right {a : α} {s t : multiset α} :
disjoint s (ndinsert a t) ↔ a ∉ s ∧ disjoint s t :=
disjoint_comm.trans $ by simp
/- finset union -/
/-- `ndunion s t` is the lift of the list `union` operation. This operation
does not respect multiplicities, unlike `s ∪ t`, but it is suitable as
a union operation on `finset`. (`s ∪ t` would also work as a union operation
on finset, but this is more efficient.) -/
def ndunion (s t : multiset α) : multiset α :=
quotient.lift_on₂ s t (λ l₁ l₂, (l₁.union l₂ : multiset α)) $ λ v₁ v₂ w₁ w₂ p₁ p₂,
quot.sound $ perm_union p₁ p₂
@[simp] theorem coe_ndunion (l₁ l₂ : list α) : @ndunion α _ l₁ l₂ = (l₁ ∪ l₂ : list α) := rfl
@[simp] theorem zero_ndunion (s : multiset α) : ndunion 0 s = s :=
quot.induction_on s $ λ l, rfl
@[simp] theorem cons_ndunion (s t : multiset α) (a : α) : ndunion (a :: s) t = ndinsert a (ndunion s t) :=
quotient.induction_on₂ s t $ λ l₁ l₂, rfl
@[simp] theorem mem_ndunion {s t : multiset α} {a : α} : a ∈ ndunion s t ↔ a ∈ s ∨ a ∈ t :=
quotient.induction_on₂ s t $ λ l₁ l₂, list.mem_union
theorem le_ndunion_right (s t : multiset α) : t ≤ ndunion s t :=
quotient.induction_on₂ s t $ λ l₁ l₂,
subperm_of_sublist $ sublist_of_suffix $ suffix_union_right _ _
theorem ndunion_le_add (s t : multiset α) : ndunion s t ≤ s + t :=
quotient.induction_on₂ s t $ λ l₁ l₂, subperm_of_sublist $ union_sublist_append _ _
theorem ndunion_le {s t u : multiset α} : ndunion s t ≤ u ↔ s ⊆ u ∧ t ≤ u :=
multiset.induction_on s (by simp) (by simp [ndinsert_le, and_comm, and.left_comm] {contextual := tt})
theorem subset_ndunion_left (s t : multiset α) : s ⊆ ndunion s t :=
λ a h, mem_ndunion.2 $ or.inl h
theorem le_ndunion_left {s} (t : multiset α) (d : nodup s) : s ≤ ndunion s t :=
(le_iff_subset d).2 $ subset_ndunion_left _ _
theorem ndunion_le_union (s t : multiset α) : ndunion s t ≤ s ∪ t :=
ndunion_le.2 ⟨subset_of_le (le_union_left _ _), le_union_right _ _⟩
theorem nodup_ndunion (s : multiset α) {t : multiset α} : nodup t → nodup (ndunion s t) :=
quotient.induction_on₂ s t $ λ l₁ l₂, list.nodup_union _
@[simp] theorem ndunion_eq_union {s t : multiset α} (d : nodup s) : ndunion s t = s ∪ t :=
le_antisymm (ndunion_le_union _ _) $ union_le (le_ndunion_left _ d) (le_ndunion_right _ _)
theorem erase_dup_add (s t : multiset α) : erase_dup (s + t) = ndunion s (erase_dup t) :=
quotient.induction_on₂ s t $ λ l₁ l₂, congr_arg coe $ erase_dup_append _ _
/- finset inter -/
/-- `ndinter s t` is the lift of the list `∩` operation. This operation
does not respect multiplicities, unlike `s ∩ t`, but it is suitable as
an intersection operation on `finset`. (`s ∩ t` would also work as an intersection operation
on finset, but this is more efficient.) -/
def ndinter (s t : multiset α) : multiset α := filter (∈ t) s
@[simp] theorem coe_ndinter (l₁ l₂ : list α) : @ndinter α _ l₁ l₂ = (l₁ ∩ l₂ : list α) := rfl
@[simp] theorem zero_ndinter (s : multiset α) : ndinter 0 s = 0 := rfl
@[simp] theorem cons_ndinter_of_mem {a : α} (s : multiset α) {t : multiset α} (h : a ∈ t) :
ndinter (a::s) t = a :: (ndinter s t) := by simp [ndinter, h]
@[simp] theorem ndinter_cons_of_not_mem {a : α} (s : multiset α) {t : multiset α} (h : a ∉ t) :
ndinter (a::s) t = ndinter s t := by simp [ndinter, h]
@[simp] theorem mem_ndinter {s t : multiset α} {a : α} : a ∈ ndinter s t ↔ a ∈ s ∧ a ∈ t :=
mem_filter
theorem nodup_ndinter {s : multiset α} (t : multiset α) : nodup s → nodup (ndinter s t) :=
nodup_filter _
theorem le_ndinter {s t u : multiset α} : s ≤ ndinter t u ↔ s ≤ t ∧ s ⊆ u :=
by simp [ndinter, le_filter, subset_iff]
theorem ndinter_le_left (s t : multiset α) : ndinter s t ≤ s :=
(le_ndinter.1 (le_refl _)).1
theorem ndinter_subset_right (s t : multiset α) : ndinter s t ⊆ t :=
(le_ndinter.1 (le_refl _)).2
theorem ndinter_le_right {s} (t : multiset α) (d : nodup s) : ndinter s t ≤ t :=
(le_iff_subset $ nodup_ndinter _ d).2 (ndinter_subset_right _ _)
theorem inter_le_ndinter (s t : multiset α) : s ∩ t ≤ ndinter s t :=
le_ndinter.2 ⟨inter_le_left _ _, subset_of_le $ inter_le_right _ _⟩
@[simp] theorem ndinter_eq_inter {s t : multiset α} (d : nodup s) : ndinter s t = s ∩ t :=
le_antisymm (le_inter (ndinter_le_left _ _) (ndinter_le_right _ d)) (inter_le_ndinter _ _)
theorem ndinter_eq_zero_iff_disjoint {s t : multiset α} : ndinter s t = 0 ↔ disjoint s t :=
by rw ← subset_zero; simp [subset_iff, disjoint]
end
/- fold -/
section fold
variables (op : α → α → α) [hc : is_commutative α op] [ha : is_associative α op]
local notation a * b := op a b
include hc ha
/-- `fold op b s` folds a commutative associative operation `op` over
the multiset `s`. -/
def fold : α → multiset α → α := foldr op (left_comm _ hc.comm ha.assoc)
theorem fold_eq_foldr (b : α) (s : multiset α) : fold op b s = foldr op (left_comm _ hc.comm ha.assoc) b s := rfl
@[simp] theorem coe_fold_r (b : α) (l : list α) : fold op b l = l.foldr op b := rfl
theorem coe_fold_l (b : α) (l : list α) : fold op b l = l.foldl op b :=
(coe_foldr_swap op _ b l).trans $ by simp [hc.comm]
theorem fold_eq_foldl (b : α) (s : multiset α) : fold op b s = foldl op (right_comm _ hc.comm ha.assoc) b s :=
quot.induction_on s $ λ l, coe_fold_l _ _ _
@[simp] theorem fold_zero (b : α) : (0 : multiset α).fold op b = b := rfl
@[simp] theorem fold_cons_left : ∀ (b a : α) (s : multiset α),
(a :: s).fold op b = a * s.fold op b := foldr_cons _ _
theorem fold_cons_right (b a : α) (s : multiset α) : (a :: s).fold op b = s.fold op b * a :=
by simp [hc.comm]
theorem fold_cons'_right (b a : α) (s : multiset α) : (a :: s).fold op b = s.fold op (b * a) :=
by rw [fold_eq_foldl, foldl_cons, ← fold_eq_foldl]
theorem fold_cons'_left (b a : α) (s : multiset α) : (a :: s).fold op b = s.fold op (a * b) :=
by rw [fold_cons'_right, hc.comm]
theorem fold_add (b₁ b₂ : α) (s₁ s₂ : multiset α) : (s₁ + s₂).fold op (b₁ * b₂) = s₁.fold op b₁ * s₂.fold op b₂ :=
multiset.induction_on s₂
(by rw [add_zero, fold_zero, ← fold_cons'_right, ← fold_cons_right op])
(by simp {contextual := tt}; cc)
theorem fold_singleton (b a : α) : (a::0 : multiset α).fold op b = a * b := by simp
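-- Quick illustration (sketch): unfolding `fold` over an explicit two-element
-- multiset via the simp equations `fold_cons_left` and `fold_zero` above.
example (a b c : α) : fold op c (a :: b :: 0) = a * (b * c) := by simp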
theorem fold_distrib {f g : β → α} (u₁ u₂ : α) (s : multiset β) :
(s.map (λx, f x * g x)).fold op (u₁ * u₂) = (s.map f).fold op u₁ * (s.map g).fold op u₂ :=
multiset.induction_on s (by simp) (by simp {contextual := tt}; cc)
theorem fold_hom {op' : β → β → β} [is_commutative β op'] [is_associative β op']
{m : α → β} (hm : ∀x y, m (op x y) = op' (m x) (m y)) (b : α) (s : multiset α) :
(s.map m).fold op' (m b) = m (s.fold op b) :=
multiset.induction_on s (by simp) (by simp [hm] {contextual := tt})
theorem fold_union_inter [decidable_eq α] (s₁ s₂ : multiset α) (b₁ b₂ : α) :
(s₁ ∪ s₂).fold op b₁ * (s₁ ∩ s₂).fold op b₂ = s₁.fold op b₁ * s₂.fold op b₂ :=
by rw [← fold_add op, union_add_inter, fold_add op]
@[simp] theorem fold_erase_dup_idem [decidable_eq α] [hi : is_idempotent α op] (s : multiset α) (b : α) :
(erase_dup s).fold op b = s.fold op b :=
multiset.induction_on s (by simp) $ λ a s IH, begin
by_cases a ∈ s; simp [IH, h],
show fold op b s = op a (fold op b s),
rw [← cons_erase h, fold_cons_left, ← ha.assoc, hi.idempotent],
end
end fold
theorem le_smul_erase_dup [decidable_eq α] (s : multiset α) :
∃ n : ℕ, s ≤ n • erase_dup s :=
⟨(s.map (λ a, count a s)).fold max 0, le_iff_count.2 $ λ a, begin
rw count_smul, by_cases a ∈ s,
{ refine le_trans _ (mul_le_mul_left _ $ count_pos.2 $ mem_erase_dup.2 h),
have : count a s ≤ fold max 0 (map (λ a, count a s) (a :: erase s a));
[simp [le_max_left], simpa [cons_erase h]] },
{ simp [count_eq_zero.2 h, nat.zero_le] }
end⟩
section sup
variables [semilattice_sup_bot α]
/-- Supremum of a multiset: `sup {a, b, c} = a ⊔ b ⊔ c` -/
def sup (s : multiset α) : α := s.fold (⊔) ⊥
@[simp] lemma sup_zero : (0 : multiset α).sup = ⊥ :=
fold_zero _ _
@[simp] lemma sup_cons (a : α) (s : multiset α) :
(a :: s).sup = a ⊔ s.sup :=
fold_cons_left _ _ _ _
@[simp] lemma sup_singleton {a : α} : (a::0).sup = a := by simp
@[simp] lemma sup_add (s₁ s₂ : multiset α) : (s₁ + s₂).sup = s₁.sup ⊔ s₂.sup :=
eq.trans (by simp [sup]) (fold_add _ _ _ _ _)
variables [decidable_eq α]
@[simp] lemma sup_erase_dup (s : multiset α) : (erase_dup s).sup = s.sup :=
fold_erase_dup_idem _ _ _
@[simp] lemma sup_ndunion (s₁ s₂ : multiset α) :
(ndunion s₁ s₂).sup = s₁.sup ⊔ s₂.sup :=
by rw [← sup_erase_dup, erase_dup_ext.2, sup_erase_dup, sup_add]; simp
@[simp] lemma sup_union (s₁ s₂ : multiset α) :
(s₁ ∪ s₂).sup = s₁.sup ⊔ s₂.sup :=
by rw [← sup_erase_dup, erase_dup_ext.2, sup_erase_dup, sup_add]; simp
@[simp] lemma sup_ndinsert (a : α) (s : multiset α) :
(ndinsert a s).sup = a ⊔ s.sup :=
by rw [← sup_erase_dup, erase_dup_ext.2, sup_erase_dup, sup_cons]; simp
lemma sup_le {s : multiset α} {a : α} : s.sup ≤ a ↔ (∀b ∈ s, b ≤ a) :=
multiset.induction_on s (by simp)
(by simp [or_imp_distrib, forall_and_distrib] {contextual := tt})
lemma le_sup {s : multiset α} {a : α} (h : a ∈ s) : a ≤ s.sup :=
sup_le.1 (le_refl _) _ h
lemma sup_mono {s₁ s₂ : multiset α} (h : s₁ ⊆ s₂) : s₁.sup ≤ s₂.sup :=
sup_le.2 $ assume b hb, le_sup (h hb)
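-- Quick illustration (sketch): by `le_sup`, every element of a multiset is
-- bounded above by its `sup`; in particular so is the head of a cons.
example (a : α) (s : multiset α) : a ≤ (a :: s).sup :=
le_sup (mem_cons_self a s)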
end sup
section inf
variables [semilattice_inf_top α]
/-- Infimum of a multiset: `inf {a, b, c} = a ⊓ b ⊓ c` -/
def inf (s : multiset α) : α := s.fold (⊓) ⊤
@[simp] lemma inf_zero : (0 : multiset α).inf = ⊤ :=
fold_zero _ _
@[simp] lemma inf_cons (a : α) (s : multiset α) :
(a :: s).inf = a ⊓ s.inf :=
fold_cons_left _ _ _ _
@[simp] lemma inf_singleton {a : α} : (a::0).inf = a := by simp
@[simp] lemma inf_add (s₁ s₂ : multiset α) : (s₁ + s₂).inf = s₁.inf ⊓ s₂.inf :=
eq.trans (by simp [inf]) (fold_add _ _ _ _ _)
variables [decidable_eq α]
@[simp] lemma inf_erase_dup (s : multiset α) : (erase_dup s).inf = s.inf :=
fold_erase_dup_idem _ _ _
@[simp] lemma inf_ndunion (s₁ s₂ : multiset α) :
(ndunion s₁ s₂).inf = s₁.inf ⊓ s₂.inf :=
by rw [← inf_erase_dup, erase_dup_ext.2, inf_erase_dup, inf_add]; simp
@[simp] lemma inf_union (s₁ s₂ : multiset α) :
(s₁ ∪ s₂).inf = s₁.inf ⊓ s₂.inf :=
by rw [← inf_erase_dup, erase_dup_ext.2, inf_erase_dup, inf_add]; simp
@[simp] lemma inf_ndinsert (a : α) (s : multiset α) :
(ndinsert a s).inf = a ⊓ s.inf :=
by rw [← inf_erase_dup, erase_dup_ext.2, inf_erase_dup, inf_cons]; simp
lemma le_inf {s : multiset α} {a : α} : a ≤ s.inf ↔ (∀b ∈ s, a ≤ b) :=
multiset.induction_on s (by simp)
(by simp [or_imp_distrib, forall_and_distrib] {contextual := tt})
lemma inf_le {s : multiset α} {a : α} (h : a ∈ s) : s.inf ≤ a :=
le_inf.1 (le_refl _) _ h
lemma inf_mono {s₁ s₂ : multiset α} (h : s₁ ⊆ s₂) : s₂.inf ≤ s₁.inf :=
le_inf.2 $ assume b hb, inf_le (h hb)
end inf
section sort
variables (r : α → α → Prop) [decidable_rel r]
[is_trans α r] [is_antisymm α r] [is_total α r]
/-- `sort s` constructs a sorted list from the multiset `s`.
(Uses merge sort algorithm.) -/
def sort (s : multiset α) : list α :=
quot.lift_on s (merge_sort r) $ λ a b h,
eq_of_sorted_of_perm
((perm_merge_sort _ _).trans $ h.trans (perm_merge_sort _ _).symm)
(sorted_merge_sort r _)
(sorted_merge_sort r _)
@[simp] theorem coe_sort (l : list α) : sort r l = merge_sort r l := rfl
@[simp] theorem sort_sorted (s : multiset α) : sorted r (sort r s) :=
quot.induction_on s $ λ l, sorted_merge_sort r _
@[simp] theorem sort_eq (s : multiset α) : ↑(sort r s) = s :=
quot.induction_on s $ λ l, quot.sound $ perm_merge_sort _ _
@[simp] theorem mem_sort {s : multiset α} {a : α} : a ∈ sort r s ↔ a ∈ s :=
by rw [← mem_coe, sort_eq]
end sort
instance [has_repr α] : has_repr (multiset α) :=
⟨λ s, "{" ++ string.intercalate ", " ((s.map repr).sort (≤)) ++ "}"⟩
section sections
def sections (s : multiset (multiset α)) : multiset (multiset α) :=
multiset.rec_on s {0} (λs _ c, s.bind $ λa, c.map ((::) a))
(assume a₀ a₁ s pi, by simp [map_bind, bind_bind a₀ a₁, cons_swap])
@[simp] lemma sections_zero : sections (0 : multiset (multiset α)) = 0::0 :=
rfl
@[simp] lemma sections_cons (s : multiset (multiset α)) (m : multiset α) :
sections (m :: s) = m.bind (λa, (sections s).map ((::) a)) :=
rec_on_cons m s
lemma coe_sections : ∀(l : list (list α)),
sections ((l.map (λl:list α, (l : multiset α))) : multiset (multiset α)) =
((l.sections.map (λl:list α, (l : multiset α))) : multiset (multiset α))
| [] := rfl
| (a :: l) :=
begin
simp,
rw [← cons_coe, sections_cons, bind_map_comm, coe_sections l],
simp [list.sections, (∘), list.bind]
end
@[simp] lemma sections_add (s t : multiset (multiset α)) :
sections (s + t) = (sections s).bind (λm, (sections t).map ((+) m)) :=
multiset.induction_on s (by simp)
(assume a s ih, by simp [ih, bind_assoc, map_bind, bind_map, -add_comm])
lemma mem_sections {s : multiset (multiset α)} :
∀{a}, a ∈ sections s ↔ s.rel (λs a, a ∈ s) a :=
multiset.induction_on s (by simp)
(assume a s ih a',
by simp [ih, rel_cons_left, -exists_and_distrib_left, exists_and_distrib_left.symm, eq_comm])
lemma card_sections {s : multiset (multiset α)} : card (sections s) = prod (s.map card) :=
multiset.induction_on s (by simp) (by simp {contextual := tt})
lemma prod_map_sum [comm_semiring α] {s : multiset (multiset α)} :
prod (s.map sum) = sum ((sections s).map prod) :=
multiset.induction_on s (by simp)
(assume a s ih, by simp [ih, map_bind, sum_map_mul_left, sum_map_mul_right])
end sections
section pi
variables [decidable_eq α] {δ : α → Type*}
open function
def pi.cons (m : multiset α) (a : α) (b : δ a) (f : Πa∈m, δ a) : Πa'∈a::m, δ a' :=
λa' ha', if h : a' = a then eq.rec b h.symm else f a' $ (mem_cons.1 ha').resolve_left h
def pi.empty (δ : α → Type*) : (Πa∈(0:multiset α), δ a) .
lemma pi.cons_same {m : multiset α} {a : α} {b : δ a} {f : Πa∈m, δ a} (h : a ∈ a :: m) :
pi.cons m a b f a h = b :=
dif_pos rfl
lemma pi.cons_ne {m : multiset α} {a a' : α} {b : δ a} {f : Πa∈m, δ a} (h' : a' ∈ a :: m) (h : a' ≠ a) :
pi.cons m a b f a' h' = f a' ((mem_cons.1 h').resolve_left h) :=
dif_neg h
lemma pi.cons_swap {a a' : α} {b : δ a} {b' : δ a'} {m : multiset α} {f : Πa∈m, δ a} (h : a ≠ a') :
pi.cons (a' :: m) a b (pi.cons m a' b' f) == pi.cons (a :: m) a' b' (pi.cons m a b f) :=
begin
apply hfunext, { refl }, intros a'' _ h, subst h,
apply hfunext, { rw [cons_swap] }, intros ha₁ ha₂ h,
by_cases h₁ : a'' = a; by_cases h₂ : a'' = a';
simp [*, pi.cons_same, pi.cons_ne] at *,
{ subst h₁, rw [pi.cons_same, pi.cons_same] },
{ subst h₂, rw [pi.cons_same, pi.cons_same] }
end
/-- `pi m t` constructs the Cartesian product over `t` indexed by `m`. -/
def pi (m : multiset α) (t : Πa, multiset (δ a)) : multiset (Πa∈m, δ a) :=
m.rec_on {pi.empty δ} (λa m (p : multiset (Πa∈m, δ a)), (t a).bind $ λb, p.map $ pi.cons m a b)
begin
intros a a' m n,
by_cases eq : a = a',
{ subst eq },
{ simp [map_bind, bind_bind (t a') (t a)],
apply bind_hcongr, { rw [cons_swap a a'] },
intros b hb,
apply bind_hcongr, { rw [cons_swap a a'] },
intros b' hb',
apply map_hcongr, { rw [cons_swap a a'] },
intros f hf,
exact pi.cons_swap eq }
end
@[simp] lemma pi_zero (t : Πa, multiset (δ a)) : pi 0 t = pi.empty δ :: 0 := rfl
@[simp] lemma pi_cons (m : multiset α) (t : Πa, multiset (δ a)) (a : α) :
pi (a :: m) t = ((t a).bind $ λb, (pi m t).map $ pi.cons m a b) :=
rec_on_cons a m
lemma injective_pi_cons {a : α} {b : δ a} {s : multiset α} (hs : a ∉ s) :
function.injective (pi.cons s a b) :=
assume f₁ f₂ eq, funext $ assume a', funext $ assume h',
have ne : a ≠ a', from assume h, hs $ h.symm ▸ h',
have a' ∈ a :: s, from mem_cons_of_mem h',
calc f₁ a' h' = pi.cons s a b f₁ a' this : by rw [pi.cons_ne this ne.symm]
... = pi.cons s a b f₂ a' this : by rw [eq]
... = f₂ a' h' : by rw [pi.cons_ne this ne.symm]
lemma card_pi (m : multiset α) (t : Πa, multiset (δ a)) :
card (pi m t) = prod (m.map $ λa, card (t a)) :=
multiset.induction_on m (by simp) (by simp [mul_comm] {contextual := tt})
lemma nodup_pi {s : multiset α} {t : Πa, multiset (δ a)} :
nodup s → (∀a∈s, nodup (t a)) → nodup (pi s t) :=
multiset.induction_on s (assume _ _, nodup_singleton _)
begin
assume a s ih hs ht,
have has : a ∉ s, by simp at hs; exact hs.1,
have hs : nodup s, by simp at hs; exact hs.2,
simp,
split,
{ assume b hb,
from nodup_map (injective_pi_cons has) (ih hs $ assume a' h', ht a' $ mem_cons_of_mem h') },
{ apply pairwise_of_nodup _ (ht a $ mem_cons_self _ _),
from assume b₁ hb₁ b₂ hb₂ neb, disjoint_map_map.2 (assume f hf g hg eq,
have pi.cons s a b₁ f a (mem_cons_self _ _) = pi.cons s a b₂ g a (mem_cons_self _ _),
by rw [eq],
neb $ show b₁ = b₂, by rwa [pi.cons_same, pi.cons_same] at this) }
end
lemma mem_pi (m : multiset α) (t : Πa, multiset (δ a)) :
∀f:Πa∈m, δ a, (f ∈ pi m t) ↔ (∀a (h : a ∈ m), f a h ∈ t a) :=
begin
refine multiset.induction_on m (λ f, _) (λ a m ih f, _),
{ simpa using show f = pi.empty δ, by funext a ha; exact ha.elim },
simp, split,
{ rintro ⟨b, hb, f', hf', rfl⟩ a' ha',
rw [ih] at hf',
by_cases a' = a,
{ subst h, rwa [pi.cons_same] },
{ rw [pi.cons_ne _ h], apply hf' } },
{ intro hf,
refine ⟨_, hf a (mem_cons_self a _), λa ha, f a (mem_cons_of_mem ha),
(ih _).2 (λ a' h', hf _ _), _⟩,
funext a' h',
by_cases a' = a,
{ subst h, rw [pi.cons_same] },
{ rw [pi.cons_ne _ h] } }
end
end pi
end multiset
namespace multiset
instance : functor multiset :=
{ map := @map }
instance : is_lawful_functor multiset :=
by refine { .. }; intros; simp
open is_lawful_traversable is_comm_applicative
variables {F : Type u_1 → Type u_1} [applicative F] [is_comm_applicative F]
variables {α' β' : Type u_1} (f : α' → F β')
def traverse : multiset α' → F (multiset β') :=
quotient.lift (functor.map coe ∘ traversable.traverse f)
begin
introv p, unfold function.comp,
induction p,
case perm.nil { refl },
case perm.skip {
have : multiset.cons <$> f p_x <*> (coe <$> traverse f p_l₁) =
multiset.cons <$> f p_x <*> (coe <$> traverse f p_l₂),
{ rw [p_ih] },
simpa with functor_norm },
case perm.swap {
have : (λa b (l:list β'), (↑(a :: b :: l) : multiset β')) <$> f p_y <*> f p_x =
(λa b l, ↑(a :: b :: l)) <$> f p_x <*> f p_y,
{ rw [is_comm_applicative.commutative_map],
congr, funext a b l, simpa [flip] using perm.swap b a l },
simp [(∘), this] with functor_norm },
case perm.trans { simp [*] }
end
instance : monad multiset :=
{ pure := λ α x, x::0,
bind := @bind,
.. multiset.functor }
instance : is_lawful_monad multiset :=
{ bind_pure_comp_eq_map := λ α β f s, multiset.induction_on s rfl $ λ a s ih,
by rw [bind_cons, map_cons, bind_zero, add_zero],
pure_bind := λ α β x f, by simp only [cons_bind, zero_bind, add_zero],
bind_assoc := @bind_assoc }
open functor
open traversable is_lawful_traversable
@[simp]
lemma lift_beta {α β : Type*} (x : list α) (f : list α → β)
(h : ∀ a b : list α, a ≈ b → f a = f b) :
quotient.lift f h (x : multiset α) = f x :=
quotient.lift_beta _ _ _
@[simp]
lemma map_comp_coe {α β} (h : α → β) :
functor.map h ∘ coe = (coe ∘ functor.map h : list α → multiset β) :=
by funext; simp [functor.map]
lemma id_traverse {α : Type*} (x : multiset α) :
traverse id.mk x = x :=
quotient.induction_on x
(by { intro, rw [traverse,quotient.lift_beta,function.comp],
simp, congr })
lemma comp_traverse {G H : Type* → Type*}
[applicative G] [applicative H]
[is_comm_applicative G] [is_comm_applicative H]
{α β γ : Type*}
(g : α → G β) (h : β → H γ) (x : multiset α) :
traverse (comp.mk ∘ functor.map h ∘ g) x =
comp.mk (functor.map (traverse h) (traverse g x)) :=
quotient.induction_on x
(by intro;
simp [traverse,comp_traverse] with functor_norm;
simp [(<$>),(∘)] with functor_norm)
lemma map_traverse {G : Type* → Type*}
[applicative G] [is_comm_applicative G]
{α β γ : Type*}
(g : α → G β) (h : β → γ)
(x : multiset α) :
functor.map (functor.map h) (traverse g x) =
traverse (functor.map h ∘ g) x :=
quotient.induction_on x
(by intro; simp [traverse] with functor_norm;
rw [comp_map,map_traverse])
lemma traverse_map {G : Type* → Type*}
[applicative G] [is_comm_applicative G]
{α β γ : Type*}
(g : α → β) (h : β → G γ)
(x : multiset α) :
traverse h (map g x) =
traverse (h ∘ g) x :=
quotient.induction_on x
(by intro; simp [traverse];
rw [← traversable.traverse_map h g];
[ refl, apply_instance ])
lemma naturality {G H : Type* → Type*}
[applicative G] [applicative H]
[is_comm_applicative G] [is_comm_applicative H]
(eta : applicative_transformation G H)
{α β : Type*} (f : α → G β) (x : multiset α) :
eta (traverse f x) = traverse (@eta _ ∘ f) x :=
quotient.induction_on x
(by intro; simp [traverse,is_lawful_traversable.naturality] with functor_norm)
section choose
variables (p : α → Prop) [decidable_pred p] (l : multiset α)
def choose_x : Π hp : (∃! a, a ∈ l ∧ p a), { a // a ∈ l ∧ p a } :=
quotient.rec_on l (λ l' ex_unique, list.choose_x p l' (exists_of_exists_unique ex_unique)) begin
intros,
funext hp,
suffices all_equal : ∀ x y : { t // t ∈ b ∧ p t }, x = y,
{ apply all_equal },
{ rintros ⟨x, px⟩ ⟨y, py⟩,
rcases hp with ⟨z, ⟨z_mem_l, pz⟩, z_unique⟩,
congr,
calc x = z : z_unique x px
... = y : (z_unique y py).symm }
end
def choose (hp : ∃! a, a ∈ l ∧ p a) : α := choose_x p l hp
lemma choose_spec (hp : ∃! a, a ∈ l ∧ p a) : choose p l hp ∈ l ∧ p (choose p l hp) :=
(choose_x p l hp).property
lemma choose_mem (hp : ∃! a, a ∈ l ∧ p a) : choose p l hp ∈ l := (choose_spec _ _ _).1
lemma choose_property (hp : ∃! a, a ∈ l ∧ p a) : p (choose p l hp) := (choose_spec _ _ _).2
end choose
/- Ico -/
/-- `Ico n m` is the multiset lifted from the list `Ico n m`, e.g. the set `{n, n+1, ..., m-1}`. -/
def Ico (n m : ℕ) : multiset ℕ := Ico n m
namespace Ico
theorem map_add (n m k : ℕ) : (Ico n m).map ((+) k) = Ico (n + k) (m + k) :=
congr_arg coe $ list.Ico.map_add _ _ _
theorem map_sub (n m k : ℕ) (h : k ≤ n) : (Ico n m).map (λ x, x - k) = Ico (n - k) (m - k) :=
congr_arg coe $ list.Ico.map_sub _ _ _ h
theorem zero_bot (n : ℕ) : Ico 0 n = range n :=
congr_arg coe $ list.Ico.zero_bot _
@[simp] theorem card (n m : ℕ) : (Ico n m).card = m - n :=
list.Ico.length _ _
theorem nodup (n m : ℕ) : nodup (Ico n m) := Ico.nodup _ _
@[simp] theorem mem {n m l : ℕ} : l ∈ Ico n m ↔ n ≤ l ∧ l < m :=
list.Ico.mem
theorem eq_zero_of_le {n m : ℕ} (h : m ≤ n) : Ico n m = 0 :=
congr_arg coe $ list.Ico.eq_nil_of_le h
@[simp] theorem self_eq_zero {n : ℕ} : Ico n n = 0 :=
eq_zero_of_le $ le_refl n
@[simp] theorem eq_zero_iff {n m : ℕ} : Ico n m = 0 ↔ m ≤ n :=
iff.trans (coe_eq_zero _) list.Ico.eq_empty_iff
lemma add_consecutive {n m l : ℕ} (hnm : n ≤ m) (hml : m ≤ l) :
Ico n m + Ico m l = Ico n l :=
congr_arg coe $ list.Ico.append_consecutive hnm hml
@[simp] lemma inter_consecutive (n m l : ℕ) : Ico n m ∩ Ico m l = 0 :=
congr_arg coe $ list.Ico.bag_inter_consecutive n m l
@[simp] theorem succ_singleton {n : ℕ} : Ico n (n+1) = {n} :=
congr_arg coe $ list.Ico.succ_singleton
theorem succ_top {n m : ℕ} (h : n ≤ m) : Ico n (m + 1) = m :: Ico n m :=
by rw [Ico, list.Ico.succ_top h, ← coe_add, add_comm]; refl
theorem eq_cons {n m : ℕ} (h : n < m) : Ico n m = n :: Ico (n + 1) m :=
congr_arg coe $ list.Ico.eq_cons h
@[simp] theorem pred_singleton {m : ℕ} (h : m > 0) : Ico (m - 1) m = {m - 1} :=
congr_arg coe $ list.Ico.pred_singleton h
@[simp] theorem not_mem_top {n m : ℕ} : m ∉ Ico n m :=
list.Ico.not_mem_top
lemma filter_lt_of_top_le {n m l : ℕ} (hml : m ≤ l) : (Ico n m).filter (λ x, x < l) = Ico n m :=
congr_arg coe $ list.Ico.filter_lt_of_top_le hml
lemma filter_lt_of_le_bot {n m l : ℕ} (hln : l ≤ n) : (Ico n m).filter (λ x, x < l) = ∅ :=
congr_arg coe $ list.Ico.filter_lt_of_le_bot hln
lemma filter_lt_of_ge {n m l : ℕ} (hlm : l ≤ m) : (Ico n m).filter (λ x, x < l) = Ico n l :=
congr_arg coe $ list.Ico.filter_lt_of_ge hlm
@[simp] lemma filter_lt (n m l : ℕ) : (Ico n m).filter (λ x, x < l) = Ico n (min m l) :=
congr_arg coe $ list.Ico.filter_lt n m l
lemma filter_ge_of_le_bot {n m l : ℕ} (hln : l ≤ n) : (Ico n m).filter (λ x, x ≥ l) = Ico n m :=
congr_arg coe $ list.Ico.filter_ge_of_le_bot hln
lemma filter_ge_of_top_le {n m l : ℕ} (hml : m ≤ l) : (Ico n m).filter (λ x, x ≥ l) = ∅ :=
congr_arg coe $ list.Ico.filter_ge_of_top_le hml
lemma filter_ge_of_ge {n m l : ℕ} (hnl : n ≤ l) : (Ico n m).filter (λ x, x ≥ l) = Ico l m :=
congr_arg coe $ list.Ico.filter_ge_of_ge hnl
@[simp] lemma filter_ge (n m l : ℕ) : (Ico n m).filter (λ x, x ≥ l) = Ico (max n l) m :=
congr_arg coe $ list.Ico.filter_ge n m l
end Ico
end multiset
|
println("loading mumps lib ...")
using MPI
using MUMPS # needs to be initialized before Ipopt; don't understand why
println("mumps lib loaded.")
type linear_solver_MUMPS <: abstract_linear_system_solver
_factor::MUMPS.Mumps{Float64}
M::SparseMatrixCSC{Float64,Int64}
sym::Symbol
safe_mode::Bool
function linear_solver_MUMPS(sym::Symbol, safe_mode::Bool)
this = new();
this.sym = sym
this.safe_mode = safe_mode
return this
end
end
# initialize and finalize functions???
# MPI ???
function mumps_sym(sym::Symbol)
if sym == :symmetric
return MUMPS.mumps_symmetric
elseif sym == :unsymmetric
return MUMPS.mumps_unsymmetric
elseif sym == :definite
return MUMPS.mumps_definite
else
error("this symmetry symbol is not understood")
end
end
function create_mumps_factor(solver::linear_solver_MUMPS)
cntl = MUMPS.default_cntl64[:]; # copy
cntl[1] = 0.01
icntl = MUMPS.default_icntl[:]; # copy
icntl[4] = 1;
icntl[10] = 1; # 1 iterative refinement step
icntl[14] = 200.0 #1000.0;
return MUMPS.Mumps{Float64}(mumps_sym(solver.sym), icntl, cntl ); # Real, general unsymmetric
end
function create_mumps_factor2(solver::linear_solver_MUMPS)
icntl = get_icntl(verbose=false);
return MUMPS.Mumps{Float64}(mumps_sym(solver.sym), icntl, MUMPS.default_cntl64); # Real, general unsymmetric
end
function initialize!(solver::linear_solver_MUMPS)
if ~MPI.Initialized()
MPI.Init()
end
solver._factor = create_mumps_factor2(solver)
end
function finalize!(solver::linear_solver_MUMPS)
MUMPS.finalize(solver._factor);
end
function ls_factor!(solver::linear_solver_MUMPS, SparseMatrix::SparseMatrixCSC{Float64,Int64}, n::Int64, m::Int64, timer::class_advanced_timer)
#if
return ls_factor_LBL!(solver, SparseMatrix, n, m, timer)
end
# A-matrix
function ls_factor_LBL!(solver::linear_solver_MUMPS, SparseMatrix::SparseMatrixCSC{Float64,Int64}, n::Int64, m::Int64, timer::class_advanced_timer)
start_advanced_timer(timer,"MUMPS")
@assert(size(SparseMatrix,1) == n + m)
@assert(size(SparseMatrix,2) == n + m)
solver.M = deepcopy(SparseMatrix)
start_advanced_timer(timer,"MUMPS/associate_matrix")
associate_matrix!(solver._factor, solver.M);
pause_advanced_timer(timer,"MUMPS/associate_matrix")
start_advanced_timer(timer,"MUMPS/factorize")
factorize!(solver._factor);
pause_advanced_timer(timer,"MUMPS/factorize")
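# MUMPS reports its status in infog[1]; per the MUMPS documentation, -10 indicates a
# numerically singular matrix (zero pivots), which is what the check below treats as zero eigenvalues.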
zero_eigs = solver._factor.infog[1] == -10 || solver._factor.infog[1] == -2
pause_advanced_timer(timer,"MUMPS")
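# infog[12] is the number of negative pivots reported by MUMPS; more than m of them
# indicates the wrong inertia for this (n+m)-dimensional saddle-point system.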
if solver._factor.infog[12] > m
return -1
end
if zero_eigs
return 0
end
return 1
end
function ls_solve(solver::linear_solver_MUMPS, my_rhs::AbstractArray, timer::class_advanced_timer)
start_advanced_timer(timer,"MUMPS")
start_advanced_timer(timer,"MUMPS/solve")
if solver.safe_mode
MUMPS.associate_matrix!(solver._factor, solver.M);
MUMPS.factorize!(solver._factor);
end
associate_rhs!(solver._factor, reshape(my_rhs,length(my_rhs),1))
solve!(solver._factor)
sol = deepcopy(get_solution(solver._factor))
#sol = solve(solver._factor, my_rhs)
pause_advanced_timer(timer,"MUMPS/solve")
pause_advanced_timer(timer,"MUMPS")
return sol[:]
end
|
module Main
counts : String -> (Nat, Nat)
counts str = (length (words str), length str)
main : IO ()
main = repl "Enter a string: " show_counts
where
show_counts : String -> String
show_counts x = show (counts x) ++ "\n"
|
[STATEMENT]
lemma ftype_preservation'':
"\<lbrakk>find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \<in> set path;
find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk>
\<Longrightarrow> ftype_in_path_f P path f = Some ty"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk> \<Longrightarrow> ftype_in_path_f P path f = Some ty
[PROOF STEP]
apply(cut_tac ftype_preservation'''[rule_format, of _ _ _ _ _ _ "[]"])
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<lbrakk>find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; ftype_in_path_f P path' f = Some ty; ftype_in_path_f ?P1 ?suffix1 ?f1 = Some ?ty1\<rbrakk> \<Longrightarrow> ftype_in_path_f P path f = Some ty
2. \<lbrakk>find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk> \<Longrightarrow> find_path_f ?P1 ?ctx'1 ?cl1 = Some ?suffix1
3. \<lbrakk>find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk> \<Longrightarrow> wf_program ?P1
4. \<lbrakk>find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk> \<Longrightarrow> (?ctx1, ?cld1) \<in> set ?suffix1
5. \<lbrakk>find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk> \<Longrightarrow> find_path_rec_f ?P1 ?ctx1 (cl_fqn (fqn_def (class_name_f ?cld1))) [] = Some ([] @ ?suffix'1) \<and> ftype_in_path_f ?P1 ?suffix'1 ?f1 = Some ?ty1
[PROOF STEP]
apply(assumption+)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk> \<Longrightarrow> find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) [] = Some ([] @ ?suffix'1) \<and> ftype_in_path_f P ?suffix'1 f = Some ty
[PROOF STEP]
apply(unfold find_path_f_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>find_path_rec_f P ctx' cl [] = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) [] = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk> \<Longrightarrow> find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) [] = Some ([] @ ?suffix'1) \<and> ftype_in_path_f P ?suffix'1 f = Some ty
[PROOF STEP]
apply(rule)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>find_path_rec_f P ctx' cl [] = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) [] = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk> \<Longrightarrow> find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) [] = Some ([] @ ?suffix'1)
2. \<lbrakk>find_path_rec_f P ctx' cl [] = Some path; wf_program P; (ctx, cld) \<in> set path; find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) [] = Some path'; ftype_in_path_f P path' f = Some ty\<rbrakk> \<Longrightarrow> ftype_in_path_f P ?suffix'1 f = Some ty
[PROOF STEP]
apply(simp+)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
Another of my longstanding desires for iOS is interactive (what some call actionable) notifications, which take interface from being pull (I have to go find what I want to do) to push (the system brings what I want to do right to me). Home screens, widgets, apps are all pull interface. I have to switch out of what I'm doing in order to go do something else. Interactive notifications are push interface. No matter what I'm doing, they come right to me. Depending on implementation and settings, that can be convenient or annoying, but it's inarguably powerful. OS X Mavericks got it last year. It'd be great if iOS 8 got interactive notifications this year.
[STATEMENT]
lemma sound_set_fold:
assumes "set ss \<subseteq> closure S" and "ss \<noteq> []"
shows "supremum (set ss) \<in> closure S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. supremum (set ss) \<in> closure S
[PROOF STEP]
using sound_fold[OF assms]
[PROOF STATE]
proof (prove)
using this:
fold f ss e \<in> closure S
goal (1 subgoal):
1. supremum (set ss) \<in> closure S
[PROOF STEP]
by (auto simp: cl.fold_set_fold) |
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
-/
prelude
import Init.Data.Nat.Div
import Init.Data.Nat.Bitwise
import Init.Coe
open Nat
namespace Fin
instance coeToNat : CoeOut (Fin n) Nat :=
⟨fun v => v.val⟩
def elim0.{u} {α : Sort u} : Fin 0 → α
| ⟨_, h⟩ => absurd h (not_lt_zero _)
def succ : Fin n → Fin n.succ
| ⟨i, h⟩ => ⟨i+1, Nat.succ_lt_succ h⟩
variable {n : Nat}
protected def ofNat {n : Nat} (a : Nat) : Fin n.succ :=
⟨a % (n+1), Nat.mod_lt _ (Nat.zero_lt_succ _)⟩
protected def ofNat' {n : Nat} (a : Nat) (h : n > 0) : Fin n :=
⟨a % n, Nat.mod_lt _ h⟩
private theorem mlt {b : Nat} : {a : Nat} → a < n → b % n < n
| 0, h => Nat.mod_lt _ h
| _+1, h =>
have : n > 0 := Nat.lt_trans (Nat.zero_lt_succ _) h;
Nat.mod_lt _ this
protected def add : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(a + b) % n, mlt h⟩
protected def mul : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(a * b) % n, mlt h⟩
protected def sub : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(a + (n - b)) % n, mlt h⟩
/-!
Remark: mod/div/modn/land/lor can be defined without using (% n), but
we are trying to minimize the number of Nat theorems
needed to bootstrap Lean.
-/
protected def mod : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(a % b) % n, mlt h⟩
protected def div : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(a / b) % n, mlt h⟩
def modn : Fin n → Nat → Fin n
| ⟨a, h⟩, m => ⟨(a % m) % n, mlt h⟩
def land : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(Nat.land a b) % n, mlt h⟩
def lor : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(Nat.lor a b) % n, mlt h⟩
def xor : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(Nat.xor a b) % n, mlt h⟩
def shiftLeft : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(a <<< b) % n, mlt h⟩
def shiftRight : Fin n → Fin n → Fin n
| ⟨a, h⟩, ⟨b, _⟩ => ⟨(a >>> b) % n, mlt h⟩
instance : Add (Fin n) where
add := Fin.add
instance : Sub (Fin n) where
sub := Fin.sub
instance : Mul (Fin n) where
mul := Fin.mul
instance : Mod (Fin n) where
mod := Fin.mod
instance : Div (Fin n) where
div := Fin.div
instance : AndOp (Fin n) where
and := Fin.land
instance : OrOp (Fin n) where
or := Fin.lor
instance : Xor (Fin n) where
xor := Fin.xor
instance : ShiftLeft (Fin n) where
shiftLeft := Fin.shiftLeft
instance : ShiftRight (Fin n) where
shiftRight := Fin.shiftRight
instance : OfNat (Fin (no_index (n+1))) i where
ofNat := Fin.ofNat i
instance : Inhabited (Fin (no_index (n+1))) where
default := 0
theorem val_ne_of_ne {i j : Fin n} (h : i ≠ j) : val i ≠ val j :=
fun h' => absurd (eq_of_val_eq h') h
theorem modn_lt : ∀ {m : Nat} (i : Fin n), m > 0 → (modn i m).val < m
| _, ⟨_, _⟩, hp => Nat.lt_of_le_of_lt (mod_le _ _) (mod_lt _ hp)
theorem val_lt_of_le (i : Fin b) (h : b ≤ n) : i.val < n :=
Nat.lt_of_lt_of_le i.isLt h
end Fin
instance [GetElem cont Nat elem dom] : GetElem cont (Fin n) elem fun xs i => dom xs i where
getElem xs i h := getElem xs i.1 h
macro_rules
| `(tactic| get_elem_tactic_trivial) => `(tactic| apply Fin.val_lt_of_le; get_elem_tactic_trivial; done)
|
\documentclass[12pt]{article}
\usepackage[utf8]{inputenc}
\usepackage{float}
\usepackage{amsmath}
\usepackage[hmargin=3cm,vmargin=6.0cm]{geometry}
%\topmargin=0cm
\topmargin=-2cm
\addtolength{\textheight}{6.5cm}
\addtolength{\textwidth}{2.0cm}
%\setlength{\leftmargin}{-5cm}
\setlength{\oddsidemargin}{0.0cm}
\setlength{\evensidemargin}{0.0cm}
\newcommand{\HRule}{\rule{\linewidth}{1mm}}
%misc libraries goes here
\usepackage{tikz}
\usetikzlibrary{automata,positioning}
\begin{document}
\noindent
\HRule \\[3mm]
\begin{flushright}
\LARGE \textbf{CENG 222} \\[4mm]
\Large Statistical Methods for Computer Engineering \\[4mm]
\normalsize Spring '2018-2019 \\
\Large Homework 3 \\
\end{flushright}
\HRule
\section*{Student Information }
%Write your full name and id number between the colon and newline
%Put one empty space character after colon and before newline
Full Name : Yavuz Selim Yesilyurt \\
Id Number : 2259166
% Write your answers below the section tags
\section*{Answer a}
I have conducted a Monte Carlo study using Matlab and used it to estimate the probability that the total weight of all vehicles that pass over the bridge in the village in a day is more than 220 tons, to estimate the expected weight, and to calculate its standard deviation. \\
To size the study I have first used the Normal approximation with $\alpha = 0.01$ and $\epsilon = 0.02$; since no preliminary estimator for $p$ is available, I have bounded $p(1-p)$ by its maximum value $0.25$ and used the following directly:
\begin{align*}
N &\geq 0.25\left(\frac{z_{\alpha/2}}{\epsilon}\right)^2 \\
&= 0.25\left(\frac{2.575}{0.02}\right)^2 \\
&\approx 4144
\end{align*}
I have created variables holding the values of the distribution parameters, and I have also created a vector named $TotalWeight$ that keeps the total weight of the vehicles using the bridge in each Monte Carlo run; all $N$ of its entries are initialized to 0.\\
Next, to find the number of vehicles of each type, I have generated the samples $NMotors$, $NCars$ and $NTrucks$ from the corresponding Poisson distributions. \\
Then, to find the weights of the vehicles according to their types, I have combined these counts with the corresponding Gamma parameters and generated the sample weights $WMotors$, $WCars$ and $WTrucks$; summing them gives the total weight of one Monte Carlo run, which fills the corresponding entry of the $TotalWeight$ vector. I have repeated this $N=4144$ times to fill the $TotalWeight$ vector. \\
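A minimal sketch of this sampling loop is given below, written in Python purely for illustration (the study itself was implemented in Matlab/Octave); the Poisson rates and the Gamma shape/scale values shown are placeholders, since the assignment's specific parameters are not restated in this report. \\
\begin{verbatim}
import numpy as np

# Placeholder parameters -- substitute the Poisson rates and Gamma
# (shape, scale) values given in the assignment; weights are in kg here.
lam_motor, lam_car, lam_truck = 10, 100, 30
gam_motor, gam_car, gam_truck = (2.0, 100.0), (3.0, 500.0), (4.0, 4000.0)

N = 4144                              # Monte Carlo runs from the accuracy bound
rng = np.random.default_rng()
total_weight = np.zeros(N)

for i in range(N):
    n_motor = rng.poisson(lam_motor)  # number of vehicles of each type
    n_car   = rng.poisson(lam_car)
    n_truck = rng.poisson(lam_truck)
    w  = rng.gamma(*gam_motor, size=n_motor).sum()   # weights of the vehicles
    w += rng.gamma(*gam_car,   size=n_car).sum()
    w += rng.gamma(*gam_truck, size=n_truck).sum()
    total_weight[i] = w               # total weight of one run

p_hat  = np.mean(total_weight > 220000)   # P(total weight > 220 tons)
mean_w = np.mean(total_weight)            # expected weight
std_w  = np.std(total_weight)             # standard deviation
\end{verbatim}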
For the answer of \textit{part a}: after constructing the $TotalWeight$ vector from the desired number of Monte Carlo runs, I have computed the proportion of runs in which the total weight exceeds 220 tons (the \textit{mean} of the corresponding indicator). This proportion is the estimator of the probability that the total weight of all the vehicles that pass over the bridge in a day is more than 220 tons. \\
I have simulated my solution in Octave Online a number of times and observed that the estimated probability always lies between 0.35 and 0.38 (typically around 0.36). I share a sample output (which I will refer to in the other parts of the answer) below:
\begin{center}
Estimated probability = 0.364865 \\
Expected weight = 208441.367130 \\
Standard deviation = 38401.600168
\end{center}
\section*{Answer b}
To estimate the expected value of the total weight $X$ of all the vehicles that pass over the bridge in a day, I have simply taken the \textit{mean} of $TotalWeight$. The expected weight for a sample simulation can be seen in the sample output shared in part a.
\section*{Answer c}
To estimate $Std(X)$, I have simply taken the \textit{std} of $TotalWeight$, which gives the standard deviation of $X$. The standard deviation for a sample simulation can be seen in the sample output shared in part a.\\
Since the study was sized with $N$ chosen to attain the desired accuracy ($\alpha = 0.01$ and $\epsilon = 0.02$), the estimation error does not exceed $\epsilon$ with high probability $(1-\alpha)$, so the resulting estimator has the required accuracy.
\end{document}
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Data.Empty.Base where
open import Cubical.Core.Everything
data ⊥ : Type₀ where
rec : ∀ {ℓ} {A : Type ℓ} → ⊥ → A
rec ()
elim : ∀ {ℓ} {A : ⊥ → Type ℓ} → (x : ⊥) → A x
elim ()
|
/******************************************************************************
* Author(s): Christopher J. Havlicek
*
* See LICENSE and CONTACTS.
******************************************************************************/
#include "../../kmap.hpp"
#include "../master.hpp"
#include <boost/test/unit_test.hpp>
namespace utf = boost::unit_test;
namespace kmap::test {
BOOST_AUTO_TEST_SUITE( kmap_iface )
// TODO: Test negative cases to (those that result in ECs).
// TODO: Decide whether or not node creation belongs in "node_manip". I would say that it does.
BOOST_AUTO_TEST_CASE( create_node
,
* utf::fixture< ClearMapFixture >() )
{
auto& kmap = Singleton::instance();
BOOST_TEST( kmap.exists( "/" ) );
BOOST_TEST( kmap.fetch_children( kmap.root_node_id() ).size() == 1 );
auto const c1 = kmap.create_child( kmap.root_node_id()
, "1" );
BOOST_REQUIRE( c1 );
BOOST_TEST( kmap.exists( c1.value() ) );
BOOST_TEST( kmap.exists( "/1" ) );
BOOST_TEST( kmap.fetch_children( kmap.root_node_id() ).size() == 2 );
BOOST_TEST( kmap.selected_node() == kmap.root_node_id() );
auto const c2 = kmap.create_child( kmap.root_node_id()
, "2" );
BOOST_REQUIRE( c2 );
BOOST_TEST( kmap.exists( c2.value() ) );
BOOST_TEST( kmap.exists( "/2" ) );
BOOST_TEST( kmap.fetch_children( kmap.root_node_id() ).size() == 3 );
auto const c3 = kmap.create_child( c1.value()
, "3" );
BOOST_REQUIRE( c3 );
BOOST_TEST( kmap.exists( c3.value() ) );
BOOST_TEST( kmap.exists( "/1.3" ) );
BOOST_TEST( kmap.fetch_children( *kmap.fetch_leaf( "/1" ) ).size() == 1 );
}
BOOST_AUTO_TEST_SUITE_END( /* kmap_iface */ )
} // namespace kmap::test |
/*****************************************************************
*
* This file is part of the Botmark benchmark.
*
* Copyright (c) 2019 Andrew Murtagh, Patrick Lynch,
* and Conor McGinn.
*
* This work is licensed under the "Creative Commons
* (Attribution-NonCommercial-ShareAlike 4.0 International)
* License" and is copyrighted by Andrew Murtagh, Patrich Lynch,
* and Conor McGinn.
*
* To view a copy of this license, visit
* http://creativecommons.org/licenses/by-nc-sa/4.0/ or
* send a letter to Creative Commons, PO Box 1866,
* Mountain View, CA 94042, USA.
*
* Botmark is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE.
*
*****************************************************************/
#include <iostream>
#include <time.h>
#include <vector>
#include <memory>
#include <caffe/caffe.hpp>
#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif // USE_OPENCV
#include <algorithm>
#include <iosfwd>
#include <string>
#include <utility>
#include <vector>
#include <Eigen/Geometry>
#include <pcl/common/transforms.h>
#include <pcl/point_cloud.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/common/common.h>
#include <pcl/filters/passthrough.h>
#include <pcl/search/kdtree.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/sample_consensus/sac_model_plane.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/segmentation/extract_clusters.h>
#include "botmarkcommon.h"
using namespace caffe;
using std::string;
const float KINECT_FOCAL_LENGTH = 575.8157496f;
const cv::Scalar RED(0, 0, 255);
const cv::Scalar BLUE(255, 0, 0);
const float APRON = 1.1f;
/* Pair (label, confidence) representing a prediction. */
typedef std::pair<string, float> Prediction;
typedef std::vector<boost::filesystem::path> path_vector;
struct timespec start_time;
struct timespec end_time;
float time_accumulator=0.0f;
class Classifier {
public:
Classifier(const string& model_file,
const string& trained_file,
const string& mean_file,
const string& label_file);
std::vector<Prediction> Classify(const cv::Mat& img, int N = 5);
private:
void SetMean(const string& mean_file);
std::vector<float> Predict(const cv::Mat& img);
void WrapInputLayer(std::vector<cv::Mat>* input_channels);
void Preprocess(const cv::Mat& img,
std::vector<cv::Mat>* input_channels);
private:
shared_ptr<Net<float> > net_;
cv::Size input_geometry_;
int num_channels_;
cv::Mat mean_;
std::vector<string> labels_;
};
Classifier::Classifier(const string& model_file,
const string& trained_file,
const string& mean_file,
const string& label_file) {
Caffe::set_mode(Caffe::CPU);
/* Load the network. */
net_.reset(new Net<float>(model_file, TEST));
net_->CopyTrainedLayersFrom(trained_file);
CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input.";
CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output.";
Blob<float>* input_layer = net_->input_blobs()[0];
num_channels_ = input_layer->channels();
CHECK(num_channels_ == 3 || num_channels_ == 1)
<< "Input layer should have 1 or 3 channels.";
input_geometry_ = cv::Size(input_layer->width(), input_layer->height());
/* Load the binaryproto mean file. */
SetMean(mean_file);
/* Load labels. */
std::ifstream labels(label_file.c_str());
CHECK(labels) << "Unable to open labels file " << label_file;
string line;
while (std::getline(labels, line))
labels_.push_back(string(line));
Blob<float>* output_layer = net_->output_blobs()[0];
CHECK_EQ(labels_.size(), output_layer->channels())
<< "Number of labels is different from the output layer dimension.";
}
static bool PairCompare(const std::pair<float, int>& lhs,
const std::pair<float, int>& rhs) {
return lhs.first > rhs.first;
}
/* Return the indices of the top N values of vector v. */
static std::vector<int> Argmax(const std::vector<float>& v, int N) {
std::vector<std::pair<float, int> > pairs;
for (size_t i = 0; i < v.size(); ++i)
pairs.push_back(std::make_pair(v[i], i));
std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(), PairCompare);
std::vector<int> result;
for (int i = 0; i < N; ++i)
result.push_back(pairs[i].second);
return result;
}
/* Return the top N predictions. */
std::vector<Prediction> Classifier::Classify(const cv::Mat& img, int N) {
std::vector<float> output = Predict(img);
N = std::min<int>(labels_.size(), N);
std::vector<int> maxN = Argmax(output, N);
std::vector<Prediction> predictions;
for (int i = 0; i < N; ++i) {
int idx = maxN[i];
predictions.push_back(std::make_pair(labels_[idx], output[idx]));
}
return predictions;
}
/* Load the mean file in binaryproto format. */
void Classifier::SetMean(const string& mean_file) {
BlobProto blob_proto;
ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
/* Convert from BlobProto to Blob<float> */
Blob<float> mean_blob;
mean_blob.FromProto(blob_proto);
CHECK_EQ(mean_blob.channels(), num_channels_)
<< "Number of channels of mean file doesn't match input layer.";
/* The format of the mean file is planar 32-bit float BGR or grayscale. */
std::vector<cv::Mat> channels;
float* data = mean_blob.mutable_cpu_data();
for (int i = 0; i < num_channels_; ++i) {
/* Extract an individual channel. */
cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data);
channels.push_back(channel);
data += mean_blob.height() * mean_blob.width();
}
/* Merge the separate channels into a single image. */
cv::Mat mean;
cv::merge(channels, mean);
/* Compute the global mean pixel value and create a mean image
* filled with this value. */
cv::Scalar channel_mean = cv::mean(mean);
mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean);
}
std::vector<float> Classifier::Predict(const cv::Mat& img) {
Blob<float>* input_layer = net_->input_blobs()[0];
input_layer->Reshape(1, num_channels_,
input_geometry_.height, input_geometry_.width);
/* Forward dimension change to all layers. */
net_->Reshape();
std::vector<cv::Mat> input_channels;
WrapInputLayer(&input_channels);
Preprocess(img, &input_channels);
net_->Forward();
/* Copy the output layer to a std::vector */
Blob<float>* output_layer = net_->output_blobs()[0];
const float* begin = output_layer->cpu_data();
const float* end = begin + output_layer->channels();
return std::vector<float>(begin, end);
}
void Classifier::WrapInputLayer(std::vector<cv::Mat>* input_channels) {
Blob<float>* input_layer = net_->input_blobs()[0];
int width = input_layer->width();
int height = input_layer->height();
float* input_data = input_layer->mutable_cpu_data();
for (int i = 0; i < input_layer->channels(); ++i) {
cv::Mat channel(height, width, CV_32FC1, input_data);
input_channels->push_back(channel);
input_data += width * height;
}
}
void Classifier::Preprocess(const cv::Mat& img,
std::vector<cv::Mat>* input_channels) {
/* Convert the input image to the input image format of the network. */
cv::Mat sample;
if (img.channels() == 3 && num_channels_ == 1)
cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY);
else if (img.channels() == 4 && num_channels_ == 1)
cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY);
else if (img.channels() == 4 && num_channels_ == 3)
cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR);
else if (img.channels() == 1 && num_channels_ == 3)
cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR);
else
sample = img;
cv::Mat sample_resized;
if (sample.size() != input_geometry_)
cv::resize(sample, sample_resized, input_geometry_);
else
sample_resized = sample;
cv::Mat sample_float;
if (num_channels_ == 3)
sample_resized.convertTo(sample_float, CV_32FC3);
else
sample_resized.convertTo(sample_float, CV_32FC1);
cv::Mat sample_normalized;
cv::subtract(sample_float, mean_, sample_normalized);
cv::split(sample_normalized, *input_channels);
CHECK(reinterpret_cast<float*>(input_channels->at(0).data)
== net_->input_blobs()[0]->cpu_data())
<< "Input channels are not wrapping the input layer of the network.";
}
int main(int argc, char** argv) {
std::cout << "Botmark version: " << Botmark_VERSION_MAJOR << "." <<
Botmark_VERSION_MINOR << "." << Botmark_VERSION_PATCH << std::endl;
if(BENCHMARK_RUNS < 4) {
std::cout << "[Botmark Error] Number of runs is less than 4, please change in botmarkcommon.h" << std::endl << std::endl;
return -1;
}
std::cout << "Starting Object Recognition Workload" << std::endl << std::endl;
std::ofstream results_file;
results_file.open(RESULTS_FILE.c_str(), std::ofstream::app);
if(!results_file) {
std::cout << "[Botmark Error] Failed to open results file." << std::endl;
return -1;
}
double times[BENCHMARK_RUNS];
//init
//disable logging - Caffe is pretty chatty
::google::InitGoogleLogging(argv[0]);
//initialise directory iterators
path_vector rgb_path_vec, pc_path_vec;
copy(boost::filesystem::directory_iterator(OBJECT_RECOGNITION_DATA_PATH+"rgb/"),
boost::filesystem::directory_iterator(),
back_inserter(rgb_path_vec));
copy(boost::filesystem::directory_iterator(OBJECT_RECOGNITION_DATA_PATH+"point_clouds/"),
boost::filesystem::directory_iterator(),
back_inserter(pc_path_vec));
path_vector::const_iterator rgb_iterator(rgb_path_vec.begin());
path_vector::const_iterator pc_iterator(pc_path_vec.begin());
sort(rgb_path_vec.begin(), rgb_path_vec.end());
sort(pc_path_vec.begin(), pc_path_vec.end());
pcl::PointCloud<pcl::PointXYZ>::Ptr original_cloud(new pcl::PointCloud<pcl::PointXYZ> ()),
two_cloud(new pcl::PointCloud<pcl::PointXYZ>),
final_cloud(new pcl::PointCloud<pcl::PointXYZ>);
pcl::PCDReader reader;
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer;
if(VISUALISE_MODE) {
viewer = boost::make_shared<pcl::visualization::PCLVisualizer>("Object Recognition Viewer");
viewer->setBackgroundColor(0.05, 0.05, 0.05, 0);
viewer->setCameraPosition(0, -0.1, -2.5, 0, -1, 0);
}
Classifier classifier(CAFFE_MODEL_FILE, CAFFE_TRAINED_FILE, CAFFE_MEAN_FILE, CAFFE_LABEL_FILE);
//start benchmarking runs
for(int run=0; run<BENCHMARK_RUNS; run++) {
if(VERBOSE_MODE)
std::cout << "Run: " << run+1 << " of " << BENCHMARK_RUNS << std::endl;
//cycle over frames
for(int i=0; i<OBJECT_RECOGNITION_FRAMES; i++) {
// read in data
if(reader.read<pcl::PointXYZ> ((*pc_iterator).string(), *original_cloud) != 0) {
std::cout << "[Botmark Error] Failed to open file." << std::endl;
return -1;
}
string rgb_filename = (*rgb_iterator).string();
cv::Mat img = cv::imread(rgb_filename, cv::IMREAD_COLOR);
//start clock
if(clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
std::cout << "[Botmark Error] Can't use clock timing routine." << std::endl;
return -1;
}
//pass through filter on pc
pcl::PassThrough<pcl::PointXYZ> pass;
pass.setFilterFieldName("z");
pass.setFilterLimits(0.0, 1.2);
pass.setInputCloud(original_cloud);
pass.filter(*two_cloud);
//remove most 'prominent' plane
pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients);
pcl::SACSegmentation<pcl::PointXYZ> segmentation;
segmentation.setInputCloud(two_cloud);
segmentation.setModelType(pcl::SACMODEL_PLANE);
segmentation.setMethodType(pcl::SAC_RANSAC);
segmentation.setDistanceThreshold(0.015);
segmentation.setOptimizeCoefficients(true);
pcl::PointIndices::Ptr planeIndices(new pcl::PointIndices);
segmentation.segment(*planeIndices, *coefficients);
if(planeIndices->indices.size() == 0) {
std::cout << "[Botmark Error] Could not find a plane in the scene." << std::endl;
} else {
// Copy the points of the plane to a new cloud.
pcl::ExtractIndices<pcl::PointXYZ> extract;
extract.setInputCloud(two_cloud);
extract.setNegative(true);
extract.setIndices(planeIndices);
extract.filter(*final_cloud);
}
// cluster remaining pc
pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>);
tree->setInputCloud(final_cloud);
std::vector<pcl::PointIndices> cluster_indices;
pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
ec.setClusterTolerance(0.04);
ec.setMinClusterSize(100);
ec.setMaxClusterSize(25000);
ec.setSearchMethod(tree);
ec.setInputCloud(final_cloud);
ec.extract(cluster_indices);
int centerX = img.cols/2;
int centerY = img.rows/2;
//cycle over clusters
std::vector<cv::Rect> object_rects;
int j = 0;
for(std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it) {
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster(new pcl::PointCloud<pcl::PointXYZ>);
for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit) {
cloud_cluster->points.push_back(final_cloud->points[*pit]);
}
cloud_cluster->width = cloud_cluster->points.size();
cloud_cluster->height = 1;
cloud_cluster->is_dense = true;
std::stringstream stream;
stream << j;
std::string shape_name = "shape_" + stream.str();
pcl::PointXYZ proj_min;
pcl::PointXYZ proj_max;
pcl::getMinMax3D(*cloud_cluster, proj_min, proj_max);
if(proj_min.x > 0) {
std::swap(proj_min.z, proj_max.z);
}
Eigen::Vector4f centroid;
pcl::compute3DCentroid(*cloud_cluster, centroid);
Eigen::Vector3f translation = centroid.head<3>();
Eigen::Quaternionf rotation(0, 0, 0, 0);
if(VISUALISE_MODE) {
viewer->addCube(translation, rotation, proj_max.x-proj_min.x, proj_max.y-proj_min.y, proj_max.z-proj_min.z, shape_name);
}
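// Project the 3D bounding-box corners to pixel coordinates with the pinhole
// model: u = X * f / Z + cx, v = Y * f / Z + cy, with f = KINECT_FOCAL_LENGTH.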
object_rects.push_back(cv::Rect(
cv::Point((proj_max.x)*(KINECT_FOCAL_LENGTH/proj_max.z)+centerX, (proj_max.y)*(KINECT_FOCAL_LENGTH/proj_max.z)+centerY),
cv::Point((proj_min.x)*(KINECT_FOCAL_LENGTH/proj_min.z)+centerX, (proj_min.y)*(KINECT_FOCAL_LENGTH/proj_min.z)+centerY))
);
//visualisation
if(VISUALISE_MODE) {
pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> cluster_cloud_color_handler (cloud_cluster, 100 + (std::rand() % (255 - 100 + 1)), 100 + (std::rand() % (255 - 100 + 1)), 100 + (std::rand() % (255 - 100 + 1)));
//std::stringstream stream;
stream << j;
std::string name = "cloud_" + stream.str();
viewer->addPointCloud(cloud_cluster, cluster_cloud_color_handler, name);
viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, name);
}
j++;
} // loop over clusters
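// Grow each detected rectangle by APRON times its size about its centre, then
// clamp it to the image bounds before cropping the region passed to the classifier.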
for(size_t i = 0; i < object_rects.size(); i++) {
cv::Size deltaSize(object_rects[i].width*APRON, object_rects[i].height*APRON );
cv::Point offset(deltaSize.width/2, deltaSize.height/2);
object_rects[i] += deltaSize;
object_rects[i] -= offset;
object_rects[i] &= cv::Rect(cv::Point(0, 0), img.size()); //get intersection to ensure the r.o.i. is in bounds
}
/*** caffe ***/
for(size_t i = 0; i < object_rects.size(); i++) {
cv::Mat this_object_image = img(object_rects[i]);
CHECK(!this_object_image.empty()) << "Error decoding image " << rgb_filename;
std::vector<Prediction> predictions = classifier.Classify(this_object_image);
Prediction p = predictions[0];
if(VERBOSE_MODE) {
std::cout << "predicted: " << std::fixed << std::setprecision(4) << p.second << " - \"" << p.first << "\"" << std::endl;
}
if(VISUALISE_MODE) {
cv::rectangle(img, object_rects[i], RED, 2);
cv::putText(img, p.first, cv::Point(object_rects[i].x-10, object_rects[i].y-10), cv::FONT_HERSHEY_PLAIN, 1.0, RED);
}
}
//stop clock
if(clock_gettime(CLOCK_MONOTONIC_RAW, &end_time)) {
std::cout << "[Botmark Error] Can't use clock timing routine." << std::endl;
return -1;
}
//calculate time
double start_milli = (double) 1.0e3*start_time.tv_sec + 1.0e-6*start_time.tv_nsec;
double end_milli = (double) 1.0e3*end_time.tv_sec + 1.0e-6*end_time.tv_nsec;
time_accumulator += end_milli - start_milli;
if(VERBOSE_MODE) {
std::cout << "Elapsed time [ms]: " << end_milli - start_milli << std::endl << std::endl;
}
if(VISUALISE_MODE) {
viewer->spinOnce();
cv::namedWindow("Recognised objects", cv::WINDOW_AUTOSIZE);
cv::imshow("Recognised objects", img);
cv::waitKey(1);
viewer->removeAllPointClouds();
viewer->removeAllShapes();
}
++rgb_iterator;
++pc_iterator;
} // end loop over frames
float avr_time_per_frame = time_accumulator/OBJECT_RECOGNITION_FRAMES;
if(VERBOSE_MODE) {
std::cout << "Frame rate [Hz]: " << 1000.0f/avr_time_per_frame << std::endl << std::endl;
}
times[run] = 1000.0f/avr_time_per_frame;
rgb_iterator = rgb_path_vec.begin();
pc_iterator = pc_path_vec.begin();
time_accumulator = 0;
} //end benchmarking runs
//compute and output statistics
run_stats_s stats = computeStats(times, BENCHMARK_RUNS);
if(VERBOSE_MODE) {
std::cout << "Statistics" << std::endl;
std::cout << "Min. frame rate: " << stats.min << " [Hz]" << std::endl;
std::cout << "Max. frame rate: " << stats.max << " [Hz]" << std::endl;
std::cout << "Range: " << stats.range << " [Hz]" << std::endl;
std::cout << "Mean frame rate: " << stats.mean << " [Hz]" << std::endl;
std::cout << "1st quartile: " << stats.first_quartile << " [Hz]" << std::endl;
std::cout << "Median: " << stats.median << " [Hz]" << std::endl;
std::cout << "3rd quartile: " << stats.third_quartile << " [Hz]" << std::endl;
std::cout << "IQR: " << stats.iqr << " [Hz]" << std::endl;
std::cout << "Pop. Standard Deviation: " << stats.std_dev << " [Hz]" << std::endl;
std::cout << "Writing Results to File." << std::endl << std::endl;
}
//write results to file
results_file << "#Results from Object Recognition workload\n";
results_file << "Metric: time to recognise objects in a frame\n";
results_file << "Runs: " << BENCHMARK_RUNS << "\n";
results_file << "Unit: [Hz]\n\n";
results_file << "##Raw frame rates:\n";
for(int j=0; j<BENCHMARK_RUNS; j++) {
results_file << "run: " << j+1 << ",\t\t\tframe rate: " << times[j] << " [Hz]\n";
}
results_file << "\n##Statistics:\n";
results_file << "Min. frame rate: \t\t\t" << stats.min << " [Hz]\n";
results_file << "Max. frame rate: \t\t\t" << stats.max << " [Hz]\n";
results_file << "Range: \t\t\t\t\t\t" << stats.range << " [Hz]\n";
results_file << "Mean frame rate: \t\t\t" << stats.mean << " [Hz]\n";
results_file << "1st quartile: \t\t\t\t" << stats.first_quartile << " [Hz]\n";
results_file << "Median: \t\t\t\t\t" << stats.median << " [Hz]\n";
results_file << "3rd quartile: \t\t\t\t" << stats.third_quartile << " [Hz]\n";
results_file << "IQR: \t\t\t\t\t\t" << stats.iqr << " [Hz]\n";
results_file << "Pop. Standard Deviation: \t" << stats.std_dev << " [Hz]\n";
results_file << "\n\n\n";
results_file.close();
std::cout << "Ending Object Recognition Workload" << std::endl << std::endl;
return 0;
}
|
# syntax: proto3
using ProtoBuf
import ProtoBuf.meta
import ProtoBuf.google.protobuf
mutable struct Value_Size <: ProtoType
width::Int64
height::Int64
Value_Size(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Value_Size
mutable struct ArrayValue <: ProtoType
values::Base.Any
ArrayValue(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct ArrayValue (has cyclic type dependency)
const __ftype_ArrayValue = Dict(:values => "Base.Vector{Value}")
meta(t::Type{ArrayValue}) = meta(t, ProtoBuf.DEF_REQ, ProtoBuf.DEF_FNUM, ProtoBuf.DEF_VAL, true, ProtoBuf.DEF_PACK, ProtoBuf.DEF_WTYPES, ProtoBuf.DEF_ONEOFS, ProtoBuf.DEF_ONEOF_NAMES, __ftype_ArrayValue)
mutable struct Value <: ProtoType
boolean_value::Bool
integer_value::Int64
double_value::Float64
entity_value::Base.Any
array_value::ArrayValue
timestamp_value::ProtoBuf.google.protobuf.Timestamp
string_value::AbstractString
blob_value::Array{UInt8,1}
size_value::Value_Size
Value(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Value (has cyclic type dependency)
const __fnum_Value = Int[1,2,3,6,9,10,17,18,19]
const __ftype_Value = Dict(:entity_value => "Entity")
const __oneofs_Value = Int[1,1,1,1,1,1,1,1,1]
const __oneof_names_Value = [Symbol("value_type")]
meta(t::Type{Value}) = meta(t, ProtoBuf.DEF_REQ, __fnum_Value, ProtoBuf.DEF_VAL, true, ProtoBuf.DEF_PACK, ProtoBuf.DEF_WTYPES, __oneofs_Value, __oneof_names_Value, __ftype_Value)
mutable struct Entity_PropertiesEntry <: ProtoType
key::AbstractString
value::Value
Entity_PropertiesEntry(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Entity_PropertiesEntry (mapentry) (has cyclic type dependency)
mutable struct Entity <: ProtoType
properties::Base.Dict # map entry
Entity(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Entity (has cyclic type dependency)
const __ftype_Entity = Dict(:properties => "Base.Dict{AbstractString,Value}")
meta(t::Type{Entity}) = meta(t, ProtoBuf.DEF_REQ, ProtoBuf.DEF_FNUM, ProtoBuf.DEF_VAL, true, ProtoBuf.DEF_PACK, ProtoBuf.DEF_WTYPES, ProtoBuf.DEF_ONEOFS, ProtoBuf.DEF_ONEOF_NAMES, __ftype_Entity)
export ArrayValue, Value_Size, Value, Entity_PropertiesEntry, Entity, ArrayValue, Value, Entity_PropertiesEntry, Entity
# mapentries: "Entity_PropertiesEntry" => ("AbstractString", "Value")
|
\chapter{The \caps{ABAQUS} Output File Format} \label{appx:abaqus}
The \caps{ABAQUS} file format is described in the \caps{ABAQUS} Manual
Section 10. However, the manual is unclear as to the order of the record
types. This appendix defines the assumed record order determined by
examining actual \caps{ABAQUS} output files. These assumptions are
incorporated in \caps{\PROGRAM}, and if incorrect they may involve
program changes.
If the program encounters a record of an unexpected type, a warning is
printed and the record is ignored. Records that are out of order may
confuse the program.
The header records are in the following order:
\setlength{\itemsep}{\medskipamount} \begin{itemize}
\item a version record (1921) and a heading record (1922);
\item a series of element connectivity records (1900), which may not be
in numerical order;
\item a series of nodal coordinate records (1901), which may not be in
numerical order;
\item a start time steps record (1902), which is ignored; and
\item an end time step record (2001), which is ignored.
\end{itemize}
A series of time steps follow. The records for a time step are in the
following order:
\setlength{\itemsep}{\medskipamount} \begin{itemize}
\item a times record (2000) or an eigen record (?);
\item an output set request record (1911), which is ignored;
\item a series of element variable records (1..100), which may not be in
numerical order, but all variables for one element are together;
\item a series of nodal variable records (101..1000), which may not be in
numerical order, and all variables for a node may not be together;
\item an end time step record (2001), which is ignored.
\end{itemize}
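As a compact illustration of these assumptions, the following sketch (written in Python purely for illustration; it is not taken from \caps{\PROGRAM}) encodes the header record-type keys listed above and warns about records that arrive out of the assumed order, which are then ignored:
\begin{verbatim}
# Header record-type keys in the assumed order; 1900 and 1901 may repeat
# and may themselves be out of numerical order.
HEADER_ORDER = [1921, 1922, 1900, 1901, 1902, 2001]
REPEATABLE   = {1900, 1901}

def check_header(record_keys):
    stage = 0
    for key in record_keys:
        pos = stage
        while pos < len(HEADER_ORDER) and HEADER_ORDER[pos] != key:
            pos += 1
        if pos == len(HEADER_ORDER):
            print("warning: record type", key, "unexpected; record ignored")
            continue
        stage = pos if key in REPEATABLE else pos + 1
\end{verbatim}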
|
""" Grid sampling (Only use for low dimensional spaces)"""
import itertools
import numpy as np
from verifai.samplers.domain_sampler import (BoxSampler, DiscreteBoxSampler,
DomainSampler, SplitSampler, IteratorSampler, TerminationException)
from verifai.samplers.random_sampler import RandomSampler
class GridSampler(IteratorSampler):
def __init__(self, domain, grid_params=None):
if grid_params is None:
grid_params = {}
self.cont_N = grid_params.get('N', 21)
repeat = grid_params.get('repeat', False)
super().__init__(domain, repeat=repeat)
cont_grid = lambda domain: ContinuousGridSampler(domain=domain,
N=self.cont_N)
disc_grid = lambda domain: DiscreteGridSampler(domain=domain)
partition = (
(lambda d: d.standardizedDimension >= 0, cont_grid),
(lambda d: d.standardizedIntervals, disc_grid)
)
self.split_sampler = SplitSampler.fromPartition(domain, partition)
self.cont_sampler, self.disc_sampler = \
self.split_sampler.samplersForPredicates
def __iter__(self):
while True:
for subpoints in itertools.product(*self.split_sampler.samplers):
yield self.domain.rejoinPoints(*subpoints)
if not self.repeat:
return
class ContinuousGridSampler(BoxSampler):
def __init__(self, domain, N):
super().__init__(domain)
D = self.domain.standardizedDimension
if isinstance(N, int):
self.N = np.ones(D) * N
else:
if len(N) != D:
raise RuntimeError(
f'grid specification for continuous space of dimension {D}'
f' has wrong length {len(N)}'
)
if not all(isinstance(k, int) and k > 0 for k in N):
raise RuntimeError(
f'grid specification {N} must consist of positive integers'
)
self.N = N
self.iters = 0
self.max_iters = np.prod(self.N)
def nextVector(self, feedback=None):
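# Decode the run counter t as mixed-radix digits: dimension j takes one of
# self.N[j] evenly spaced values in [0, 1].  Successive calls enumerate the whole
# grid; once every point has been produced, the next call resets the counter and
# raises TerminationException.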
sample_vec = []
t = self.iters
if t == self.max_iters:
self.iters = 0
raise TerminationException('finished continuous grid sampling')
for k in self.N:
sample_vec.append(1./(k-1)*(t%k))
t = t//k
self.iters+=1
return tuple(sample_vec)
class DiscreteGridSampler(DiscreteBoxSampler):
def __init__(self, domain):
super().__init__(domain)
self.start_N = 1
self.iters = 0
self.max_iters = 1
for (left, right) in self.domain.standardizedIntervals:
self.max_iters *= (right - left) + 1
def nextVector(self, feedback=None):
if self.iters == self.max_iters:
self.iters = 0
raise TerminationException('finished discrete grid sampling')
sample_vec = []
t = self.iters
for (left, right) in self.domain.standardizedIntervals:
sample_vec.append(left + t%(right-left+1))
t = t//(right-left +1)
self.iters+=1
return tuple(sample_vec)
|
/* gsl_multifit_ndlinear.h
*
* Copyright (C) 2006, 2007 Patrick Alken
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __GSL_MULTIFIT_NDLINEAR_H__
#define __GSL_MULTIFIT_NDLINEAR_H__
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>
typedef struct
{
size_t n_dim; /* dimension of fit function */
size_t *N; /* number of terms in fit sums N[i] = N_i */
size_t n_coeffs; /* number of fit coefficients */
gsl_vector *work; /* scratch array of size n_coeffs */
gsl_vector *work2; /* scratch array */
/*
* Views into the 'work' array which will be used to store
* the results of calling the basis functions, so that
* (v[i])_j = u^{(i)}_j(x_i)
*/
gsl_vector_view *v;
/* pointer to basis functions and parameters */
int (**u)(double x, double y[], void *p);
void *params;
} gsl_multifit_ndlinear_workspace;
/*
* Prototypes
*/
gsl_multifit_ndlinear_workspace *
gsl_multifit_ndlinear_alloc(size_t n, size_t N[],
int (**u)(double x, double y[], void *p),
void *params);
void gsl_multifit_ndlinear_free(gsl_multifit_ndlinear_workspace *w);
int gsl_multifit_ndlinear_design(const gsl_matrix *data, gsl_matrix *X,
gsl_multifit_ndlinear_workspace *w);
int gsl_multifit_ndlinear_est(const gsl_vector *x, const gsl_vector *c,
const gsl_matrix *cov, double *y,
double *y_err,
gsl_multifit_ndlinear_workspace *w);
double gsl_multifit_ndlinear_calc(const gsl_vector *x, const gsl_vector *c,
gsl_multifit_ndlinear_workspace *w);
size_t gsl_multifit_ndlinear_ncoeffs(gsl_multifit_ndlinear_workspace *w);
#endif /* __GSL_MULTIFIT_NDLINEAR_H__ */
|
[STATEMENT]
lemma section_map_eq:
"\<lbrakk>section_map X Y f; \<And>x. x \<in> topspace X \<Longrightarrow> f x = g x\<rbrakk> \<Longrightarrow> section_map X Y g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>section_map X Y f; \<And>x. x \<in> topspace X \<Longrightarrow> f x = g x\<rbrakk> \<Longrightarrow> section_map X Y g
[PROOF STEP]
unfolding section_map_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<exists>g. retraction_maps Y X g f; \<And>x. x \<in> topspace X \<Longrightarrow> f x = g x\<rbrakk> \<Longrightarrow> \<exists>ga. retraction_maps Y X ga g
[PROOF STEP]
using retraction_maps_eq
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>retraction_maps ?X ?Y ?f ?g; \<And>x. x \<in> topspace ?X \<Longrightarrow> ?f x = ?f' x; \<And>x. x \<in> topspace ?Y \<Longrightarrow> ?g x = ?g' x\<rbrakk> \<Longrightarrow> retraction_maps ?X ?Y ?f' ?g'
goal (1 subgoal):
1. \<lbrakk>\<exists>g. retraction_maps Y X g f; \<And>x. x \<in> topspace X \<Longrightarrow> f x = g x\<rbrakk> \<Longrightarrow> \<exists>ga. retraction_maps Y X ga g
[PROOF STEP]
by blast |
Formal statement is: lemma contour_integral_local_primitive_lemma: fixes f :: "complex\<Rightarrow>complex" assumes gpd: "g piecewise_differentiable_on {a..b}" and dh: "\<And>x. x \<in> S \<Longrightarrow> (f has_field_derivative f' x) (at x within S)" and gs: "\<And>x. x \<in> {a..b} \<Longrightarrow> g x \<in> S" shows "(\<lambda>x. f' (g x) * vector_derivative g (at x within {a..b})) integrable_on {a..b}" Informal statement is: If $g$ is piecewise differentiable on $[a,b]$ and $f$ is differentiable on $g([a,b])$, then the function $x \mapsto f'(g(x)) \cdot g'(x)$ is integrable on $[a,b]$. |
read "../ComputeIdentifiableFunctions.mpl":
cases := [
[
[[a * b, b], [a]],
[1 / a]
],
[
[[a + b, a * b], [b]],
[]
],
[
[[a * b + c, b], [a, c]],
[]
],
[
[[a * b, c * b, d * b], [a, c, d]],
[a / c, a / d]
],
[
[[a + b + c, a^2 + b^2 + c^2, a^3 + b^3 + c^3, c], [a, b]],
[a + b, a * b]
],
[
[[a + b, a * b, x, a * x + b * c], [a, b, c]],
[a + b, a * b]
]
]:
num_passed := 0:
num_failed := 0:
for case in cases do
input := case[1]:
correct := case[2]:
if IdealsEq(FieldIntersection(op(input)), FieldToIdeal(correct)) then
printf("PASSED\n");
num_passed := num_passed + 1:
else
printf("FAILED\n");
num_failed := num_failed + 1:
print("Expected: ", correct);
print("Got: ", FieldIntersection(op(input)));
end if:
end do:
printf("Passed: %a, failed %a \n", num_passed, num_failed);
|
include defs
# dsdbiu --- dump contents of block-in-use
subroutine dsdbiu (b, form)
pointer b
character form
DS_DECL(Mem, MEMSIZE)
integer l, s, lmax
string blanks " "
call putint (b, 5, ERROUT)
call putch (BLANK, ERROUT)
call putint (Mem (b + DS_SIZE), 0, ERROUT)
call remark (" words in use.")
l = 0
s = b + Mem (b + DS_SIZE)
if (form == DIGIT)
lmax = 5
else
lmax = 50
for (b = b + DS_OHEAD; b < s; b = b + 1) {
if (l == 0)
call putlin (blanks, ERROUT)
if (form == DIGIT)
call putint (Mem (b), 10, ERROUT)
elif (form == LETTER)
call putch (Mem (b), ERROUT)
l = l + 1
if (l >= lmax) {
l = 0
call putch (NEWLINE, ERROUT)
}
}
if (l != 0)
call putch (NEWLINE, ERROUT)
return
end
|
# Script to download metadata of NEON ITS sequences (DP1.10108.001)
# (next script) we'll use that metadata to access fungal sequence data from MG-RAST
# load packages and set file paths
rm(list=ls())
library(zoo)
library(neonUtilities)
# change to local file path
foldername <- "/usr3/graduate/zrwerbin/NEON_data/"
# set working directory to above file path
#setwd(foldername)
# list of our 5 sites
sites <- c("DSNY", "HARV", "OSBS", "CPER", "STER")
# first product: core-level sequence metadata. includes the following 5 products:
# mmg_soilDnaExtraction, mmg_soilMarkerGeneSequencing_16S, mmg_soilMarkerGeneSequencing_ITS,
# mmg_soilPcrAmplification_16S, mmg_soilPcrAmplification_ITS
# check if data has already been downloaded
if (!file.exists("filesToStack10108")) {
# loop through 5 sites to download data from each
for (s in 1:length(sites)){
zipsByProduct(dpID="DP1.10108.001", site=sites[s], package="expanded", check.size = T)
}
# combine them all into fewer files
stackByTable("filesToStack10108", folder = T)
}
# now let's format all this metadata!
pcr <- read.csv("filesToStack10108/stackedFiles/mmg_soilPcrAmplification_ITS.csv")
dna.data <- read.csv("filesToStack10108/stackedFiles/mmg_soilDnaExtraction.csv")
pcrITS <- pcr[which(pcr$targetGene=="ITS"),]
# merge the DNA data into the PCR data.
dna.merge <- dna.data[,!(colnames(dna.data) %in% colnames(pcrITS))]
dna.merge$dnaSampleID <- dna.data$dnaSampleID
dna.metadata <- merge(pcrITS,dna.merge, by="dnaSampleID", all = T)
dim(dna.metadata) # check dimensions
# remove some extra characters
dna.metadata$dateID <- substr(dna.metadata$collectDate,1,7)
dna.metadata$geneticSampleID <- as.character(dna.metadata$geneticSampleID)
dna.metadata$geneticSampleID <- substr(dna.metadata$geneticSampleID,1,nchar(dna.metadata$geneticSampleID)-4)
# save metadata
saveRDS(dna.metadata,"ITS_metadata.rds")
# get nested list of sites and dates sampled.
sites <- unique(dna.metadata$siteID)
sites <- sites[!is.na(sites)]
site_dates <- list()
for(i in 1:length(sites)){
dates <- unique(dna.metadata[dna.metadata$siteID == sites[i],]$dateID)
dates <- dates[!is.na(dates)]
site_dates[[i]] <- dates
}
names(site_dates) <- sites
# save site_dates output.
# ITS_site_dates.path is not defined in this script; assume a local output path if it is missing.
if (!exists("ITS_site_dates.path")) ITS_site_dates.path <- file.path(foldername, "ITS_site_dates.rds")
saveRDS(site_dates, ITS_site_dates.path)
|
module Yet.Another.Path
%default total
public export
val : Nat
val = 5
|
If $f$ is a Borel measurable function on $\mathbb{R}^n$, then $f$ is a Borel measurable function on any Borel subset of $\mathbb{R}^n$. |
<div align="right">
Massimo Nocentini<br>
<br>June 27, 2016: add external references
<br>May 31, 2016: primality testing by sets (<i>broken</i>)
<br>May 22, 2016: subsets sums with Gray codes
<br>May 21, 2016: basics, subsets sums
</div>
<br>
<div align="center">
<b>Abstract</b><br>
This document collects some examples about *bit manipulation techniques* and some applications of them to contest problems
</div>
# Intro
The current notebook was born with the aim of collecting introductory material, references, tutorials, code snippets and challenging problems around the topic of *bit manipulation techniques*. It also fulfils our need to gain a deep understanding of this topic in order to be competitive in programming (contests).
First of all, a collection of general references follows:
- [__The Aggregate Magic Algorithms__][aggregate], by _Department of Electrical and Computer Engineering at the University of Kentucky_
- [__Bit Twiddling Hacks__][seander], by _Sean Eron Anderson_
- [__Hacker's delight__][hd], by _Warren_
On the other hand, the following pages have a spirit toward programming contests:
- [__CPSC 490 202 - Problem Solving in Computer Science__][CPSC 490 202 current] ([old][CPSC 490 202 old] version)
A focused application about compressed representation can be found here:
- https://code.google.com/archive/p/compressedbitset/
- https://sdm.lbl.gov/fastbit/
- http://crd-legacy.lbl.gov/~kewu/fastbit/compression.html
Blog posts that inspired this document are the following:
- http://redrating.blogspot.it/2013/08/algo-and-techniques-bitmask.html
- https://www.quora.com/What-is-bitmasking-What-kind-of-problems-can-be-solved-using-it
[aggregate]:http://aggregate.org/MAGIC/
[seander]:http://graphics.stanford.edu/~seander/bithacks.html
[hd]:http://www.hackersdelight.org/
[CPSC 490 202 old]:http://www.ugrad.cs.ubc.ca/~cs490/sec202/
[CPSC 490 202 current]:http://www.ugrad.cs.ubc.ca/~cs490/2015W2/
# *bit masking* core idea
In this section we study [this][basic] introductory document, where the first paragraph reads:
>Suppose you have a set of objects and you want some way to represent which objects to pick and
which ones not to pick. How do you represent that in a program? More generally, *how do you
represent a subset of a set*? We can represent whether an object *is picked or not by a single bit*! Using a boolean to represent this is an overkill in terms of memory usage. However, neither C++ nor Java has any data type representing a single bit, so how can we cut down the memory usage?
**The answer is to use an integer!** We know an integer is just a bunch of bits stringed together, so
why don’t we use the integer to represent the entire set?
> For example, suppose in a set of 5 objects, we have picked the 1st, 3rd, and 4th object. The bitmask to represent this in binary is 01101 or 13 in decimal (in the notes, the 1st bit will always be the least significant bit and
will always appear at the very right). *We have just cut down the memory usage from five booleans to
a single integer*!
Then follows a list of basic manipulations, which we coded in [this][our:bits] Python module, loaded in the next cell; two functions deserve attention, namely `subsets` and `subsets_of` (read their docstrings for info). A minimal sketch of the kind of primitives the module provides is shown right after the cell.
[basic]:http://www.ugrad.cs.ubc.ca/~cs490/sec202/notes/intro/bitmask.pdf
[our:bits]:https://github.com/massimo-nocentini/competitive-programming/blob/master/python-libs/bits.py
```python
%matplotlib inline
%run ../python-libs/bits.py
%run ../python-libs/timing.py
%run ../python-libs/graycodes.py
%run ../python-libs/symbols.py
```
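For orientation, here is a minimal sketch of the kind of primitives such a module provides. The names `is_on`, `set_bit` and `clear_bit` match helpers used later in this notebook, but the bodies below are illustrative assumptions, not the actual library code.
```python
def is_on(mask, j, return_int=False):
    """Is bit `j` of `mask` set? Optionally return it as 0/1 instead of a bool."""
    bit = (mask >> j) & 1
    return bit if return_int else bool(bit)

def set_bit(mask, j):
    """Return `mask` with bit `j` switched on."""
    return mask | (1 << j)

def clear_bit(mask, j):
    """Return `mask` with bit `j` switched off."""
    return mask & ~(1 << j)
```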
# Sum of subsets: a first example
## With "usual" subsets generation
The previous doc finishes with a first application of bitmasking; here is the request:
>Bitmask is an efficient and convenient way to represent subsets. For example, **given a set of numbers,
we want to find the sum of all subsets**. This is easy to code using bitmasks. Furthermore, we can use
an array to store all the results (imagine storing the results when you are using an array to represent
the subset)!
The following cell contains a Pythonic implementation of their code, which is given in `C`: the generator `subsets` yields subsets one by one, such that a successor subset `s` is computed from the previous subset `p` by *increment*, namely `s = p + 1`; a sketch of this assumed behaviour follows the cell.
```python
def subsets_sums(S):
"""
Returns a map of `(s, v)`, where `s` is a subset of `S`, and `v` is `sum(i in S & s)`.
"""
n = len(S)
sums = [0 for _ in range(1 << n)]
for s in subsets(n):
for j, v in enumerate(S):
sums[s] += is_on(s, j, return_int=True) * v
return sums
```
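The `subsets` generator used above is assumed to enumerate bitmasks in increasing numerical order, exactly as described in the quoted document: it counts from `0` to `2^n - 1`, each integer being the mask of one subset. This is a sketch of that assumed contract, not the library's actual code.
```python
def subsets(n):
    """Yield every subset of an n-element set, encoded as a bitmask, in increasing order."""
    for s in range(1 << n):
        yield s
```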
As a first experiment, let `S` be a list of 4 random integers, each one less than $2^{8}$.
```python
from random import randrange
S = [randrange(1 << 8) for _ in range(1 << 2)]
S
```
[184, 150, 30, 153]
We time the execution of the function `subsets_sums` applied to `S` and bind `sums` to the resulting mapping:
```python
with timing(lambda: subsets_sums(S)) as (sums, start, end):
print("Elapsed time: {:.5} secs".format(end-start))
```
Elapsed time: 0.00037456 secs
Have a look at the results, after the definition of the pretty-printing utility `pretty`:
```python
def pretty(sums):
"""
Utility pretty printer.
"""
return {pretty_mask(j, coding='little', width=len(S)): v for j, v in enumerate(sums)}
```
```python
pretty(sums)
```
{'0000': 0,
'0001': 153,
'0010': 30,
'0011': 183,
'0100': 150,
'0101': 303,
'0110': 180,
'0111': 333,
'1000': 184,
'1001': 337,
'1010': 214,
'1011': 367,
'1100': 334,
'1101': 487,
'1110': 364,
'1111': 517}
### Repeating experiments
```python
def plotter(do, n):
import matplotlib.pyplot as plt
def doer(i):
"""
Repeat the previous experiment with lists of integers lengths `i`.
"""
S = [randrange(1 << 8) for _ in range(i)]
with timing(lambda: do(S)) as (sums, start, end):
return end - start
l = range(n)
line, = plt.plot(l, list(map(doer, l)), '--', linewidth=2)
plt.xlabel("lengths")
plt.ylabel("secs")
plt.show()
```
Moreover, we repeat the above experiment for lists of integers of different lengths to appreciate the *exponential* complexity of *subsets* enumeration: the longest list has **24 integers** and it took approximately **5 minutes** to return an answer.
```python
plotter(subsets_sums, n=25)
```
## Subsets enumeration as Gray codes
Another order of generation can be used for subsets, namely we can see each subset as a [Gray code][gray]. In this way we know that consecutive subsets differ by *exactly* one bit, which is the same as saying that they contain the same objects but one, so *one* object either goes away or comes in.
Here is a new function `subsets_sums_gray` that uses two hooks: given two consecutive subsets, let `p` be the position of the bit `b` that toggles; if `b` switches *off* then the `off` hook removes the corresponding value `S[p]` from the sum accumulated so far, otherwise the `on` hook adds `S[p]` to it. A sketch of the assumed Gray-code machinery follows the next cell.
[gray]:http://nbviewer.jupyter.org/github/massimo-nocentini/competitive-programming/blob/master/tutorials/graycodes.ipynb?flush_cache=true
```python
def subsets_sums_gray(S):
"""
Returns a map of `(s, v)`, where `s` is a subset of `S`, and `v` is `sum(i in S & s)`.
"""
return list(high(gray_codes(length=len(S)),
on= lambda p, r: r + S[p],
off=lambda p, r: r - S[p],
redux=0))
```
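The helpers `gray_codes` and `high` come from the `graycodes.py` module loaded at the beginning; their exact interfaces are not shown here, so the following sketch is only an assumption meant to make the bookkeeping concrete. It relies on two standard facts: the reflected Gray code of `i` is `i ^ (i >> 1)`, and the bit toggled between consecutive codes `g(i)` and `g(i+1)` is the lowest set bit of `i + 1`.
```python
def gray_codes(length):
    """Yield (code, toggled_position) pairs for the reflected Gray code on `length` bits."""
    code = 0
    yield code, None                       # start from the empty subset
    for i in range(1, 1 << length):
        p = (i & -i).bit_length() - 1      # position of the lowest set bit of i
        code ^= 1 << p                     # toggle exactly that bit
        yield code, p

def high(codes, on, off, redux):
    """Fold the `on`/`off` hooks over a Gray-code enumeration, carrying a running value."""
    r = redux
    for code, p in codes:
        if p is not None:
            r = on(p, r) if (code >> p) & 1 else off(p, r)
        yield None, code, p, r             # 4-tuples (o, s, p, r); the first slot is a placeholder here
```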
We time the new experiment applying it to the same list of integers `S`:
```python
with timing(lambda: subsets_sums_gray(S)) as (res, start, end):
print("Elapsed time: {:.5} secs".format(end-start))
sums_gray = [0 for _ in range(1 << len(S))]
for o, s, p, r in res:
sums_gray[s] = r
```
Elapsed time: 0.00019288 secs
and we pretty print the subsets sums
```python
pretty(sums_gray)
```
{'0000': 0,
'0001': 153,
'0010': 30,
'0011': 183,
'0100': 150,
'0101': 303,
'0110': 180,
'0111': 333,
'1000': 184,
'1001': 337,
'1010': 214,
'1011': 367,
'1100': 334,
'1101': 487,
'1110': 364,
'1111': 517}
checking that the answers are the same
```python
assert sums == sums_gray
```
We repeat the experiment for lists of integers of different lengths as done before:
```python
plotter(subsets_sums_gray, n=25)
```
Using this order of subsets generation we gain approximately a **10-fold factor** with respect to the "usual" generation order.
# Primality testing - **not working**
The following is a very **unstable** attempt to test primality of an integer $n$, working on subsets instead of integer values.
```python
from sympy import IndexedBase, Eq, solve, init_printing, Xor
from math import sqrt, ceil
from itertools import product
from collections import defaultdict
init_printing()
def prefix_dictionaries(n, indexes): # `n` is the integer under study; we want to decide whether it is *prime* or not
    x, y = indexes
    ones = ones_of(n) # positions in the binary representation of `n` where bits are 1
    bits = ones[-1] + 1 # number of bits necessary to represent `n` in binary
for prefix in range(ceil(sqrt(bits)), bits):
compl = bits - prefix
pairs = defaultdict(lambda: 0)
for i, j in product(range(0,prefix+1), range(0,compl)): # compl
pairs[i+j] += x[i]*y[j]
yield compl, pairs
def prefix_systems(n, dicts, init={}):
for compl, dictionary in dicts:
yield { (k, compl) : Eq(is_on(n, k, return_int=True), v).subs(init)
for k, v in dictionary.items() if k}
def rec(eqs, subscripts_sum, substitutions):
if not eqs: yield substitutions
eq = eqs.pop(subscripts_sum)
substitutions[y[subscripts_sum]] = 0
subst_eq = eq.subs(substitutions)
if not subst_eq:
yield False
elif subst_eq == True:
yield from rec(dict(eqs), subscripts_sum + 1, substitutions)
else:
sol = solve(subst_eq, x[subscripts_sum]).pop()
new_substitutions = dict(substitutions)
new_substitutions[x[subscripts_sum]] = sol.rhs % 2
def diofantine(systems):
for s in systems:
for sol in solve(s):
for (k, compl), v in sol.items():
for i in range(1, compl):
eq = v[i, compl]
substitutions = {y[i]:1}
if not v.is_integer: break
else:
yield sol
def factorize(n, sols, indexes, init={}):
x, y = indexes
for sol in sols:
working_sol = defaultdict(lambda: 0)
working_sol.update(init)
working_sol.update(sol)
p, q = 0, 0
for i in range(n.bit_length()):
p = set_bit(p, i) if working_sol[x[i]] else clear_bit(p, i)
q = set_bit(q, i) if working_sol[y[i]] else clear_bit(q, i)
yield p, q
```
```python
x, y = IndexedBase('x'), IndexedBase('y')
n = 35
g = prefix_dictionaries(n, (x,y))
```
```python
next(g)
```
```python
init = {x[0]:1, y[0]:1}
eqs = prefix_systems(n, g, init)
```
```python
next(eqs)
```
```python
sols = diofantine(eqs)
```
```python
next(sols)
```
```python
factors = factorize(n, sols, (x,y), init)
```
```python
next(factors)
```
---
<a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/"></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">Bitmasking tutorial</span> by <a xmlns:cc="http://creativecommons.org/ns#" href="[email protected]" property="cc:attributionName" rel="cc:attributionURL">Massimo Nocentini</a> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/">Creative Commons Attribution-NonCommercial 4.0 International License</a>.<br />Based on a work at <a xmlns:dct="http://purl.org/dc/terms/" href="https://github.com/massimo-nocentini/competitive-programming/blob/master/tutorials/bitmasking.ipynb" rel="dct:source">https://github.com/massimo-nocentini/competitive-programming/blob/master/tutorials/bitmasking.ipynb</a>.
|
State Before: R : Type u_2
inst✝⁶ : CommSemiring R
M : Submonoid R
S : Type u_1
inst✝⁵ : CommSemiring S
inst✝⁴ : Algebra R S
P : Type ?u.70150
inst✝³ : CommSemiring P
A : Type ?u.70156
inst✝² : CommRing A
inst✝¹ : IsDomain A
I : Ideal R
hI : Ideal.IsPrime I
inst✝ : IsLocalization.AtPrime S I
h : optParam (LocalRing S) (_ : LocalRing S)
x : R
⊢ x ∈ Ideal.comap (algebraMap R S) (LocalRing.maximalIdeal S) ↔ x ∈ I State After: no goals Tactic: simpa only [Ideal.mem_comap] using to_map_mem_maximal_iff _ I x |
//
// Created by david on 2018-11-30.
//
#ifndef DMRG_CLASS_XDMRG_FULL_FUNCTOR_H
#define DMRG_CLASS_XDMRG_FULL_FUNCTOR_H
#ifdef OpenMP_AVAILABLE
#include <omp.h>
#endif
#ifdef OpenBLAS_AVAILABLE
#include <cblas.h>
#endif
#include <Eigen/Core>
#include <unsupported/Eigen/CXX11/Tensor>
#include <general/nmspc_tensor_extra.h>
#include <general/class_tic_toc.h>
template<typename Scalar>
class class_xDMRG_full_functor {
private:
double variance;
double energy ;
double energy_lower_bound;
double energy_upper_bound;
double energy_target;
double energy_window;
public:
template <typename T>
int sgn(const T val) const {
return (T(0) < val) - (val < T(0));
}
using MatrixType_ = Eigen::Matrix<Scalar,Eigen::Dynamic, Eigen::Dynamic>;
using VectorType_ = Eigen::Matrix<Scalar,Eigen::Dynamic, 1>;
size_t counter = 0;
// const size_t shape;
void set_energy_bounds(double E_lower, double E_upper);
bool have_bounds_on_energy = false;
double get_variance(){return variance;}
double get_energy (){return energy ;}
size_t get_count (){return counter;}
Eigen::Tensor<double,4> HA_MPO;
Eigen::Tensor<double,4> HB_MPO;
Eigen::Tensor<double,3> Lblock;
Eigen::Tensor<double,3> Rblock;
Eigen::Tensor<double,4> Lblock2;
Eigen::Tensor<double,4> Rblock2;
Eigen::Tensor<double,6> HAHB;
Eigen::Tensor<double,8> HAHB2;
Eigen::DSizes<long,4> dsizes;
class_tic_toc t_lbfgs;
class_xDMRG_full_functor(
const Eigen::Tensor<Scalar,4> &HA_MPO_,
const Eigen::Tensor<Scalar,4> &HB_MPO_,
const Eigen::Tensor<Scalar,3> &Lblock_,
const Eigen::Tensor<Scalar,3> &Rblock_,
const Eigen::Tensor<Scalar,4> &Lblock2_,
const Eigen::Tensor<Scalar,4> &Rblock2_,
const Eigen::DSizes<long,4> &dsizes_
);
double get_vH2v(const Eigen::Matrix<double,Eigen::Dynamic,1> &v);
double get_vHv(const Eigen::Matrix<double,Eigen::Dynamic,1> &v);
Eigen::VectorXd get_vH2 (const Eigen::Matrix<double,Eigen::Dynamic,1> &v);
Eigen::VectorXd get_vH (const Eigen::Matrix<double,Eigen::Dynamic,1> &v);
double operator()(const Eigen::Matrix<double,Eigen::Dynamic,1> &v, Eigen::Matrix<double,Eigen::Dynamic,1> &grad);
};
#endif //DMRG_CLASS_XDMRG_FULL_FUNCTOR_H
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory Padding
imports Main
begin
definition padup :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"padup align n \<equiv> (align - n mod align) mod align"
lemma padup_dvd:
"0 < b \<Longrightarrow> (padup b n = 0) = (b dvd n)"
unfolding padup_def
apply(subst dvd_eq_mod_eq_0)
apply(subst mod_if [where m="b - n mod b"])
apply clarsimp
apply(insert mod_less_divisor [of b n])
apply arith
done
lemma dvd_padup_add:
"0 < x \<Longrightarrow> x dvd y + padup x y"
apply(clarsimp simp: padup_def)
apply(subst mod_if [where m="x - y mod x"])
apply(clarsimp split: if_split_asm)
apply(rule conjI)
apply clarsimp
apply(subst ac_simps)
apply(subst diff_add_assoc)
apply(rule mod_less_eq_dividend)
apply(rule dvd_add)
apply simp
apply(subst minus_div_mult_eq_mod[symmetric])
apply(subst diff_diff_right)
apply(subst ac_simps)
apply(subst minus_mod_eq_mult_div[symmetric])
apply simp
apply simp
apply(auto simp: dvd_eq_mod_eq_0)
done
end
|
{-# OPTIONS --universe-polymorphism #-}
module Categories.Support.EqReasoning where
open import Categories.Support.Equivalence using (Setoid; module Setoid)
open import Relation.Binary.PropositionalEquality using () renaming (_≡_ to _≣_; trans to ≣-trans; sym to ≣-sym; refl to ≣-refl)
module SetoidReasoning {s₁ s₂} (S : Setoid s₁ s₂) where
open Setoid S
infix 4 _IsRelatedTo_
infix 1 begin_
infixr 2 _≈⟨_⟩_ _↓⟨_⟩_ _↑⟨_⟩_ _↓≣⟨_⟩_ _↑≣⟨_⟩_ _↕_
infix 3 _∎
-- This seemingly unnecessary type is used to make it possible to
-- infer arguments even if the underlying equality evaluates.
data _IsRelatedTo_ (x y : Carrier) : Set s₂ where
relTo : (x∼y : x ≈ y) → x IsRelatedTo y
.begin_ : ∀ {x y} → x IsRelatedTo y → x ≈ y
begin relTo x∼y = x∼y
._↓⟨_⟩_ : ∀ x {y z} → x ≈ y → y IsRelatedTo z → x IsRelatedTo z
_ ↓⟨ x∼y ⟩ relTo y∼z = relTo (trans x∼y y∼z)
-- where open IsEquivalence isEquivalence
._↑⟨_⟩_ : ∀ x {y z} → y ≈ x → y IsRelatedTo z → x IsRelatedTo z
_ ↑⟨ y∼x ⟩ relTo y∼z = relTo (trans (sym y∼x) y∼z)
-- where open IsEquivalence isEquivalence
-- the syntax of the ancients, for compatibility
._≈⟨_⟩_ : ∀ x {y z} → x ≈ y → y IsRelatedTo z → x IsRelatedTo z
_≈⟨_⟩_ = _↓⟨_⟩_
._↓≣⟨_⟩_ : ∀ x {y z} → x ≣ y → y IsRelatedTo z → x IsRelatedTo z
_ ↓≣⟨ ≣-refl ⟩ y∼z = y∼z
._↑≣⟨_⟩_ : ∀ x {y z} → y ≣ x → y IsRelatedTo z → x IsRelatedTo z
_ ↑≣⟨ ≣-refl ⟩ y∼z = y∼z
._↕_ : ∀ x {z} → x IsRelatedTo z → x IsRelatedTo z
_ ↕ x∼z = x∼z
._∎ : ∀ x → x IsRelatedTo x
_∎ _ = relTo refl
-- where open IsEquivalence isEquivalence
module ≣-reasoning {ℓ} (S : Set ℓ) where
infix 4 _IsRelatedTo_
infix 2 _∎
infixr 2 _≈⟨_⟩_
infixr 2 _↓⟨_⟩_
infixr 2 _↑⟨_⟩_
infixr 2 _↕_
infix 1 begin_
-- This seemingly unnecessary type is used to make it possible to
-- infer arguments even if the underlying equality evaluates.
data _IsRelatedTo_ (x y : S) : Set ℓ where
relTo : (x∼y : x ≣ y) → x IsRelatedTo y
begin_ : ∀ {x y} → x IsRelatedTo y → x ≣ y
begin relTo x∼y = x∼y
-- the syntax of the ancients, for compatibility
_≈⟨_⟩_ : ∀ x {y z} → x ≣ y → y IsRelatedTo z → x IsRelatedTo z
_ ≈⟨ x∼y ⟩ relTo y∼z = relTo (≣-trans x∼y y∼z)
_↓⟨_⟩_ : ∀ x {y z} → x ≣ y → y IsRelatedTo z → x IsRelatedTo z
_ ↓⟨ x∼y ⟩ relTo y∼z = relTo (≣-trans x∼y y∼z)
_↑⟨_⟩_ : ∀ x {y z} → y ≣ x → y IsRelatedTo z → x IsRelatedTo z
_ ↑⟨ y∼x ⟩ relTo y∼z = relTo (≣-trans (≣-sym y∼x) y∼z)
_↕_ : ∀ x {z} → x IsRelatedTo z → x IsRelatedTo z
_ ↕ x∼z = x∼z
_∎ : ∀ x → x IsRelatedTo x
_∎ _ = relTo ≣-refl
|
(* *********************************************************************)
(* *)
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
(* under the terms of the INRIA Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(** The whole compiler and its proof of semantic preservation *)
(** Libraries. *)
Require Import Coqlib.
Require Import Errors.
Require Import AST.
Require Import Smallstep.
Require Import Memory.
(** Languages (syntax and semantics). *)
Require Csyntax.
Require Csem.
Require Cstrategy.
Require Cexec.
Require Clight.
Require Csharpminor.
Require Cminor.
Require CminorSel.
Require RTL.
Require LTL.
Require LTLin.
Require Linear.
Require Mach.
Require Asm.
(** Translation passes. *)
Require Initializers.
Require SimplExpr.
Require SimplLocals.
Require Cshmgen.
Require Cminorgen.
Require Selection.
Require RTLgen.
Require Tailcall.
Require Inlining.
Require Renumber.
Require Constprop.
Require CSE.
Require Allocation.
Require Tunneling.
Require Linearize.
Require CleanupLabels.
Require Reload.
Require RRE.
Require Stacking.
Require Asmgen.
(** Type systems. *)
Require RTLtyping.
Require LTLtyping.
Require LTLintyping.
Require Lineartyping.
(** Proofs of semantic preservation and typing preservation. *)
Require SimplExprproof.
Require SimplLocalsproof.
Require Cshmgenproof.
Require Cminorgenproof.
Require Selectionproof.
Require RTLgenproof.
Require Tailcallproof.
Require Inliningproof.
Require Renumberproof.
Require Constpropproof.
Require CSEproof.
Require Allocproof.
Require Alloctyping.
Require Tunnelingproof.
Require Tunnelingtyping.
Require Linearizeproof.
Require Linearizetyping.
Require CleanupLabelsproof.
Require CleanupLabelstyping.
Require Reloadproof.
Require Reloadtyping.
Require RREproof.
Require RREtyping.
Require Stackingproof.
Require Asmgenproof.
Require Import ExtFunImpl ExtCallImpl.
Local Existing Instances ef_ops sc_ops ef_spec ec_ops cc_ops ec_spec.
(** Pretty-printers (defined in Caml). *)
Parameter print_Clight: Clight.program -> unit.
Parameter print_Cminor: Cminor.program -> unit.
Parameter print_RTL: RTL.program -> unit.
Parameter print_RTL_tailcall: RTL.program -> unit.
Parameter print_RTL_inline: RTL.program -> unit.
Parameter print_RTL_constprop: RTL.program -> unit.
Parameter print_RTL_cse: RTL.program -> unit.
Parameter print_LTLin: LTLin.program -> unit.
Parameter print_Mach: Mach.program -> unit.
Open Local Scope string_scope.
(** * Composing the translation passes *)
Section WITHMEM.
Context `{Hmem: Mem.MemoryModel}.
(** We first define useful monadic composition operators,
along with funny (but convenient) notations. *)
Definition apply_total (A B: Type) (x: res A) (f: A -> B) : res B :=
match x with Error msg => Error msg | OK x1 => OK (f x1) end.
Definition apply_partial (A B: Type)
(x: res A) (f: A -> res B) : res B :=
match x with Error msg => Error msg | OK x1 => f x1 end.
Notation "a @@@ b" :=
(apply_partial _ _ a b) (at level 50, left associativity).
Notation "a @@ b" :=
(apply_total _ _ a b) (at level 50, left associativity).
Definition print {A: Type} (printer: A -> unit) (prog: A) : A :=
let unused := printer prog in prog.
(** We define three translation functions for whole programs: one
starting with a C program, one with a Cminor program, one with an
RTL program. The three translations produce Asm programs ready for
pretty-printing and assembling. *)
Definition transf_rtl_program (f: RTL.program) : res Asm.program :=
OK f
@@ print print_RTL
@@ Tailcall.transf_program
@@ print print_RTL_tailcall
@@@ Inlining.transf_program
@@ Renumber.transf_program
@@ print print_RTL_inline
@@ Constprop.transf_program
@@ Renumber.transf_program
@@ print print_RTL_constprop
@@@ CSE.transf_program
@@ print print_RTL_cse
@@@ Allocation.transf_program
@@ Tunneling.tunnel_program
@@@ Linearize.transf_program
@@ CleanupLabels.transf_program
@@ print print_LTLin
@@ Reload.transf_program
@@ RRE.transf_program
@@@ Stacking.transf_program
@@ print print_Mach
@@@ Asmgen.transf_program.
Definition transf_cminor_program (p: Cminor.program) : res Asm.program :=
OK p
@@ print print_Cminor
@@ Selection.sel_program
@@@ RTLgen.transl_program
@@@ transf_rtl_program.
Definition transf_clight_program (p: Clight.program) : res Asm.program :=
OK p
@@ print print_Clight
@@@ SimplLocals.transf_program
@@@ Cshmgen.transl_program
@@@ Cminorgen.transl_program
@@@ transf_cminor_program.
Definition transf_c_program (p: Csyntax.program) : res Asm.program :=
OK p
@@@ SimplExpr.transl_program
@@@ transf_clight_program.
(** Force [Initializers] and [Cexec] to be extracted as well. *)
Definition transl_init := Initializers.transl_init.
Definition cexec_do_step := Cexec.do_step.
(** The following lemmas help reason over compositions of passes. *)
Lemma print_identity:
forall (A: Type) (printer: A -> unit) (prog: A),
print printer prog = prog.
Proof.
intros; unfold print. destruct (printer prog); auto.
Qed.
Lemma compose_print_identity:
forall (A: Type) (x: res A) (f: A -> unit),
x @@ print f = x.
Proof.
intros. destruct x; simpl. rewrite print_identity. auto. auto.
Qed.
(** * Semantic preservation *)
(** We prove that the [transf_program] translations preserve semantics
by constructing the following simulations:
- Forward simulations from [Cstrategy] / [Cminor] / [RTL] to [Asm]
(composition of the forward simulations for each pass).
- Backward simulations for the same languages
(derived from the forward simulation, using receptiveness of the source
language and determinacy of [Asm]).
- Backward simulation from [Csem] to [Asm]
(composition of two backward simulations).
These results establish the correctness of the whole compiler! *)
Theorem transf_rtl_program_correct:
forall p tp,
transf_rtl_program p = OK tp ->
forward_simulation (RTL.semantics p) (Asm.semantics tp)
* backward_simulation (RTL.semantics p) (Asm.semantics tp).
Proof.
intros.
assert (F: forward_simulation (RTL.semantics p) (Asm.semantics tp)).
unfold transf_rtl_program in H.
repeat rewrite compose_print_identity in H.
simpl in H.
set (p1 := Tailcall.transf_program p) in *.
destruct (Inlining.transf_program p1) as [p11|] eqn:?; simpl in H; try discriminate.
set (p12 := Renumber.transf_program p11) in *.
set (p2 := Constprop.transf_program p12) in *.
set (p21 := Renumber.transf_program p2) in *.
destruct (CSE.transf_program p21) as [p3|] eqn:?; simpl in H; try discriminate.
destruct (Allocation.transf_program p3) as [p4|] eqn:?; simpl in H; try discriminate.
set (p5 := Tunneling.tunnel_program p4) in *.
destruct (Linearize.transf_program p5) as [p6|] eqn:?; simpl in H; try discriminate.
set (p7 := CleanupLabels.transf_program p6) in *.
set (p8 := Reload.transf_program p7) in *.
set (p9 := RRE.transf_program p8) in *.
destruct (Stacking.transf_program p9) as [p10|] eqn:?; simpl in H; try discriminate.
assert(TY1: LTLtyping.wt_program p5).
eapply Tunnelingtyping.program_typing_preserved.
eapply Alloctyping.program_typing_preserved; eauto.
assert(TY2: LTLintyping.wt_program p7).
eapply CleanupLabelstyping.program_typing_preserved.
eapply Linearizetyping.program_typing_preserved; eauto.
assert(TY3: Lineartyping.wt_program p9).
eapply RREtyping.program_typing_preserved.
eapply Reloadtyping.program_typing_preserved; eauto.
eapply compose_forward_simulation. apply Tailcallproof.transf_program_correct.
eapply compose_forward_simulation. apply Inliningproof.transf_program_correct. eassumption.
eapply compose_forward_simulation. apply Renumberproof.transf_program_correct.
eapply compose_forward_simulation. apply Constpropproof.transf_program_correct.
eapply compose_forward_simulation. apply Renumberproof.transf_program_correct.
eapply compose_forward_simulation. apply CSEproof.transf_program_correct. eassumption.
eapply compose_forward_simulation. apply Allocproof.transf_program_correct. eassumption.
eapply compose_forward_simulation. apply Tunnelingproof.transf_program_correct.
eapply compose_forward_simulation. apply Linearizeproof.transf_program_correct. eassumption. eauto.
eapply compose_forward_simulation. apply CleanupLabelsproof.transf_program_correct.
eapply compose_forward_simulation. apply Reloadproof.transf_program_correct. eauto.
eapply compose_forward_simulation. apply RREproof.transf_program_correct. eauto.
eapply compose_forward_simulation. apply Stackingproof.transf_program_correct.
eexact Asmgenproof.return_address_exists. eassumption. eauto.
apply Asmgenproof.transf_program_correct; eauto.
split. auto.
apply forward_to_backward_simulation. auto.
apply RTL.semantics_receptive.
apply Asm.semantics_determinate.
Qed.
Theorem transf_cminor_program_correct:
forall p tp,
transf_cminor_program p = OK tp ->
forward_simulation (Cminor.semantics p) (Asm.semantics tp)
* backward_simulation (Cminor.semantics p) (Asm.semantics tp).
Proof.
intros.
assert (F: forward_simulation (Cminor.semantics p) (Asm.semantics tp)).
unfold transf_cminor_program in H.
repeat rewrite compose_print_identity in H.
simpl in H.
set (p1 := Selection.sel_program p) in *.
destruct (RTLgen.transl_program p1) as [p2|] eqn:?; simpl in H; try discriminate.
eapply compose_forward_simulation. apply Selectionproof.transf_program_correct.
eapply compose_forward_simulation. apply RTLgenproof.transf_program_correct. eassumption.
exact (fst (transf_rtl_program_correct _ _ H)).
split. auto.
apply forward_to_backward_simulation. auto.
apply Cminor.semantics_receptive.
apply Asm.semantics_determinate.
Qed.
Theorem transf_clight_program_correct:
forall p tp,
transf_clight_program p = OK tp ->
forward_simulation (Clight.semantics1 p) (Asm.semantics tp)
* backward_simulation (Clight.semantics1 p) (Asm.semantics tp).
Proof.
intros.
assert (F: forward_simulation (Clight.semantics1 p) (Asm.semantics tp)).
revert H; unfold transf_clight_program; simpl.
rewrite print_identity.
caseEq (SimplLocals.transf_program p); simpl; try congruence; intros p0 EQ0.
caseEq (Cshmgen.transl_program p0); simpl; try congruence; intros p1 EQ1.
caseEq (Cminorgen.transl_program p1); simpl; try congruence; intros p2 EQ2.
intros EQ3.
eapply compose_forward_simulation. apply SimplLocalsproof.transf_program_correct. eauto.
eapply compose_forward_simulation. apply Cshmgenproof.transl_program_correct. eauto.
eapply compose_forward_simulation. apply Cminorgenproof.transl_program_correct. eauto.
exact (fst (transf_cminor_program_correct _ _ EQ3)).
split. auto.
apply forward_to_backward_simulation. auto.
apply Clight.semantics_receptive.
apply Asm.semantics_determinate.
Qed.
Theorem transf_cstrategy_program_correct:
forall p tp,
transf_c_program p = OK tp ->
forward_simulation (Cstrategy.semantics p) (Asm.semantics tp)
* backward_simulation (atomic (Cstrategy.semantics p)) (Asm.semantics tp).
Proof.
intros.
assert (F: forward_simulation (Cstrategy.semantics p) (Asm.semantics tp)).
revert H; unfold transf_c_program; simpl.
caseEq (SimplExpr.transl_program p); simpl; try congruence; intros p0 EQ0.
intros EQ1.
eapply compose_forward_simulation. apply SimplExprproof.transl_program_correct. eauto.
exact (fst (transf_clight_program_correct _ _ EQ1)).
split. auto.
apply forward_to_backward_simulation.
apply factor_forward_simulation. auto. eapply sd_traces. eapply Asm.semantics_determinate.
apply atomic_receptive. apply Cstrategy.semantics_strongly_receptive.
apply Asm.semantics_determinate.
Qed.
Theorem transf_c_program_correct:
forall p tp,
transf_c_program p = OK tp ->
backward_simulation (Csem.semantics p) (Asm.semantics tp).
Proof.
intros.
apply compose_backward_simulation with (atomic (Cstrategy.semantics p)).
eapply sd_traces; eapply Asm.semantics_determinate.
apply factor_backward_simulation.
apply Cstrategy.strategy_simulation.
apply Csem.semantics_single_events.
eapply ssr_well_behaved; eapply Cstrategy.semantics_strongly_receptive.
exact (snd (transf_cstrategy_program_correct _ _ H)).
Qed.
End WITHMEM.
|
[STATEMENT]
lemma reachable_sweep_loop_free:
"mut_m.reachable m r (s(sys := s sys\<lparr>heap := (sys_heap s)(r' := None)\<rparr>))
\<Longrightarrow> mut_m.reachable m r s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mut_m.reachable m r (s(sys := s sys\<lparr>heap := (sys_heap s)(r' := None)\<rparr>)) \<Longrightarrow> mut_m.reachable m r s
[PROOF STEP]
unfolding mut_m.reachable_def reaches_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>x. x \<in> roots ((s(sys := s sys\<lparr>heap := (sys_heap s)(r' := None)\<rparr>)) (mutator m)) \<union> ghost_honorary_root ((s(sys := s sys\<lparr>heap := (sys_heap s)(r' := None)\<rparr>)) (mutator m)) \<union> mut_m.tso_store_refs m (s(sys := s sys\<lparr>heap := (sys_heap s)(r' := None)\<rparr>)) \<and> (\<lambda>x y. (x points_to y) (s(sys := s sys\<lparr>heap := (sys_heap s)(r' := None)\<rparr>)))\<^sup>*\<^sup>* x r \<Longrightarrow> \<exists>x. x \<in> roots (s (mutator m)) \<union> ghost_honorary_root (s (mutator m)) \<union> mut_m.tso_store_refs m s \<and> (\<lambda>x y. (x points_to y) s)\<^sup>*\<^sup>* x r
[PROOF STEP]
by (clarsimp simp: fun_upd_apply) (metis (no_types, lifting) mono_rtranclp) |
The monomial $x^{n+1}$ is equal to $x \cdot x^n$. |
% === [ Variable Recovery ] ====================================================
\section{Variable Recovery}
\label{sec:variable_recovery}
Variable and typing information is lost during compilation, as local and global variables, function arguments and function parameters are lowered from source code to machine code and mapped onto type-less registers and memory locations. Debug information of binary executables may record this mapping. When debug information is limited or absent, however, the source variables have to be recovered from low-level code using variable recovery methods. Since type analysis is based on inference over the typing relations of variables, variable recovery is required for type recovery.
% === [ Subsections ] ==========================================================
% TODO: include sections.
\input{sections/3_variable_recovery/1_value_set_analysis}
%\input{sections/3_variable_recovery/2_function_signature_recovery}
|
State Before: α : Type ?u.5138263
β : Type u_1
E : Type ?u.5138269
F : Type ?u.5138272
inst✝¹ : MeasurableSpace α
ι : Type ?u.5138278
inst✝ : NormedAddCommGroup E
f : β → ℝ
m m0 : MeasurableSpace β
μ : Measure β
hm : m ≤ m0
g : SimpleFunc β ℝ
hf : Integrable f
⊢ Integrable (↑g * f) State After: α : Type ?u.5138263
β : Type u_1
E : Type ?u.5138269
F : Type ?u.5138272
inst✝¹ : MeasurableSpace α
ι : Type ?u.5138278
inst✝ : NormedAddCommGroup E
f : β → ℝ
m m0 : MeasurableSpace β
μ : Measure β
hm : m ≤ m0
g : SimpleFunc β ℝ
hf : Integrable f
⊢ Integrable (↑(SimpleFunc.toLargerSpace hm g) * f) Tactic: rw [← SimpleFunc.coe_toLargerSpace_eq hm g] State Before: α : Type ?u.5138263
β : Type u_1
E : Type ?u.5138269
F : Type ?u.5138272
inst✝¹ : MeasurableSpace α
ι : Type ?u.5138278
inst✝ : NormedAddCommGroup E
f : β → ℝ
m m0 : MeasurableSpace β
μ : Measure β
hm : m ≤ m0
g : SimpleFunc β ℝ
hf : Integrable f
⊢ Integrable (↑(SimpleFunc.toLargerSpace hm g) * f) State After: no goals Tactic: exact hf.simpleFunc_mul (g.toLargerSpace hm) |
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Bhavik Mehta
-/
import category_theory.limits.preserves.basic
/-!
# Isomorphisms about functors which preserve (co)limits
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
If `G` preserves limits, and `C` and `D` have limits, then for any diagram `F : J ⥤ C` we have a
canonical isomorphism `preserves_limit_iso : G.obj (limit F) ≅ limit (F ⋙ G)`.
We also show that we can commute `is_limit.lift` of a preserved limit with `functor.map_cone`:
`(preserves_limit.preserves t).lift (G.map_cone c₂) = G.map (t.lift c₂)`.
The duals of these are also given. For functors which preserve (co)limits of specific shapes, see
`preserves/shapes.lean`.
-/
universes w' w v₁ v₂ u₁ u₂
noncomputable theory
namespace category_theory
open category limits
variables {C : Type u₁} [category.{v₁} C]
variables {D : Type u₂} [category.{v₂} D]
variables (G : C ⥤ D)
variables {J : Type w} [category.{w'} J]
variables (F : J ⥤ C)
section
variables [preserves_limit F G]
@[simp]
lemma preserves_lift_map_cone (c₁ c₂ : cone F) (t : is_limit c₁) :
(preserves_limit.preserves t).lift (G.map_cone c₂) = G.map (t.lift c₂) :=
((preserves_limit.preserves t).uniq (G.map_cone c₂) _ (by simp [← G.map_comp])).symm
variables [has_limit F] [has_limit (F ⋙ G)]
/--
If `G` preserves limits, we have an isomorphism from the image of the limit of a functor `F`
to the limit of the functor `F ⋙ G`.
-/
def preserves_limit_iso : G.obj (limit F) ≅ limit (F ⋙ G) :=
(preserves_limit.preserves (limit.is_limit _)).cone_point_unique_up_to_iso (limit.is_limit _)
@[simp, reassoc]
lemma preserves_limits_iso_hom_π (j) :
(preserves_limit_iso G F).hom ≫ limit.π _ j = G.map (limit.π F j) :=
is_limit.cone_point_unique_up_to_iso_hom_comp _ _ j
@[simp, reassoc]
lemma preserves_limits_iso_inv_π (j) :
(preserves_limit_iso G F).inv ≫ G.map (limit.π F j) = limit.π _ j :=
is_limit.cone_point_unique_up_to_iso_inv_comp _ _ j
@[simp, reassoc]
lemma lift_comp_preserves_limits_iso_hom (t : cone F) :
G.map (limit.lift _ t) ≫ (preserves_limit_iso G F).hom = limit.lift (F ⋙ G) (G.map_cone _) :=
by { ext, simp [← G.map_comp] }
variables [preserves_limits_of_shape J G] [has_limits_of_shape J D] [has_limits_of_shape J C]
/-- If `C, D` has all limits of shape `J`, and `G` preserves them, then `preserves_limit_iso` is
functorial wrt `F`. -/
@[simps] def preserves_limit_nat_iso : lim ⋙ G ≅ (whiskering_right J C D).obj G ⋙ lim :=
nat_iso.of_components (λ F, preserves_limit_iso G F)
begin
intros _ _ f,
ext,
dsimp,
simp only [preserves_limits_iso_hom_π, whisker_right_app, lim_map_π, category.assoc,
preserves_limits_iso_hom_π_assoc, ← G.map_comp]
end
end
section
variables [preserves_colimit F G]
@[simp]
lemma preserves_desc_map_cocone (c₁ c₂ : cocone F) (t : is_colimit c₁) :
(preserves_colimit.preserves t).desc (G.map_cocone _) = G.map (t.desc c₂) :=
((preserves_colimit.preserves t).uniq (G.map_cocone _) _ (by simp [← G.map_comp])).symm
variables [has_colimit F] [has_colimit (F ⋙ G)]
/--
If `G` preserves colimits, we have an isomorphism from the image of the colimit of a functor `F`
to the colimit of the functor `F ⋙ G`.
-/
-- TODO: think about swapping the order here
def preserves_colimit_iso : G.obj (colimit F) ≅ colimit (F ⋙ G) :=
(preserves_colimit.preserves (colimit.is_colimit _)).cocone_point_unique_up_to_iso
(colimit.is_colimit _)
@[simp, reassoc]
lemma ι_preserves_colimits_iso_inv (j : J) :
colimit.ι _ j ≫ (preserves_colimit_iso G F).inv = G.map (colimit.ι F j) :=
is_colimit.comp_cocone_point_unique_up_to_iso_inv _ (colimit.is_colimit (F ⋙ G)) j
@[simp, reassoc]
lemma ι_preserves_colimits_iso_hom (j : J) :
G.map (colimit.ι F j) ≫ (preserves_colimit_iso G F).hom = colimit.ι (F ⋙ G) j :=
(preserves_colimit.preserves (colimit.is_colimit _)).comp_cocone_point_unique_up_to_iso_hom _ j
@[simp, reassoc]
lemma preserves_colimits_iso_inv_comp_desc (t : cocone F) :
(preserves_colimit_iso G F).inv ≫ G.map (colimit.desc _ t) = colimit.desc _ (G.map_cocone t) :=
by { ext, simp [← G.map_comp] }
variables [preserves_colimits_of_shape J G] [has_colimits_of_shape J D] [has_colimits_of_shape J C]
/-- If `C, D` has all colimits of shape `J`, and `G` preserves them, then `preserves_colimit_iso`
is functorial wrt `F`. -/
@[simps] def preserves_colimit_nat_iso : colim ⋙ G ≅ (whiskering_right J C D).obj G ⋙ colim :=
nat_iso.of_components (λ F, preserves_colimit_iso G F)
begin
intros _ _ f,
rw [← iso.inv_comp_eq, ← category.assoc, ← iso.eq_comp_inv],
ext,
dsimp,
erw ι_colim_map_assoc,
simp only [ι_preserves_colimits_iso_inv, whisker_right_app, category.assoc,
ι_preserves_colimits_iso_inv_assoc, ← G.map_comp],
erw ι_colim_map
end
end
end category_theory
|
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import order.category.BoundedOrder
import order.category.Lattice
import order.category.Semilattice
/-!
# The category of bounded lattices
This file defines `BoundedLattice`, the category of bounded lattices.
In literature, this is sometimes called `Lat`, the category of lattices, because being a lattice is
understood to entail having a bottom and a top element.
-/
universes u
open category_theory
/-- The category of bounded lattices with bounded lattice morphisms. -/
structure BoundedLattice :=
(to_Lattice : Lattice)
[is_bounded_order : bounded_order to_Lattice]
namespace BoundedLattice
instance : has_coe_to_sort BoundedLattice Type* := ⟨λ X, X.to_Lattice⟩
instance (X : BoundedLattice) : lattice X := X.to_Lattice.str
attribute [instance] BoundedLattice.is_bounded_order
/-- Construct a bundled `BoundedLattice` from `lattice` + `bounded_order`. -/
def of (α : Type*) [lattice α] [bounded_order α] : BoundedLattice := ⟨⟨α⟩⟩
@[simp] lemma coe_of (α : Type*) [lattice α] [bounded_order α] : ↥(of α) = α := rfl
instance : inhabited BoundedLattice := ⟨of punit⟩
instance : large_category.{u} BoundedLattice :=
{ hom := λ X Y, bounded_lattice_hom X Y,
id := λ X, bounded_lattice_hom.id X,
comp := λ X Y Z f g, g.comp f,
id_comp' := λ X Y, bounded_lattice_hom.comp_id,
comp_id' := λ X Y, bounded_lattice_hom.id_comp,
assoc' := λ W X Y Z _ _ _, bounded_lattice_hom.comp_assoc _ _ _ }
instance : concrete_category BoundedLattice :=
{ forget := ⟨coe_sort, λ X Y, coe_fn, λ X, rfl, λ X Y Z f g, rfl⟩,
forget_faithful := ⟨λ X Y, by convert fun_like.coe_injective⟩ }
instance has_forget_to_BoundedOrder : has_forget₂ BoundedLattice BoundedOrder :=
{ forget₂ := { obj := λ X, BoundedOrder.of X,
map := λ X Y, bounded_lattice_hom.to_bounded_order_hom } }
instance has_forget_to_Lattice : has_forget₂ BoundedLattice Lattice :=
{ forget₂ := { obj := λ X, ⟨X⟩, map := λ X Y, bounded_lattice_hom.to_lattice_hom } }
instance has_forget_to_SemilatticeSup : has_forget₂ BoundedLattice SemilatticeSup :=
{ forget₂ := { obj := λ X, ⟨X⟩, map := λ X Y, bounded_lattice_hom.to_sup_bot_hom } }
instance has_forget_to_SemilatticeInf : has_forget₂ BoundedLattice SemilatticeInf :=
{ forget₂ := { obj := λ X, ⟨X⟩, map := λ X Y, bounded_lattice_hom.to_inf_top_hom } }
@[simp] lemma coe_forget_to_BoundedOrder (X : BoundedLattice) :
↥((forget₂ BoundedLattice BoundedOrder).obj X) = ↥X := rfl
@[simp] lemma coe_forget_to_Lattice (X : BoundedLattice) :
↥((forget₂ BoundedLattice Lattice).obj X) = ↥X := rfl
@[simp] lemma coe_forget_to_SemilatticeSup (X : BoundedLattice) :
↥((forget₂ BoundedLattice SemilatticeSup).obj X) = ↥X := rfl
@[simp] lemma coe_forget_to_SemilatticeInf (X : BoundedLattice) :
↥((forget₂ BoundedLattice SemilatticeInf).obj X) = ↥X := rfl
lemma forget_Lattice_PartialOrder_eq_forget_BoundedOrder_PartialOrder :
forget₂ BoundedLattice Lattice ⋙ forget₂ Lattice PartialOrder =
forget₂ BoundedLattice BoundedOrder ⋙ forget₂ BoundedOrder PartialOrder := rfl
lemma forget_SemilatticeSup_PartialOrder_eq_forget_BoundedOrder_PartialOrder :
forget₂ BoundedLattice SemilatticeSup ⋙ forget₂ SemilatticeSup PartialOrder =
forget₂ BoundedLattice BoundedOrder ⋙ forget₂ BoundedOrder PartialOrder := rfl
lemma forget_SemilatticeInf_PartialOrder_eq_forget_BoundedOrder_PartialOrder :
forget₂ BoundedLattice SemilatticeInf ⋙ forget₂ SemilatticeInf PartialOrder =
forget₂ BoundedLattice BoundedOrder ⋙ forget₂ BoundedOrder PartialOrder := rfl
/-- Constructs an equivalence between bounded lattices from an order isomorphism
between them. -/
@[simps] def iso.mk {α β : BoundedLattice.{u}} (e : α ≃o β) : α ≅ β :=
{ hom := e,
inv := e.symm,
hom_inv_id' := by { ext, exact e.symm_apply_apply _ },
inv_hom_id' := by { ext, exact e.apply_symm_apply _ } }
/-- `order_dual` as a functor. -/
@[simps] def dual : BoundedLattice ⥤ BoundedLattice :=
{ obj := λ X, of (order_dual X), map := λ X Y, bounded_lattice_hom.dual }
/-- The equivalence between `BoundedLattice` and itself induced by `order_dual` both ways. -/
@[simps functor inverse] def dual_equiv : BoundedLattice ≌ BoundedLattice :=
equivalence.mk dual dual
(nat_iso.of_components (λ X, iso.mk $ order_iso.dual_dual X) $ λ X Y f, rfl)
(nat_iso.of_components (λ X, iso.mk $ order_iso.dual_dual X) $ λ X Y f, rfl)
end BoundedLattice
lemma BoundedLattice_dual_comp_forget_to_BoundedOrder :
BoundedLattice.dual ⋙ forget₂ BoundedLattice BoundedOrder =
forget₂ BoundedLattice BoundedOrder ⋙ BoundedOrder.dual := rfl
lemma BoundedLattice_dual_comp_forget_to_Lattice :
BoundedLattice.dual ⋙ forget₂ BoundedLattice Lattice =
forget₂ BoundedLattice Lattice ⋙ Lattice.dual := rfl
lemma BoundedLattice_dual_comp_forget_to_SemilatticeSup :
BoundedLattice.dual ⋙ forget₂ BoundedLattice SemilatticeSup =
forget₂ BoundedLattice SemilatticeInf ⋙ SemilatticeInf.dual := rfl
lemma BoundedLattice_dual_comp_forget_to_SemilatticeInf :
BoundedLattice.dual ⋙ forget₂ BoundedLattice SemilatticeInf =
forget₂ BoundedLattice SemilatticeSup ⋙ SemilatticeSup.dual := rfl
|
[STATEMENT]
lemma iso_bij_betw_individual_blocks: "bl \<in># \<B> \<Longrightarrow> bij_betw \<pi> bl (\<pi> ` bl)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bl \<in># \<B> \<Longrightarrow> bij_betw \<pi> bl (\<pi> ` bl)
[PROOF STEP]
using bij bij_betw_subset source.wellformed
[PROOF STATE]
proof (prove)
using this:
bij_betw \<pi> \<V> \<V>'
\<lbrakk>bij_betw ?f ?A ?A'; ?B \<subseteq> ?A; ?f ` ?B = ?B'\<rbrakk> \<Longrightarrow> bij_betw ?f ?B ?B'
?b \<in># \<B> \<Longrightarrow> ?b \<subseteq> \<V>
goal (1 subgoal):
1. bl \<in># \<B> \<Longrightarrow> bij_betw \<pi> bl (\<pi> ` bl)
[PROOF STEP]
by blast |
-- {-# OPTIONS -v tc.meta:100 #-}
-- Andreas, 2011-04-20
-- see Abel Pientka TLCA 2011
module PruningNonMillerPattern where
data _≡_ {A : Set}(a : A) : A -> Set where
refl : a ≡ a
data Nat : Set where
zero : Nat
suc : Nat -> Nat
-- bad variable y in head position
test : let X : Nat -> Nat -> Nat
X = _
Y : Nat -> Nat -> Nat
Y = _
in (C : Set) ->
(({x y : Nat} -> X x x ≡ suc (Y x y)) ->
({x y : Nat} -> Y x x ≡ x) ->
({x y : Nat} -> X (Y x y) y ≡ X x x) -> C) -> C
test C k = k refl refl refl
{- none of these equations is immediately solvable. However,
from 1. we deduce that Y does not depend on its second argument, thus
from 2. we solve Y x y = x, and then
eqn. 3. simplifies to X x y = X x x, thus, X does not depend on its second arg,
we can then solve using 1. X x y = suc x
-}
-- a variant, where pruning is even triggered from a non-pattern
test' : let X : Nat -> Nat -> Nat
X = _
Y : Nat -> Nat -> Nat
Y = _
in (C : Set) ->
(({x y : Nat} -> X x (suc x) ≡ suc (Y x y)) -> -- non-pattern lhs
({x y : Nat} -> Y x x ≡ x) ->
({x y : Nat} -> X (Y x y) y ≡ X x x) -> C) -> C
test' C k = k refl refl refl
-- another variant, where the pruned argument does not have an offending
-- variable in the head, but in a non-eliminateable position
-- (argument to a datatype)
data Sing {A : Set} : A → Set where
sing : (x : A) -> Sing x
-- bad rigid under a data type constructor
test2 : let X : Nat -> Nat -> Nat
X = _
Y : Nat → Set -> Nat
Y = _
in (C : Set) ->
(({x y : Nat} -> X x x ≡ suc (Y x (Sing (suc y)))) ->
({x y : Nat} -> Y x (Sing x) ≡ x) ->
({x y : Nat} -> X (Y x (Sing y)) y ≡ X x x) -> C) -> C
test2 C k = k refl refl refl
T : Nat → Set
T zero = Nat
T (suc _) = Nat → Nat
-- bad rigid y under a Pi type constructor
test3 : let X : Nat -> Nat -> Nat
X = _
Y : Nat → Set -> Nat
Y = _
in (C : Set) ->
(({x y : Nat} -> X x x ≡ suc (Y x (T y -> T y))) ->
({x y : Nat} -> Y x (Sing x) ≡ x) ->
({x y : Nat} -> X (Y x (Sing y)) y ≡ X x x) -> C) -> C
test3 C k = k refl refl refl
-- bad rigid y in head position under a lambda
test4 : let X : Nat -> Nat -> Nat
X = _
Y : Nat → (Nat → Nat) -> Nat
Y = _
in (C : Set) ->
((∀ {x : Nat} {y : Nat → Nat} -> X x x ≡ suc (Y x (λ k → y zero))) ->
(∀ {x : Nat} {y : Nat → Nat} -> Y x (λ k → y zero) ≡ x) ->
(∀ {x : Nat} {y : Nat } -> X (Y x (λ k → y)) y ≡ X x x) -> C) -> C
test4 C k = k refl refl refl
-- bad variable in irrelevant position
test5 : let X : Nat -> Nat -> Nat
X = _
Y : Nat -> .Nat -> Nat
Y = _
in (C : Set) ->
(({x y : Nat} -> X x (suc x) ≡ suc (Y x (suc y))) -> -- non-pattern lhs
({x y : Nat} -> Y x x ≡ x) ->
({x y : Nat} -> X (Y x (suc y)) y ≡ X x x) -> C) -> C
test5 C k = k refl refl refl
|
function [xi,w]=firstOrderTriangleCubPoints()
%%FIRSTORDERTRIANGLECUBPOINTS Obtain first-order cubature points for
% integration over a triangle in 2D. The points and weights are for the
% triangle with vertices (1,0), (0,1), (0,0), but can be transformed to
% any triangle using transformSimplexTriPoints.
%
%INPUTS: None
%
%OUTPUTS: xi A 2XnumCubPoints set of points for the standard triangle.
% w A 1XnumCubPoints set of cubature weights. This sums to the
% volume of the triangle (1/2).
%
%This function implements the points given in [1] (3 points).
%
%EXAMPLE:
%Given the vertices of the simplex, we compare a first-order moment
%computed using these cubature points to one computed using
%monomialIntSimplex. The results are the same within typical finite
%precision limits.
% [xi,w]=firstOrderTriangleCubPoints();
% alpha=[1;0];
% theMoment=findMomentFromSamp(alpha,xi,w)
% intVal=monomialIntSimplex(alpha)
%
%REFERENCES:
%[1] F. D. Witherden and P. E. Vincent, "On the identification of symmetric
% quadrature rules for finite element methods," Computer and Mathematics
% with Applications, vol. 69, no. 10, pp. 1232-1241, May 2015.
%
%October 2022 David F. Crouse, Naval Research Laboratory, Washington D.C.
%(UNCLASSIFIED) DISTRIBUTION STATEMENT A. Approved for public release.
M=[-0.33333333333333333333333333333333333333, -0.33333333333333333333333333333333333333, 2];
w=M(:,3);
xi=M(:,1:2)';
%Transform the points to the standard triangle.
v1=[-1,-1, 1;
-1, 1,-1];
v2=[1,0,0;
0,1,0];
[A,d]=affineTransBetweenTriangles(v1,v2);
xi=bsxfun(@plus,A*xi,d);
w=w/4;
end
%LICENSE:
%
%The source code is in the public domain and not licensed or under
%copyright. The information and software may be used freely by the public.
%As required by 17 U.S.C. 403, third parties producing copyrighted works
%consisting predominantly of the material produced by U.S. government
%agencies must provide notice with such work(s) identifying the U.S.
%Government material incorporated and stating that such material is not
%subject to copyright protection.
%
%Derived works shall not identify themselves in a manner that implies an
%endorsement by or an affiliation with the Naval Research Laboratory.
%
%RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE
%SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY THE NAVAL
%RESEARCH LABORATORY FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE ACTIONS
%OF RECIPIENT IN THE USE OF THE SOFTWARE.
|
// The MIT License (MIT)
//
// Copyright (c) 2018 Mateusz Pusz
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// Formatting library for C++ - the core API for char/UTF-8
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#include <gsl/gsl-lite.hpp>
#include <units/bits/fmt_hacks.h>
#include <concepts>
#include <limits>
#include <string_view>
// most of the below code is based on/copied from libfmt
namespace units::detail {
struct auto_id {};
enum class fmt_align { none, left, right, center };
enum class fmt_sign { none, minus, plus, space };
enum class arg_id_kind { none, index, name };
template<typename Char>
struct fill_t {
private:
static constexpr size_t max_size = 4 / sizeof(Char);
// At most one codepoint (so one char32_t or four utf-8 char8_t)
Char data_[max_size] = {Char{' '}};
unsigned char size_ = 1;
public:
constexpr void operator=(std::basic_string_view<Char> s)
{
auto size = s.size();
if (size > max_size) return throw STD_FMT::format_error("invalid fill");
for (size_t i = 0; i < size; ++i) data_[i] = s[i];
size_ = static_cast<unsigned char>(size);
}
[[nodiscard]] constexpr size_t size() const { return size_; }
[[nodiscard]] constexpr const Char* data() const { return data_; }
[[nodiscard]] constexpr Char& operator[](size_t index) { return data_[index]; }
[[nodiscard]] constexpr const Char& operator[](size_t index) const { return data_[index]; }
};
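// Illustrative note (not part of the original sources): with Char = char, assigning
// std::string_view{"*"} stores a single '*' and size() becomes 1, while a view longer
// than max_size (one code point) makes operator= throw a format_error.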
template<typename T>
inline constexpr bool is_integer = std::is_integral<T>::value && !std::is_same<T, bool>::value &&
!std::is_same<T, char>::value && !std::is_same<T, wchar_t>::value;
template<typename Char>
[[nodiscard]] constexpr bool is_ascii_letter(Char c)
{
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
}
// Converts a character to ASCII. Returns a number > 127 on conversion failure.
template<std::integral Char>
[[nodiscard]] constexpr Char to_ascii(Char value)
{
return value;
}
template<typename Char>
requires std::is_enum_v<Char>
[[nodiscard]] constexpr auto to_ascii(Char value) -> std::underlying_type_t<Char> { return value; }
struct width_checker {
template<typename T>
[[nodiscard]] constexpr unsigned long long operator()(T value) const
{
if constexpr (is_integer<T>) {
if constexpr (std::numeric_limits<T>::is_signed) {
if (value < 0) throw STD_FMT::format_error("negative width");
}
return static_cast<unsigned long long>(value);
} else {
throw STD_FMT::format_error("width is not integer");
}
}
};
struct precision_checker {
template<typename T>
[[nodiscard]] constexpr unsigned long long operator()(T value) const
{
if constexpr (is_integer<T>) {
if constexpr (std::numeric_limits<T>::is_signed) {
if (value < 0) throw STD_FMT::format_error("negative precision");
}
return static_cast<unsigned long long>(value);
} else {
throw STD_FMT::format_error("precision is not integer");
}
}
};
// Format specifiers for built-in and string types.
template<typename Char>
struct basic_format_specs {
int width = 0;
int precision = -1;
char type = '\0';
fmt_align align : 4 = fmt_align::none;
fmt_sign sign : 3 = fmt_sign::none;
bool alt : 1 = false; // Alternate form ('#').
bool localized : 1 = false;
fill_t<Char> fill;
};
// Format specifiers with width and precision resolved at formatting rather
// than parsing time to allow re-using the same parsed specifiers with
// different sets of arguments (precompilation of format strings).
template<typename Char>
struct dynamic_format_specs : basic_format_specs<Char> {
int dynamic_width_index = -1;
int dynamic_precision_index = -1;
};
[[nodiscard]] constexpr int verify_dynamic_arg_index_in_range(size_t idx)
{
if (idx > static_cast<size_t>(std::numeric_limits<int>::max())) {
throw STD_FMT::format_error("Dynamic width or precision index too large.");
}
return static_cast<int>(idx);
}
template<typename CharT>
[[nodiscard]] constexpr int on_dynamic_arg(size_t arg_id, STD_FMT::basic_format_parse_context<CharT>& context)
{
context.check_arg_id(FMT_TO_ARG_ID(arg_id));
return verify_dynamic_arg_index_in_range(arg_id);
}
template<typename CharT>
[[nodiscard]] constexpr int on_dynamic_arg(auto_id, STD_FMT::basic_format_parse_context<CharT>& context)
{
return verify_dynamic_arg_index_in_range(FMT_FROM_ARG_ID(context.next_arg_id()));
}
template<class Handler, typename FormatContext>
[[nodiscard]] constexpr int get_dynamic_spec(int index, FormatContext& ctx)
{
const unsigned long long value =
STD_FMT::visit_format_arg(Handler{}, ctx.arg(FMT_TO_ARG_ID(static_cast<size_t>(index))));
if (value > static_cast<unsigned long long>(std::numeric_limits<int>::max())) {
throw STD_FMT::format_error("number is too big");
}
return static_cast<int>(value);
}
// Parses the range [begin, end) as an unsigned integer. This function assumes
// that the range is non-empty and the first character is a digit.
template<std::input_iterator It, std::sentinel_for<It> S>
[[nodiscard]] constexpr It parse_nonnegative_int(It begin, S end, size_t& value)
{
gsl_Expects(begin != end && '0' <= *begin && *begin <= '9');
constexpr auto max_int = static_cast<unsigned>(std::numeric_limits<int>::max());
constexpr auto big_int = max_int / 10u;
value = 0;
do {
if (value > big_int) {
value = max_int + 1;
break;
}
value = value * 10 + static_cast<unsigned int>(*begin - '0');
++begin;
} while (begin != end && '0' <= *begin && *begin <= '9');
if (value > max_int) throw STD_FMT::format_error("Number is too big");
return begin;
}
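// Minimal usage sketch (illustrative only, not from the original sources):
//   std::string_view sv = "42}";
//   size_t value = 0;
//   auto it = parse_nonnegative_int(sv.begin(), sv.end(), value);
// leaves value == 42 and returns an iterator pointing at '}'.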
template<std::input_iterator It, std::sentinel_for<It> S>
[[nodiscard]] constexpr It parse_nonnegative_int(It begin, S end, int& value)
{
size_t val_unsigned = 0;
begin = parse_nonnegative_int(begin, end, val_unsigned);
// Never invalid because parse_nonnegative_int throws an error for values that don't fit in signed integers
value = static_cast<int>(val_unsigned);
return begin;
}
template<std::input_iterator It, std::sentinel_for<It> S, typename IDHandler>
[[nodiscard]] constexpr It do_parse_arg_id(It begin, S end, IDHandler&& handler)
{
gsl_Expects(begin != end);
auto c = *begin;
if (c >= '0' && c <= '9') {
size_t index = 0;
if (c != '0')
begin = parse_nonnegative_int(begin, end, index);
else
++begin;
if (begin == end || (*begin != '}' && *begin != ':'))
throw STD_FMT::format_error("invalid format string");
else
handler(index);
return begin;
}
throw STD_FMT::format_error("invalid format string");
}
template<std::input_iterator It, std::sentinel_for<It> S, typename IDHandler>
[[nodiscard]] constexpr It parse_arg_id(It begin, S end, IDHandler&& handler)
{
auto c = *begin;
if (c != '}' && c != ':') return do_parse_arg_id(begin, end, handler);
handler();
return begin;
}
template<std::input_iterator It, std::sentinel_for<It> S, typename Handler>
[[nodiscard]] constexpr It parse_sign(It begin, S end, Handler&& handler)
{
gsl_Expects(begin != end);
switch (to_ascii(*begin)) {
case '+':
handler.on_sign(fmt_sign::plus);
++begin;
break;
case '-':
handler.on_sign(fmt_sign::minus);
++begin;
break;
case ' ':
handler.on_sign(fmt_sign::space);
++begin;
break;
default:
break;
}
return begin;
}
template<std::input_iterator It, std::sentinel_for<It> S, typename Handler>
[[nodiscard]] constexpr It parse_width(It begin, S end, Handler&& handler)
{
struct width_adapter {
Handler& handler;
constexpr void operator()() { handler.on_dynamic_width(auto_id{}); }
constexpr void operator()(size_t id) { handler.on_dynamic_width(id); }
};
gsl_Expects(begin != end);
if ('0' <= *begin && *begin <= '9') {
int width = 0;
begin = parse_nonnegative_int(begin, end, width);
if (width != -1)
handler.on_width(width);
else
throw STD_FMT::format_error("number is too big");
} else if (*begin == '{') {
++begin;
if (begin != end) begin = parse_arg_id(begin, end, width_adapter{handler});
if (begin == end || *begin != '}') throw STD_FMT::format_error("invalid format string");
++begin;
}
return begin;
}
template<std::input_iterator It, std::sentinel_for<It> S, typename Handler>
[[nodiscard]] constexpr It parse_precision(It begin, S end, Handler&& handler)
{
struct precision_adapter {
Handler& handler;
constexpr void operator()() { handler.on_dynamic_precision(auto_id{}); }
constexpr void operator()(size_t id) { handler.on_dynamic_precision(id); }
};
++begin;
auto c = begin != end ? *begin : std::iter_value_t<It>();
if ('0' <= c && c <= '9') {
auto precision = 0;
begin = parse_nonnegative_int(begin, end, precision);
if (precision != -1)
handler.on_precision(precision);
else
throw STD_FMT::format_error("number is too big");
} else if (c == '{') {
++begin;
if (begin != end) begin = parse_arg_id(begin, end, precision_adapter{handler});
if (begin == end || *begin++ != '}') throw STD_FMT::format_error("invalid format string");
} else {
throw STD_FMT::format_error("missing precision specifier");
}
return begin;
}
template<std::input_iterator It>
constexpr int code_point_length(It begin)
{
if constexpr (sizeof(std::iter_value_t<It>) != 1) return 1;
constexpr char lengths[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 3, 4, 0};
int len = lengths[static_cast<unsigned char>(*begin) >> 3];
// Compute the pointer to the next character early so that the next
// iteration can start working on the next character. Neither Clang
// nor GCC figure out this reordering on their own.
return len + !len;
}
// Parses fill and alignment.
template<std::input_iterator It, std::sentinel_for<It> S, typename Handler>
[[nodiscard]] constexpr It parse_align(It begin, S end, Handler&& handler)
{
gsl_Expects(begin != end);
auto align = fmt_align::none;
auto p = begin + code_point_length(begin);
if (p >= end) p = begin;
for (;;) {
switch (to_ascii(*p)) {
case '<':
align = fmt_align::left;
break;
case '>':
align = fmt_align::right;
break;
case '^':
align = fmt_align::center;
break;
default:
break;
}
if (align != fmt_align::none) {
if (p != begin) {
auto c = *begin;
if (c == '{') throw STD_FMT::format_error("invalid fill character '{'");
handler.on_fill(std::basic_string_view<std::iter_value_t<It>>(begin, static_cast<size_t>(p - begin)));
begin = p + 1;
} else
++begin;
handler.on_align(align);
break;
} else if (p == begin) {
break;
}
p = begin;
}
return begin;
}
// Parses standard format specifiers and sends notifications about parsed
// components to handler.
template<std::input_iterator It, std::sentinel_for<It> S, typename SpecHandler>
[[nodiscard]] constexpr It parse_format_specs(It begin, S end, SpecHandler&& handler)
{
if (begin + 1 < end && begin[1] == '}' && is_ascii_letter(*begin) && *begin != 'L') {
handler.on_type(*begin++);
return begin;
}
if (begin == end) return begin;
begin = ::units::detail::parse_align(begin, end, handler);
if (begin == end) return begin;
// Parse sign.
begin = ::units::detail::parse_sign(begin, end, handler);
if (begin == end) return begin;
if (*begin == '#') {
handler.on_hash();
if (++begin == end) return begin;
}
// Parse zero flag.
if (*begin == '0') {
handler.on_zero();
if (++begin == end) return begin;
}
begin = ::units::detail::parse_width(begin, end, handler);
if (begin == end) return begin;
// Parse precision.
if (*begin == '.') {
begin = ::units::detail::parse_precision(begin, end, handler);
if (begin == end) return begin;
}
if (*begin == 'L') {
handler.on_localized();
++begin;
}
// Parse type.
if (begin != end && *begin != '}') handler.on_type(*begin++);
return begin;
}
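// Illustrative walkthrough (assumed example, not from the original sources): for the
// spec "*>+#010.3f" the handler receives on_fill("*"), on_align(right), on_sign(plus),
// on_hash(), on_zero(), on_width(10), on_precision(3) and finally on_type('f'),
// matching the order of the parsing steps above.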
// A format specifier handler that sets fields in basic_format_specs.
template<typename Char>
class specs_setter {
protected:
basic_format_specs<Char>& specs_;
public:
constexpr explicit specs_setter(basic_format_specs<Char>& specs) : specs_(specs) {}
constexpr void on_align(fmt_align align) { specs_.align = align; }
constexpr void on_fill(std::basic_string_view<Char> fill) { specs_.fill = fill; }
constexpr void on_sign(fmt_sign s) { specs_.sign = s; }
constexpr void on_hash() { specs_.alt = true; }
constexpr void on_localized() { specs_.localized = true; }
constexpr void on_zero() { specs_.fill[0] = Char('0'); }
constexpr void on_width(int width) { specs_.width = width; }
constexpr void on_precision(int precision) { specs_.precision = precision; }
constexpr void on_type(Char type) { specs_.type = static_cast<char>(type); }
};
// Format spec handler that saves references to arguments representing dynamic
// width and precision to be resolved at formatting time.
template<typename ParseContext>
class dynamic_specs_handler : public specs_setter<typename ParseContext::char_type> {
public:
using char_type = TYPENAME ParseContext::char_type;
constexpr dynamic_specs_handler(dynamic_format_specs<char_type>& specs, ParseContext& ctx) :
specs_setter<char_type>(specs), specs_(specs), context_(ctx)
{}
template<typename T>
constexpr void on_dynamic_width(T t)
{
specs_.dynamic_width_index = on_dynamic_arg(t, context_);
}
template<typename T>
constexpr void on_dynamic_precision(T t)
{
specs_.dynamic_precision_index = on_dynamic_arg(t, context_);
}
private:
dynamic_format_specs<char_type>& specs_;
ParseContext& context_;
};
} // namespace units::detail
|
From VST Require Import floyd.proofauto.
From appliedfm Require Import Int63.model.int63.
From appliedfm Require Import Int63.vst.clightgen.int63.
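(* Each specification below relates a compiled C function to the functional model
from Int63.model.int63: a mathematical integer x is passed and returned as the
64-bit value (encode_Z x). The definition of encode_Z lives in the model file;
the usual choice for a 63-bit tagged representation is 2 * x + 1, but that is
an assumption of this comment, not something fixed here. *)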
Definition encode_int63_spec: ident * funspec :=
DECLARE _encode_int63
WITH x: Z, gv: globals
PRE [ tlong ]
PROP ( )
PARAMS (Vlong (Int64.repr x))
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z x)))
SEP ( ).
Definition decode_int63_spec: ident * funspec :=
DECLARE _decode_int63
WITH x: Z, gv: globals
PRE [ tlong ]
PROP (Int64.min_signed <= encode_Z x <= Int64.max_signed)
PARAMS (Vlong (Int64.repr (encode_Z x)))
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr x))
SEP ( ).
Definition int63_zero_spec: ident * funspec :=
DECLARE _int63_zero
WITH gv: globals
PRE [ ]
PROP ()
PARAMS ()
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z 0)))
SEP ( ).
Definition int63_one_spec: ident * funspec :=
DECLARE _int63_one
WITH gv: globals
PRE [ ]
PROP ()
PARAMS ()
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z 1)))
SEP ( ).
Definition int63_neg_spec: ident * funspec :=
DECLARE _int63_neg
WITH x: Z, gv: globals
PRE [ tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z (- x) <= Int64.max_signed
)
PARAMS (Vlong (Int64.repr (encode_Z x)))
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (- x))))
SEP ( ).
Definition int63_abs_spec: ident * funspec :=
DECLARE _int63_abs
WITH x: Z, gv: globals
PRE [ tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z (- x) <= Int64.max_signed
)
PARAMS (Vlong (Int64.repr (encode_Z x)))
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (Z.abs x))))
SEP ( ).
Definition int63_add_spec: ident * funspec :=
DECLARE _int63_add
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed;
Int64.min_signed <= encode_Z (x + y) < Int64.max_signed
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (x + y))))
SEP ( ).
Definition int63_sub_spec: ident * funspec :=
DECLARE _int63_sub
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed;
Int64.min_signed <= encode_Z (x - y) <= Int64.max_signed
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (x - y))))
SEP ( ).
Definition int63_mul_spec: ident * funspec :=
DECLARE _int63_mul
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed;
Int64.min_signed <= encode_Z (x * y) <= Int64.max_signed
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (x * y))))
SEP ( ).
Definition int63_div_spec: ident * funspec :=
DECLARE _int63_div
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed;
y <> 0
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (Z.quot x y))))
SEP ( ).
Definition int63_rem_spec: ident * funspec :=
DECLARE _int63_rem
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed;
y <> 0
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (Z.rem x y))))
SEP ( ).
Definition int63_shiftl_spec: ident * funspec :=
DECLARE _int63_shiftl
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed;
0 <= x;
0 <= y < Int64.zwordsize
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (Z.shiftl x y))))
SEP ( ).
Definition int63_shiftr_spec: ident * funspec :=
DECLARE _int63_shiftr
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed;
0 <= y < Int64.zwordsize
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (Z.shiftr x y))))
SEP ( ).
Definition int63_or_spec: ident * funspec :=
DECLARE _int63_or
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (Z.lor x y))))
SEP ( ).
Definition int63_and_spec: ident * funspec :=
DECLARE _int63_and
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (Z.land x y))))
SEP ( ).
Definition int63_xor_spec: ident * funspec :=
DECLARE _int63_xor
WITH x: Z, y: Z, gv: globals
PRE [ tlong, tlong ]
PROP (
Int64.min_signed <= encode_Z x <= Int64.max_signed;
Int64.min_signed <= encode_Z y <= Int64.max_signed
)
PARAMS (
Vlong (Int64.repr (encode_Z x));
Vlong (Int64.repr (encode_Z y))
)
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (Z.lxor x y))))
SEP ( ).
Definition int63_not_spec: ident * funspec :=
DECLARE _int63_not
WITH x: Z, gv: globals
PRE [ tlong ]
PROP (Int64.min_signed <= encode_Z x <= Int64.max_signed)
PARAMS (Vlong (Int64.repr (encode_Z x)))
GLOBALS(gv)
SEP ( )
POST [ tlong ]
PROP ( )
RETURN (Vlong (Int64.repr (encode_Z (Z.lnot x))))
SEP ( ).
Module int63__specs.
Definition exports: funspecs :=
[ decode_int63_spec
; encode_int63_spec
; int63_zero_spec
; int63_one_spec
; int63_neg_spec
; int63_abs_spec
; int63_add_spec
; int63_sub_spec
; int63_mul_spec
; int63_div_spec
; int63_rem_spec
; int63_shiftl_spec
; int63_shiftr_spec
; int63_or_spec
; int63_and_spec
; int63_xor_spec
; int63_not_spec
].
Definition externs: funspecs := [].
Definition imports: funspecs := [].
Definition private: funspecs := [].
Definition internals: funspecs := private ++ exports.
Definition gprog: funspecs := imports ++ internals.
Definition vprog: varspecs := ltac:(mk_varspecs prog).
End int63__specs.
|
#' @title Imaginary flight network
#'
#' @description Network graph of an imaginary airline's flight routes.
#'
#' @format An igraph graph with 20 nodes and 192 edges
#' \itemize{
#' \item node: An origin or destination city
#' \item edge: A flight between two cities
#' }
"flightGraph"
#' @title Plotting layout for nodes of a network about flight data
#'
#' @description Plotting layout for nodes of a network about flight data
#'
#' @format A matrix of two columns and 20 rows
#' \itemize{
#' \item column 1: X position
#' \item column 2: Y position
#' }
"flightLayout"
|
input := StringTools:-Trim(FileTools:-Text:-ReadFile("AoC-2021-15-input.txt"));
grid := Matrix(map(s->parse~(StringTools:-Explode(s)),StringTools:-Split(input,"\n"))):
(gridw,gridl) := upperbound(grid);
nbs := proc(i,j,gridsize)
local out := NULL;
if i < gridsize then out := out, [i+1, j]; end if;
if j < gridsize then out := out, [i, j+1]; end if;
if i > 1 then out := out, [i-1,j]; end if;
if j > 1 then out := out, [i, j-1]; end if;
return [out];
end proc:
# should really do this lazily
inc := a -> (a mod 9) +~ 1;
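# The full cave tiles the original grid 5x5: tile (i,j) is the original grid with
# every risk level increased by i+j-2, wrapping from 9 back to 1 (inc applied i+j-2 times).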
biggrid:= Matrix([seq([seq((inc@@(i+j-2))(grid),j=1..5)],i=1..5)] );
(biggridw,biggridl) := upperbound(biggrid);
with(GraphTheory):
G := MakeWeighted(MakeDirected(SpecialGraphs:-GridGraph(gridl,gridw)));
for e in Edges(G) do
SetEdgeWeight(G, e, grid[parse(e[2])]);
end do:
DijkstrasAlgorithm(G, "1,1", cat("",gridl,",",gridw))[2];
BG := MakeWeighted(MakeDirected(SpecialGraphs:-GridGraph(biggridl,biggridw)));
for e in Edges(BG) do
SetEdgeWeight(BG, e, biggrid[parse(e[2])]);
end do:
DijkstrasAlgorithm(BG, "1,1", cat("",biggridl,",",biggridw))[2];
|
lemma sets_vimage_algebra_cong: "sets M = sets N \<Longrightarrow> sets (vimage_algebra X f M) = sets (vimage_algebra X f N)" |
# Copyright 2016 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# group_func.py
# Created by Eric W Bridgeford on 2016-04-03.
# Email: [email protected]
import warnings
warnings.simplefilter("ignore")
import pickle
from ndmg.stats.qa_mri import qa_mri as mqa
import numpy as np
import ndmg.utils as mgu
import os
import plotly.offline as pyo
from plotly.tools import FigureFactory as ff
from ndmg.utils import loadGraphs
from ndmg.stats.qa_graphs import compute_metrics
from ndmg.stats.qa_graphs_plotting import make_panel_plot
import networkx as nx
from ndmg.stats.plotly_helper import *
class group_func(object):
def __init__(self, basedir, outdir, atlas=None, dataset=None):
"""
A class for group level quality control.
**Positional Arguments:**
- basedir:
- the ndmg-formatted functional outputs;
should have a qa/ folder contained within it.
- outdir:
- the directory to place all group level quality control.
- atlas:
- an optional parameter for the name of the atlas (parcellation)
associated with these outputs.
- dataset:
- an optional parameter for the name of the dataset
to be present in the quality control output filenames.
"""
print(atlas)
self.ndmgdir = basedir
self.qadir = "{}/qa".format(self.ndmgdir)
self.outdir = outdir
self.conn_dir = "{}/connectomes".format(self.ndmgdir)
self.dataset = dataset
self.atlas = atlas
(self.qa_files, self.subs) = self.get_qa_files()
self.connectomes = self.get_connectomes()
self.qa_objects = self.load_qa()
self.group_level_analysis()
pass
def get_qa_files(self):
"""
A function to load the relevant quality assessment files,
for all the subjects we have in our study, given a properly-formatted
ndmg functional directory.
"""
qa_files = []
subs = []
for sub in os.listdir(self.qadir):
sub_qa = "{}/{}/{}_stats.pkl".format(self.qadir, sub, sub)
# if the files exists, add it to our qa_files
if os.path.isfile(sub_qa):
qa_files.append(sub_qa)
subs.append(sub)
return (qa_files, subs)
def get_connectomes(self):
"""
A function to load the relevant connectomes for all of the subjects
for each parcellation we have.
"""
connectomes = {}
for label in os.listdir(self.conn_dir):
print(label)
this_label = []
label_dir = "{}/{}".format(self.conn_dir, label)
for connectome in os.listdir(label_dir):
conn_path = "{}/{}".format(label_dir, connectome)
if os.path.isfile(conn_path):
this_label.append(conn_path)
connectomes[label] = this_label
return connectomes
def load_qa(self):
"""
A function to load the quality control objects.
"""
qa_objects = []
for qa_file in self.qa_files:
# load the qa objects as qa_mri objects
qa_objects.append(mqa.load(qa_file))
return qa_objects
def group_level_analysis(self):
"""
A function to perform group level analysis after loading the
functional qa objects properly.
"""
self.group_reg()
self.group_motion()
def group_reg(self):
"""
A function that performs group level registration quality control.
"""
regdir = "{}/{}".format(self.outdir, "reg")
cmd = "mkdir -p {}".format(regdir)
mgu.execute_cmd(cmd)
self_reg_sc = []
temp_reg_sc = []
cnr = []
snr = []
for sub in self.qa_objects:
self_reg_sc.append(sub.self_reg_sc)
temp_reg_sc.append(sub.temp_reg_sc)
cnr.append(sub.cnr)
snr.append(sub.snr)
fig_cnr = plot_rugdensity(cnr)
fig_snr = plot_rugdensity(snr)
fig_sreg = plot_rugdensity(self_reg_sc)
fig_treg = plot_rugdensity(temp_reg_sc)
figs = [fig_cnr, fig_snr, fig_sreg, fig_treg]
names = [
"temporal Contrast to Noise Ratio",
"temporal Signal to Noise Ratio",
"Self Registration Score",
"Template Registration Score",
]
ylab = ["Density", "Density", "Density", "Density"]
xlab = ["Ratio", "Ratio", "Score", "Score"]
traces = [fig_to_trace(fig) for fig in figs]
fname_multi = "registration_qa.html"
# if a dataset name is provided, add it to the name
if self.dataset is not None:
fname_multi = "{}_{}".format(self.dataset, fname_multi)
fname_multi = "{}/{}".format(regdir, fname_multi)
multi = traces_to_panels(traces, names=names, ylabs=ylab, xlabs=xlab)
pyo.plot(multi, validate=False, filename=fname_multi)
pass
def group_motion(self):
"""
A function that performs group level motion correction quality control.
"""
mcdir = "{}/{}".format(self.outdir, "mc")
cmd = "mkdir -p {}".format(mcdir)
mgu.execute_cmd(cmd)
trans_abs = np.zeros((len(self.qa_objects)))
trans_rel = np.zeros((len(self.qa_objects)))
trans_abs_gt = np.zeros((len(self.qa_objects)))
trans_rel_gt = np.zeros((len(self.qa_objects)))
FD_mean = [sub.fd_mean for sub in self.qa_objects]
FD_max = [sub.fd_max for sub in self.qa_objects]
FD_gt_100um = [sub.fd_gt_100um for sub in self.qa_objects]
FD_gt_200um = [sub.fd_gt_200um for sub in self.qa_objects]
fig_mean = plot_rugdensity(FD_mean)
fig_max = plot_rugdensity(FD_max)
fig_gt_100um = plot_rugdensity(FD_gt_100um)
fig_gt_200um = plot_rugdensity(FD_gt_200um)
figs = [fig_mean, fig_max, fig_gt_100um, fig_gt_200um]
names = [
"Average FD KDE",
"Max FD KDE",
"Number of FD > 0.1 mm KDE",
"Number of FD > 0.2 mm KDE",
]
ylab = ["Density", "Density", "Density", "Density"]
xlab = [
"Average FD (mm)",
"Average Motion (mm)",
"Number of Volumes",
"Number of Volumes",
]
traces = [fig_to_trace(fig) for fig in figs]
fname_multi = "motion_correction.html"
# if a dataset name is provided, add it to the name
if self.dataset is not None:
fname_multi = "{}_{}".format(self.dataset, fname_multi)
fname_multi = "{}/{}".format(mcdir, fname_multi)
multi = traces_to_panels(traces, names=names, ylabs=ylab, xlabs=xlab)
pyo.plot(multi, validate=False, filename=fname_multi)
pass
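# Example usage (hypothetical paths and labels, shown only for illustration):
#   gf = group_func("/data/BNU1/ndmg_outputs", "/data/BNU1/group_qc",
#                   atlas="desikan", dataset="BNU1")
# Instantiating the class runs the group-level registration and motion QC and
# writes the interactive plotly reports under the output directory.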
|
(* Author: Tobias Nipkow, Alex Krauss *)
header "Regular sets"
theory Regular_Set
imports Main
begin
type_synonym 'a lang = "'a list set"
definition conc :: "'a lang \<Rightarrow> 'a lang \<Rightarrow> 'a lang" (infixr "@@" 75) where
"A @@ B = {xs@ys | xs ys. xs:A & ys:B}"
text {* checks the code preprocessor for set comprehensions *}
export_code conc checking SML
overloading lang_pow == "compow :: nat \<Rightarrow> 'a lang \<Rightarrow> 'a lang"
begin
primrec lang_pow :: "nat \<Rightarrow> 'a lang \<Rightarrow> 'a lang" where
"lang_pow 0 A = {[]}" |
"lang_pow (Suc n) A = A @@ (lang_pow n A)"
end
text {* for code generation *}
definition lang_pow :: "nat \<Rightarrow> 'a lang \<Rightarrow> 'a lang" where
lang_pow_code_def [code_abbrev]: "lang_pow = compow"
hide_const (open) lang_pow
definition star :: "'a lang \<Rightarrow> 'a lang" where
"star A = (\<Union>n. A ^^ n)"
subsection{* @{term "op @@"} *}
lemma concI[simp,intro]: "u : A \<Longrightarrow> v : B \<Longrightarrow> u@v : A @@ B"
by (auto simp add: conc_def)
lemma concE[elim]:
assumes "w \<in> A @@ B"
obtains u v where "u \<in> A" "v \<in> B" "w = u@v"
using assms by (auto simp: conc_def)
lemma conc_mono: "A \<subseteq> C \<Longrightarrow> B \<subseteq> D \<Longrightarrow> A @@ B \<subseteq> C @@ D"
by (auto simp: conc_def)
lemma conc_empty[simp]: shows "{} @@ A = {}" and "A @@ {} = {}"
by auto
lemma conc_epsilon[simp]: shows "{[]} @@ A = A" and "A @@ {[]} = A"
by (simp_all add:conc_def)
lemma conc_assoc: "(A @@ B) @@ C = A @@ (B @@ C)"
by (auto elim!: concE) (simp only: append_assoc[symmetric] concI)
lemma conc_Un_distrib:
shows "A @@ (B \<union> C) = A @@ B \<union> A @@ C"
and "(A \<union> B) @@ C = A @@ C \<union> B @@ C"
by auto
lemma conc_UNION_distrib:
shows "A @@ UNION I M = UNION I (%i. A @@ M i)"
and "UNION I M @@ A = UNION I (%i. M i @@ A)"
by auto
lemma conc_subset_lists: "A \<subseteq> lists S \<Longrightarrow> B \<subseteq> lists S \<Longrightarrow> A @@ B \<subseteq> lists S"
by(fastforce simp: conc_def in_lists_conv_set)
lemma Nil_in_conc[simp]: "[] \<in> A @@ B \<longleftrightarrow> [] \<in> A \<and> [] \<in> B"
by (metis append_is_Nil_conv concE concI)
lemma concI_if_Nil1: "[] \<in> A \<Longrightarrow> xs : B \<Longrightarrow> xs \<in> A @@ B"
by (metis append_Nil concI)
lemma conc_Diff_if_Nil1: "[] \<in> A \<Longrightarrow> A @@ B = (A - {[]}) @@ B \<union> B"
by (fastforce elim: concI_if_Nil1)
lemma concI_if_Nil2: "[] \<in> B \<Longrightarrow> xs : A \<Longrightarrow> xs \<in> A @@ B"
by (metis append_Nil2 concI)
lemma conc_Diff_if_Nil2: "[] \<in> B \<Longrightarrow> A @@ B = A @@ (B - {[]}) \<union> A"
by (fastforce elim: concI_if_Nil2)
lemma singleton_in_conc:
"[x] : A @@ B \<longleftrightarrow> [x] : A \<and> [] : B \<or> [] : A \<and> [x] : B"
by (fastforce simp: Cons_eq_append_conv append_eq_Cons_conv
conc_Diff_if_Nil1 conc_Diff_if_Nil2)
subsection{* @{term "A ^^ n"} *}
lemma lang_pow_add: "A ^^ (n + m) = A ^^ n @@ A ^^ m"
by (induct n) (auto simp: conc_assoc)
lemma lang_pow_empty: "{} ^^ n = (if n = 0 then {[]} else {})"
by (induct n) auto
lemma lang_pow_empty_Suc[simp]: "({}::'a lang) ^^ Suc n = {}"
by (simp add: lang_pow_empty)
lemma length_lang_pow_ub:
"ALL w : A. length w \<le> k \<Longrightarrow> w : A^^n \<Longrightarrow> length w \<le> k*n"
by(induct n arbitrary: w) (fastforce simp: conc_def)+
lemma length_lang_pow_lb:
"ALL w : A. length w \<ge> k \<Longrightarrow> w : A^^n \<Longrightarrow> length w \<ge> k*n"
by(induct n arbitrary: w) (fastforce simp: conc_def)+
lemma lang_pow_subset_lists:
assumes "A \<subseteq> lists S"
shows "A ^^ n \<subseteq> lists S"
by(induction n)(auto simp: conc_subset_lists[OF assms])
subsection{* @{const star} *}
lemma star_subset_lists: "A \<subseteq> lists S \<Longrightarrow> star A \<subseteq> lists S"
unfolding star_def by(blast dest: lang_pow_subset_lists)
lemma star_if_lang_pow[simp]: "w : A ^^ n \<Longrightarrow> w : star A"
by (auto simp: star_def)
lemma Nil_in_star[iff]: "[] : star A"
proof (rule star_if_lang_pow)
show "[] : A ^^ 0" by simp
qed
lemma star_if_lang[simp]: assumes "w : A" shows "w : star A"
proof (rule star_if_lang_pow)
show "w : A ^^ 1" using `w : A` by simp
qed
lemma append_in_starI[simp]:
assumes "u : star A" and "v : star A" shows "u@v : star A"
proof -
from `u : star A` obtain m where "u : A ^^ m" by (auto simp: star_def)
moreover
from `v : star A` obtain n where "v : A ^^ n" by (auto simp: star_def)
ultimately have "u@v : A ^^ (m+n)" by (simp add: lang_pow_add)
thus ?thesis by simp
qed
lemma conc_star_star: "star A @@ star A = star A"
by (auto simp: conc_def)
lemma conc_star_comm:
shows "A @@ star A = star A @@ A"
unfolding star_def conc_pow_comm conc_UNION_distrib
by simp
lemma star_induct[consumes 1, case_names Nil append, induct set: star]:
assumes "w : star A"
and "P []"
and step: "!!u v. u : A \<Longrightarrow> v : star A \<Longrightarrow> P v \<Longrightarrow> P (u@v)"
shows "P w"
proof -
{ fix n have "w : A ^^ n \<Longrightarrow> P w"
by (induct n arbitrary: w) (auto intro: `P []` step star_if_lang_pow) }
with `w : star A` show "P w" by (auto simp: star_def)
qed
lemma star_empty[simp]: "star {} = {[]}"
by (auto elim: star_induct)
lemma star_epsilon[simp]: "star {[]} = {[]}"
by (auto elim: star_induct)
lemma star_idemp[simp]: "star (star A) = star A"
by (auto elim: star_induct)
lemma star_unfold_left: "star A = A @@ star A \<union> {[]}" (is "?L = ?R")
proof
show "?L \<subseteq> ?R" by (rule, erule star_induct) auto
qed auto
lemma concat_in_star: "set ws \<subseteq> A \<Longrightarrow> concat ws : star A"
by (induct ws) simp_all
lemma in_star_iff_concat:
"w : star A = (EX ws. set ws \<subseteq> A & w = concat ws)"
(is "_ = (EX ws. ?R w ws)")
proof
assume "w : star A" thus "EX ws. ?R w ws"
proof induct
case Nil have "?R [] []" by simp
thus ?case ..
next
case (append u v)
moreover
then obtain ws where "set ws \<subseteq> A \<and> v = concat ws" by blast
ultimately have "?R (u@v) (u#ws)" by auto
thus ?case ..
qed
next
assume "EX us. ?R w us" thus "w : star A"
by (auto simp: concat_in_star)
qed
lemma star_conv_concat: "star A = {concat ws|ws. set ws \<subseteq> A}"
by (fastforce simp: in_star_iff_concat)
lemma star_insert_eps[simp]: "star (insert [] A) = star(A)"
proof-
{ fix us
have "set us \<subseteq> insert [] A \<Longrightarrow> EX vs. concat us = concat vs \<and> set vs \<subseteq> A"
(is "?P \<Longrightarrow> EX vs. ?Q vs")
proof
let ?vs = "filter (%u. u \<noteq> []) us"
show "?P \<Longrightarrow> ?Q ?vs" by (induct us) auto
qed
} thus ?thesis by (auto simp: star_conv_concat)
qed
lemma star_unfold_left_Nil: "star A = (A - {[]}) @@ (star A) \<union> {[]}"
by (metis insert_Diff_single star_insert_eps star_unfold_left)
lemma star_Diff_Nil_fold: "(A - {[]}) @@ star A = star A - {[]}"
proof -
have "[] \<notin> (A - {[]}) @@ star A" by simp
thus ?thesis using star_unfold_left_Nil by blast
qed
lemma star_decom:
assumes a: "x \<in> star A" "x \<noteq> []"
shows "\<exists>a b. x = a @ b \<and> a \<noteq> [] \<and> a \<in> A \<and> b \<in> star A"
using a by (induct rule: star_induct) (blast)+
subsection {* Left-Quotients of languages *}
definition Deriv :: "'a \<Rightarrow> 'a lang \<Rightarrow> 'a lang"
where "Deriv x A = { xs. x#xs \<in> A }"
definition Derivs :: "'a list \<Rightarrow> 'a lang \<Rightarrow> 'a lang"
where "Derivs xs A = { ys. xs @ ys \<in> A }"
abbreviation
Derivss :: "'a list \<Rightarrow> 'a lang set \<Rightarrow> 'a lang"
where
"Derivss s As \<equiv> \<Union> (Derivs s ` As)"
lemma Deriv_empty[simp]: "Deriv a {} = {}"
and Deriv_epsilon[simp]: "Deriv a {[]} = {}"
and Deriv_char[simp]: "Deriv a {[b]} = (if a = b then {[]} else {})"
and Deriv_union[simp]: "Deriv a (A \<union> B) = Deriv a A \<union> Deriv a B"
and Deriv_inter[simp]: "Deriv a (A \<inter> B) = Deriv a A \<inter> Deriv a B"
and Deriv_compl[simp]: "Deriv a (-A) = - Deriv a A"
and Deriv_Union[simp]: "Deriv a (Union M) = Union(Deriv a ` M)"
and Deriv_UN[simp]: "Deriv a (UN x:I. S x) = (UN x:I. Deriv a (S x))"
by (auto simp: Deriv_def)
lemma Der_conc [simp]: "Deriv c (A @@ B) = (Deriv c A) @@ B \<union> (if [] \<in> A then Deriv c B else {})"
unfolding Deriv_def conc_def
by (auto simp add: Cons_eq_append_conv)
lemma Deriv_star [simp]: "Deriv c (star A) = (Deriv c A) @@ star A"
proof -
have incl: "[] \<in> A \<Longrightarrow> Deriv c (star A) \<subseteq> (Deriv c A) @@ star A"
unfolding Deriv_def conc_def
apply(auto simp add: Cons_eq_append_conv)
apply(drule star_decom)
apply(auto simp add: Cons_eq_append_conv)
done
have "Deriv c (star A) = Deriv c (A @@ star A \<union> {[]})"
by (simp only: star_unfold_left[symmetric])
also have "... = Deriv c (A @@ star A)"
by (simp only: Deriv_union) (simp)
also have "... = (Deriv c A) @@ (star A) \<union> (if [] \<in> A then Deriv c (star A) else {})"
by simp
also have "... = (Deriv c A) @@ star A"
using incl by auto
finally show "Deriv c (star A) = (Deriv c A) @@ star A" .
qed
lemma Deriv_diff[simp]: "Deriv c (A - B) = Deriv c A - Deriv c B"
by(auto simp add: Deriv_def)
lemma Deriv_lists[simp]: "c : S \<Longrightarrow> Deriv c (lists S) = lists S"
by(auto simp add: Deriv_def)
lemma Derivs_simps [simp]:
shows "Derivs [] A = A"
and "Derivs (c # s) A = Derivs s (Deriv c A)"
and "Derivs (s1 @ s2) A = Derivs s2 (Derivs s1 A)"
unfolding Derivs_def Deriv_def by auto
lemma in_fold_Deriv: "v \<in> fold Deriv w L \<longleftrightarrow> w @ v \<in> L"
by (induct w arbitrary: L) (simp_all add: Deriv_def)
lemma Derivs_alt_def: "Derivs w L = fold Deriv w L"
by (induct w arbitrary: L) simp_all
subsection {* Shuffle product *}
fun shuffle where
"shuffle [] ys = {ys}"
| "shuffle xs [] = {xs}"
| "shuffle (x # xs) (y # ys) =
{x # w | w . w \<in> shuffle xs (y # ys)} \<union>
{y # w | w . w \<in> shuffle (x # xs) ys}"
lemma shuffle_empty2[simp]: "shuffle xs [] = {xs}"
by (cases xs) auto
lemma Nil_in_shuffle[simp]: "[] \<in> shuffle xs ys \<longleftrightarrow> xs = [] \<and> ys = []"
by (induct xs ys rule: shuffle.induct) auto
definition Shuffle (infixr "\<parallel>" 80) where
"Shuffle A B = \<Union>{shuffle xs ys | xs ys. xs \<in> A \<and> ys \<in> B}"
lemma shuffleE:
"zs \<in> shuffle xs ys \<Longrightarrow>
(zs = xs \<Longrightarrow> ys = [] \<Longrightarrow> P) \<Longrightarrow>
(zs = ys \<Longrightarrow> xs = [] \<Longrightarrow> P) \<Longrightarrow>
(\<And>x xs' z zs'. xs = x # xs' \<Longrightarrow> zs = z # zs' \<Longrightarrow> x = z \<Longrightarrow> zs' \<in> shuffle xs' ys \<Longrightarrow> P) \<Longrightarrow>
(\<And>y ys' z zs'. ys = y # ys' \<Longrightarrow> zs = z # zs' \<Longrightarrow> y = z \<Longrightarrow> zs' \<in> shuffle xs ys' \<Longrightarrow> P) \<Longrightarrow> P"
by (induct xs ys rule: shuffle.induct) auto
lemma Cons_in_shuffle_iff:
"z # zs \<in> shuffle xs ys \<longleftrightarrow>
(xs \<noteq> [] \<and> hd xs = z \<and> zs \<in> shuffle (tl xs) ys \<or>
ys \<noteq> [] \<and> hd ys = z \<and> zs \<in> shuffle xs (tl ys))"
by (induct xs ys rule: shuffle.induct) auto
lemma Deriv_Shuffle[simp]:
"Deriv a (A \<parallel> B) = Deriv a A \<parallel> B \<union> A \<parallel> Deriv a B"
unfolding Shuffle_def Deriv_def by (fastforce simp: Cons_in_shuffle_iff neq_Nil_conv)
lemma shuffle_subset_lists:
assumes "A \<subseteq> lists S" "B \<subseteq> lists S"
shows "A \<parallel> B \<subseteq> lists S"
unfolding Shuffle_def proof safe
fix x and zs xs ys :: "'a list"
assume zs: "zs \<in> shuffle xs ys" "x \<in> set zs" and "xs \<in> A" "ys \<in> B"
with assms have "xs \<in> lists S" "ys \<in> lists S" by auto
with zs show "x \<in> S" by (induct xs ys arbitrary: zs rule: shuffle.induct) auto
qed
lemma Nil_in_Shuffle[simp]: "[] \<in> A \<parallel> B \<longleftrightarrow> [] \<in> A \<and> [] \<in> B"
unfolding Shuffle_def by force
lemma shuffle_UNION_distrib:
shows "A \<parallel> UNION I M = UNION I (%i. A \<parallel> M i)"
and "UNION I M \<parallel> A = UNION I (%i. M i \<parallel> A)"
unfolding Shuffle_def by fast+
lemma Shuffle_empty[simp]:
"A \<parallel> {} = {}"
"{} \<parallel> B = {}"
unfolding Shuffle_def by auto
subsection {* Arden's Lemma *}
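text {* Arden's lemma: provided @{term "[] \<notin> A"}, the language equation
@{term "X = A @@ X \<union> B"} has the unique solution @{term "X = star A @@ B"};
the reversed version below treats @{term "X = X @@ A \<union> B"} symmetrically. *}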
lemma arden_helper:
assumes eq: "X = A @@ X \<union> B"
shows "X = (A ^^ Suc n) @@ X \<union> (\<Union>m\<le>n. (A ^^ m) @@ B)"
proof (induct n)
case 0
show "X = (A ^^ Suc 0) @@ X \<union> (\<Union>m\<le>0. (A ^^ m) @@ B)"
using eq by simp
next
case (Suc n)
have ih: "X = (A ^^ Suc n) @@ X \<union> (\<Union>m\<le>n. (A ^^ m) @@ B)" by fact
also have "\<dots> = (A ^^ Suc n) @@ (A @@ X \<union> B) \<union> (\<Union>m\<le>n. (A ^^ m) @@ B)" using eq by simp
also have "\<dots> = (A ^^ Suc (Suc n)) @@ X \<union> ((A ^^ Suc n) @@ B) \<union> (\<Union>m\<le>n. (A ^^ m) @@ B)"
by (simp add: conc_Un_distrib conc_assoc[symmetric] conc_pow_comm)
also have "\<dots> = (A ^^ Suc (Suc n)) @@ X \<union> (\<Union>m\<le>Suc n. (A ^^ m) @@ B)"
by (auto simp add: le_Suc_eq)
finally show "X = (A ^^ Suc (Suc n)) @@ X \<union> (\<Union>m\<le>Suc n. (A ^^ m) @@ B)" .
qed
lemma Arden:
assumes "[] \<notin> A"
shows "X = A @@ X \<union> B \<longleftrightarrow> X = star A @@ B"
proof
assume eq: "X = A @@ X \<union> B"
{ fix w assume "w : X"
let ?n = "size w"
from `[] \<notin> A` have "ALL u : A. length u \<ge> 1"
by (metis Suc_eq_plus1 add_leD2 le_0_eq length_0_conv not_less_eq_eq)
hence "ALL u : A^^(?n+1). length u \<ge> ?n+1"
by (metis length_lang_pow_lb nat_mult_1)
hence "ALL u : A^^(?n+1)@@X. length u \<ge> ?n+1"
by(auto simp only: conc_def length_append)
hence "w \<notin> A^^(?n+1)@@X" by auto
hence "w : star A @@ B" using `w : X` using arden_helper[OF eq, where n="?n"]
by (auto simp add: star_def conc_UNION_distrib)
} moreover
{ fix w assume "w : star A @@ B"
hence "EX n. w : A^^n @@ B" by(auto simp: conc_def star_def)
hence "w : X" using arden_helper[OF eq] by blast
} ultimately show "X = star A @@ B" by blast
next
assume eq: "X = star A @@ B"
have "star A = A @@ star A \<union> {[]}"
by (rule star_unfold_left)
then have "star A @@ B = (A @@ star A \<union> {[]}) @@ B"
by metis
also have "\<dots> = (A @@ star A) @@ B \<union> B"
unfolding conc_Un_distrib by simp
also have "\<dots> = A @@ (star A @@ B) \<union> B"
by (simp only: conc_assoc)
finally show "X = A @@ X \<union> B"
using eq by blast
qed
lemma reversed_arden_helper:
assumes eq: "X = X @@ A \<union> B"
shows "X = X @@ (A ^^ Suc n) \<union> (\<Union>m\<le>n. B @@ (A ^^ m))"
proof (induct n)
case 0
show "X = X @@ (A ^^ Suc 0) \<union> (\<Union>m\<le>0. B @@ (A ^^ m))"
using eq by simp
next
case (Suc n)
have ih: "X = X @@ (A ^^ Suc n) \<union> (\<Union>m\<le>n. B @@ (A ^^ m))" by fact
also have "\<dots> = (X @@ A \<union> B) @@ (A ^^ Suc n) \<union> (\<Union>m\<le>n. B @@ (A ^^ m))" using eq by simp
also have "\<dots> = X @@ (A ^^ Suc (Suc n)) \<union> (B @@ (A ^^ Suc n)) \<union> (\<Union>m\<le>n. B @@ (A ^^ m))"
by (simp add: conc_Un_distrib conc_assoc)
also have "\<dots> = X @@ (A ^^ Suc (Suc n)) \<union> (\<Union>m\<le>Suc n. B @@ (A ^^ m))"
by (auto simp add: le_Suc_eq)
finally show "X = X @@ (A ^^ Suc (Suc n)) \<union> (\<Union>m\<le>Suc n. B @@ (A ^^ m))" .
qed
theorem reversed_Arden:
assumes nemp: "[] \<notin> A"
shows "X = X @@ A \<union> B \<longleftrightarrow> X = B @@ star A"
proof
assume eq: "X = X @@ A \<union> B"
{ fix w assume "w : X"
let ?n = "size w"
from `[] \<notin> A` have "ALL u : A. length u \<ge> 1"
by (metis Suc_eq_plus1 add_leD2 le_0_eq length_0_conv not_less_eq_eq)
hence "ALL u : A^^(?n+1). length u \<ge> ?n+1"
by (metis length_lang_pow_lb nat_mult_1)
hence "ALL u : X @@ A^^(?n+1). length u \<ge> ?n+1"
by(auto simp only: conc_def length_append)
hence "w \<notin> X @@ A^^(?n+1)" by auto
hence "w : B @@ star A" using `w : X` using reversed_arden_helper[OF eq, where n="?n"]
by (auto simp add: star_def conc_UNION_distrib)
} moreover
{ fix w assume "w : B @@ star A"
hence "EX n. w : B @@ A^^n" by (auto simp: conc_def star_def)
hence "w : X" using reversed_arden_helper[OF eq] by blast
} ultimately show "X = B @@ star A" by blast
next
assume eq: "X = B @@ star A"
have "star A = {[]} \<union> star A @@ A"
unfolding conc_star_comm[symmetric]
by(metis Un_commute star_unfold_left)
then have "B @@ star A = B @@ ({[]} \<union> star A @@ A)"
by metis
also have "\<dots> = B \<union> B @@ (star A @@ A)"
unfolding conc_Un_distrib by simp
also have "\<dots> = B \<union> (B @@ star A) @@ A"
by (simp only: conc_assoc)
finally show "X = X @@ A \<union> B"
using eq by blast
qed
end
|
/-
Copyright (c) 2021 Adam Topaz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Adam Topaz, Scott Morrison
-/
import category_theory.limits.preserves.shapes.biproducts
import category_theory.preadditive.functor_category
/-!
# Additive Functors
A functor between two preadditive categories is called *additive*
provided that the induced map on hom types is a morphism of abelian
groups.
An additive functor between preadditive categories creates and preserves biproducts.
Conversely, if `F : C ⥤ D` is a functor between preadditive categories, where `C` has binary
biproducts, and if `F` preserves binary biproducts, then `F` is additive.
We also define the category of bundled additive functors.
# Implementation details
`functor.additive` is a `Prop`-valued class, defined by saying that for every two objects `X` and
`Y`, the map `F.map : (X ⟶ Y) → (F.obj X ⟶ F.obj Y)` is a morphism of abelian groups.
-/
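-- For instance (illustrative): the identity functor `𝟭 C` and the composite of two
-- additive functors are additive (instances below), and `functor.map_add_hom` packages
-- `F.map` as an `add_monoid_hom`.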
namespace category_theory
/-- A functor `F` is additive provided `F.map` is an additive homomorphism. -/
class functor.additive {C D : Type*} [category C] [category D]
[preadditive C] [preadditive D] (F : C ⥤ D) : Prop :=
(map_add' : Π {X Y : C} {f g : X ⟶ Y}, F.map (f + g) = F.map f + F.map g . obviously)
section preadditive
namespace functor
section
variables {C D : Type*} [category C] [category D] [preadditive C]
[preadditive D] (F : C ⥤ D) [functor.additive F]
@[simp]
lemma map_add {X Y : C} {f g : X ⟶ Y} : F.map (f + g) = F.map f + F.map g :=
functor.additive.map_add'
/-- `F.map_add_hom` is an additive homomorphism whose underlying function is `F.map`. -/
@[simps {fully_applied := ff}]
def map_add_hom {X Y : C} : (X ⟶ Y) →+ (F.obj X ⟶ F.obj Y) :=
add_monoid_hom.mk' (λ f, F.map f) (λ f g, F.map_add)
lemma coe_map_add_hom {X Y : C} : ⇑(F.map_add_hom : (X ⟶ Y) →+ _) = @map C _ D _ F X Y := rfl
@[priority 100]
instance preserves_zero_morphisms_of_additive : preserves_zero_morphisms F :=
{ map_zero' := λ X Y, F.map_add_hom.map_zero }
instance : additive (𝟭 C) :=
{}
instance {E : Type*} [category E] [preadditive E] (G : D ⥤ E) [functor.additive G] :
additive (F ⋙ G) :=
{}
@[simp]
lemma map_neg {X Y : C} {f : X ⟶ Y} : F.map (-f) = - F.map f :=
F.map_add_hom.map_neg _
@[simp]
lemma map_sub {X Y : C} {f g : X ⟶ Y} : F.map (f - g) = F.map f - F.map g :=
F.map_add_hom.map_sub _ _
-- You can alternatively just use `functor.map_smul` here, with an explicit `(r : ℤ)` argument.
lemma map_zsmul {X Y : C} {f : X ⟶ Y} {r : ℤ} : F.map (r • f) = r • F.map f :=
F.map_add_hom.map_zsmul _ _
open_locale big_operators
@[simp]
lemma map_sum {X Y : C} {α : Type*} (f : α → (X ⟶ Y)) (s : finset α) :
F.map (∑ a in s, f a) = ∑ a in s, F.map (f a) :=
(F.map_add_hom : (X ⟶ Y) →+ _).map_sum f s
end
section induced_category
variables {C : Type*} {D : Type*} [category D] [preadditive D] (F : C → D)
instance induced_functor_additive : functor.additive (induced_functor F) := {}
end induced_category
section
-- To talk about preservation of biproducts we need to specify universes explicitly.
noncomputable theory
universes v u₁ u₂
variables {C : Type u₁} {D : Type u₂} [category.{v} C] [category.{v} D]
[preadditive C] [preadditive D] (F : C ⥤ D)
open category_theory.limits
open category_theory.preadditive
@[priority 100]
instance preserves_finite_biproducts_of_additive [additive F] : preserves_finite_biproducts F :=
{ preserves := λ J _ _,
{ preserves := λ f,
{ preserves := λ b hb, by exactI is_bilimit_of_total _
begin
simp_rw [F.map_bicone_π, F.map_bicone_ι, ← F.map_comp, ← F.map_sum],
dsimp only [map_bicone_X],
simp_rw [← F.map_id],
refine congr_arg _ (hb.is_limit.hom_ext (λ j, hb.is_colimit.hom_ext (λ j', _))),
simp [sum_comp, comp_sum, bicone.ι_π, comp_dite, dite_comp]
end } } }
lemma additive_of_preserves_binary_biproducts [has_binary_biproducts C] [preserves_zero_morphisms F]
[preserves_binary_biproducts F] : additive F :=
{ map_add' := λ X Y f g, by rw [biprod.add_eq_lift_id_desc, F.map_comp, ← biprod.lift_map_biprod,
← biprod.map_biprod_hom_desc, category.assoc, iso.inv_hom_id_assoc, F.map_id,
biprod.add_eq_lift_id_desc] }
end
end functor
namespace equivalence
variables {C D : Type*} [category C] [category D] [preadditive C] [preadditive D]
instance inverse_additive (e : C ≌ D) [e.functor.additive] : e.inverse.additive :=
{ map_add' := λ X Y f g, by { apply e.functor.map_injective, simp, }, }
end equivalence
section
variables (C D : Type*) [category C] [category D] [preadditive C] [preadditive D]
/-- Bundled additive functors. -/
@[derive category, nolint has_inhabited_instance]
def AdditiveFunctor :=
{ F : C ⥤ D // functor.additive F }
infixr ` ⥤+ `:26 := AdditiveFunctor
instance : preadditive (C ⥤+ D) :=
preadditive.induced_category.category _
/-- An additive functor is in particular a functor. -/
@[derive full, derive faithful]
def AdditiveFunctor.forget : (C ⥤+ D) ⥤ (C ⥤ D) :=
full_subcategory_inclusion _
variables {C D}
/-- Turn an additive functor into an object of the category `AdditiveFunctor C D`. -/
def AdditiveFunctor.of (F : C ⥤ D) [F.additive] : C ⥤+ D :=
⟨F, infer_instance⟩
@[simp]
lemma AdditiveFunctor.of_fst (F : C ⥤ D) [F.additive] : (AdditiveFunctor.of F).1 = F :=
rfl
@[simp]
lemma AdditiveFunctor.forget_obj (F : C ⥤+ D) : (AdditiveFunctor.forget C D).obj F = F.1 :=
rfl
lemma AdditiveFunctor.forget_obj_of (F : C ⥤ D) [F.additive] :
(AdditiveFunctor.forget C D).obj (AdditiveFunctor.of F) = F :=
rfl
@[simp]
lemma AdditiveFunctor.forget_map (F G : C ⥤+ D) (α : F ⟶ G) :
(AdditiveFunctor.forget C D).map α = α :=
rfl
instance : functor.additive (AdditiveFunctor.forget C D) :=
{ map_add' := λ F G α β, rfl }
instance (F : C ⥤+ D) : functor.additive F.1 :=
F.2
end
end preadditive
end category_theory
|
(**************************************************************************
* TLC: A library for Coq *
* Environments for metatheory *
* Adapted for xp var path environments, Upsilon.
**************************************************************************)
(* Questions:
I could have generalized env to be (V : Type) (A : ValueType), but I didn't
want to disturb the automation on envs until I really understand it.
Also, I may need a different signature to get variables, not just variable paths,
out at different points.
Two theorems are failing, but the Ltac is overloaded, which is not ideal.
The failing theorems are about freshness, which is to be expected, as I
have no idea what freshness issues we really need to solve in this context.
*)
Set Implicit Arguments.
Require Import TLC.LibTactics TLC.LibOption TLC.LibList TLC.LibProd TLC.LibLogic TLC.LibReflect.
Require Export TLC.LibVar.
Require Export LibVarPath.
Module LVPE.
Ltac trace_goal := idtac.
(*
match goal with
| |- ?g => idtac g
end.
*)
(* ********************************************************************** *)
(** * Definition of environments and their basic operations *)
(** To avoid definitions being unfolded by [simpl] in an uncontrollable
manner, we use a module signature to enforce abstraction *)
(* ---------------------------------------------------------------------- *)
(** ** Abstract definitions *)
Generalizable Variable A.
Definition varpathenv (A:Type) := list (varpath * A).
Definition varpaths := fset varpath.
Module Type VarPathEnvOpsSig.
Section Definitions.
Variable A B : Type.
Parameter empty : varpathenv A.
Parameter empty_def : empty = nil.
Parameter single : varpath -> A -> varpathenv A.
Parameter single_def :
single = fun x v => (x,v)::nil.
Parameter concat : varpathenv A -> varpathenv A -> varpathenv A.
Parameter concat_def :
concat = fun E F => F ++ E.
Parameter singles : list varpath -> list A -> varpathenv A.
Parameter singles_def :
singles = fun xs vs =>
fold_right (fun p acc => concat acc (single (fst p) (snd p))) empty (combine xs vs).
Parameter keys : varpathenv A -> list varpath.
Parameter keys_def :
keys = map fst.
Parameter values : varpathenv A -> list A.
Parameter values_def :
values = map snd.
Parameter fold_vars : (A -> varpaths) -> varpathenv A -> varpaths.
Parameter fold_vars_defs :
fold_vars = fun fv E =>
fold_right (fun v acc => fv v \u acc) \{} (values E).
Parameter dom : varpathenv A -> varpaths.
Parameter dom_def :
dom = fold_right (fun p E => \{fst p} \u E) \{}.
Parameter dom' : varpathenv A -> vars.
Parameter dom'_def :
dom' = fold_right (fun p E => \{fst (fst p)} \u E) \{}.
Parameter map : (A -> B) -> varpathenv A -> varpathenv B.
Parameter map_def :
map = fun f E =>
LibList.map (fun p => (fst p, f (snd p))) E.
Parameter map_keys : (varpath -> varpath) -> varpathenv A -> varpathenv A.
Parameter map_keys_def :
map_keys = fun f E =>
LibList.map (fun p => (f (fst p), snd p)) E.
Parameter get : varpath -> varpathenv A -> option A.
Fixpoint get_impl (k : varpath) (E : varpathenv A) {struct E} : option A :=
match E with
| nil => None
| (x,v) :: E' => If k = x then Some v else get_impl k E'
end.
Parameter get_def :
get = get_impl.
End Definitions.
Implicit Arguments empty [A].
End VarPathEnvOpsSig.
(* ---------------------------------------------------------------------- *)
(** ** Concrete definitions *)
Module Export V : VarPathEnvOpsSig.
Section Concrete.
Variable A B : Type.
Definition empty : varpathenv A := nil.
Lemma empty_def : empty = nil.
Proof using. reflexivity. Qed.
Definition single x v : varpathenv A := (x,v)::nil.
Lemma single_def :
single = fun x v => (x,v)::nil.
Proof using. reflexivity. Qed.
Definition concat (E F : varpathenv A) := F ++ E.
Lemma concat_def :
concat = fun E F => F ++ E.
Proof using. reflexivity. Qed.
Definition singles (xs : list varpath) (vs : list A) : varpathenv A :=
fold_right (fun p acc => concat acc (single (fst p) (snd p))) empty (combine xs vs).
Lemma singles_def :
singles = fun xs vs =>
fold_right (fun p acc => concat acc (single (fst p) (snd p))) empty (combine xs vs).
Proof using. reflexivity. Qed.
Definition keys : varpathenv A -> list varpath :=
map fst.
Lemma keys_def :
keys = map fst.
Proof using. reflexivity. Qed.
Definition values : varpathenv A -> list A :=
map snd.
Lemma values_def :
values = map snd.
Proof using. reflexivity. Qed.
Definition fold_vars (fv : A -> varpaths) (E : varpathenv A) :=
fold_right (fun v acc => fv v \u acc) \{} (values E).
Lemma fold_vars_defs :
fold_vars = fun fv E => fold_right (fun v acc => fv v \u acc) \{} (values E).
Proof using. reflexivity. Qed.
Definition map (f:A->B) (E:varpathenv A) :=
LibList.map (fun p => (fst p, f (snd p))) E.
Lemma map_def :
map = fun f E => LibList.map (fun p => (fst p, f (snd p))) E.
Proof using. reflexivity. Qed.
Definition map_keys (f:varpath->varpath) (E:varpathenv A) :=
LibList.map (fun p => (f (fst p), snd p)) E.
Lemma map_keys_def :
map_keys = fun f E => LibList.map (fun p => (f (fst p), snd p)) E.
Proof using. reflexivity. Qed.
Definition dom : varpathenv A -> varpaths :=
fold_right (fun p E => \{fst p} \u E) \{}.
Lemma dom_def :
dom = fold_right (fun p E => \{fst p} \u E) \{}.
Proof using. reflexivity. Qed.
Definition dom' : varpathenv A -> vars :=
fold_right (fun p E => \{fst (fst p)} \u E) \{}.
Lemma dom'_def :
dom' = fold_right (fun p E => \{fst (fst p)} \u E) \{}.
Proof using. reflexivity. Qed.
Fixpoint get_impl (k : varpath) (E : varpathenv A) {struct E} : option A :=
match E with
| nil => None
| (x,v) :: E' => If k = x then Some v else get_impl k E'
end.
Definition get := get_impl.
Lemma get_def :
get = get_impl.
Proof using. reflexivity. Qed.
End Concrete.
Implicit Arguments empty [A].
End V.
(* ---------------------------------------------------------------------- *)
(** ** Notations *)
Module LibVarPathEnvNotations.
(** [x ~p a] is the notation for a singleton environment mapping x to a. *)
Notation "x '~p' a" := (single x a)
(at level 7, left associativity) : env_scope.
(** [xs ~p* vs] is the notation for [single_iter xs vs]. *)
Notation "xs '~p*' vs" := (singles xs vs)
(at level 27, left associativity) : env_scope.
(** [E p& F] is the notation for concatenation of E and F. *)
Notation "E '&p' F" := (concat E F)
(at level 28, left associativity) : env_scope.
(** [x p# E] to be read x fresh from E captures the fact that
x is unbound in E . *)
Notation "x '#p' E" := (x \notin (dom E)) (at level 67) : env_scope.
Bind Scope env_scope with varpathenv.
Delimit Scope env_scope with varpathenv.
Open Scope env_scope.
End LibVarPathEnvNotations.
Import LibVarPathEnvNotations.
(* ---------------------------------------------------------------------- *)
(** ** Additional definitions *)
Section MoreDefinitions.
Variable A : Type.
Implicit Types E F : varpathenv A.
(** Well-formed environments contains no duplicate bindings. *)
Inductive okp : varpathenv A -> Prop :=
| okp_empty :
okp empty
| okp_push : forall E x v,
okp E -> x #p E -> okp (E &p x ~p v).
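(** For example, [okp (x ~p v1 &p y ~p v2)] holds provided [y <> x],
whereas [okp (x ~p v1 &p x ~p v2)] does not (illustrative). *)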
(** An environment contains a binding from x to a iff the most recent
binding on x is mapped to a *)
Definition binds x v E :=
get x E = Some v.
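(** For example, in [x ~p v1 &p x ~p v2] the most recent binding wins:
[binds x v2] holds, while [binds x v1] does not when [v1 <> v2]
(illustrative). *)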
(** Read the value associated to a bound variable *)
Definition get_or_arbitrary `{Inhab A} x (E:varpathenv A) :=
unsome (get x E).
(** Inclusion of an environment in another one. *)
Definition extends E F :=
forall x v, binds x v E -> binds x v F.
(** Gathering free variables contained in the keys of an environment *)
Definition fv_in_keys (fv: varpath ->vars) (E:varpathenv A) :=
fold_right (fun v L => (fv v) \u L) \{} (keys E).
(** Gathering free variables contained in the values of an environment *)
Definition fv_in_values (fv:A->vars) (E:varpathenv A) :=
fold_right (fun v L => (fv v) \u L) \{} (values E).
End MoreDefinitions.
(* ---------------------------------------------------------------------- *)
(** ** Basic Properties *)
Hint Rewrite empty_def single_def concat_def singles_def keys_def values_def
dom_def map_def map_keys_def get_def : env_defs.
Ltac rew_env_defs := autorewrite with env_defs in *.
Section Properties.
Variable A B : Type.
Implicit Types k x : varpath.
Implicit Types v : A.
Implicit Types E F G : varpathenv A.
Implicit Types f : A -> B.
Implicit Types r : varpath -> varpath.
Lemma cons_to_push : forall x v E,
(x, v) :: E = E &p x ~p v.
Proof using. intros. rew_env_defs. rew_app~. Qed.
Lemma env_ind : forall (P : varpathenv A -> Prop),
(P empty) ->
(forall E x v, P E -> P (E &p x ~p v)) ->
(forall E, P E).
Proof using.
rew_env_defs. induction E as [|(x,v)].
auto. forwards*: H0 IHE.
Qed.
Lemma map_empty : forall f,
map f empty = empty.
Proof using. intros. rew_env_defs. auto. Qed.
Lemma map_single : forall f x v,
map f (x ~p v) = (x ~p (f v)).
Proof using. intros. rew_env_defs. auto. Qed.
Lemma map_concat : forall f E F,
map f (E &p F) = (map f E) &p (map f F).
Proof using.
intros. rew_env_defs.
gen E. induction F as [|(x,v)]; intros.
rew_list~.
rew_list. fequals.
Qed.
Lemma map_push : forall f x v E,
map f (E &p x ~p v) = (map f E) &p (x ~p (f v)).
Proof using. intros. rewrite map_concat, map_single. auto. Qed.
Lemma map_keys_empty : forall r,
map_keys r (@empty A) = empty.
Proof using. intros. rew_env_defs. auto. Qed.
Lemma map_keys_single : forall r x v,
map_keys r (x ~p v) = ((r x) ~p v).
Proof using. intros. rew_env_defs. auto. Qed.
Lemma map_keys_concat : forall r E F,
map_keys r (E &p F) = map_keys r E &p map_keys r F.
Proof using.
intros. rew_env_defs.
gen E. induction F as [|(x,v)]; intros.
rew_list~.
rew_list. fequals.
Qed.
Lemma map_keys_push : forall r x v E,
map_keys r (E &p x ~p v) = map_keys r E &p ((r x) ~p v).
Proof using. intros. rewrite map_keys_concat, map_keys_single. auto. Qed.
Lemma get_empty : forall k,
get k (@empty A) = None.
Proof using. intros. rew_env_defs. auto. Qed.
Lemma get_single : forall k x v,
get k (x ~p v) = If k = x
then Some v
else None.
Proof using. intros. rew_env_defs. unfold get_impl. auto. Qed.
Lemma get_concat : forall k E F,
get k (E &p F) = match get k F with
| None => get k E
| Some v => Some v
end.
Proof using.
intros. rew_env_defs. induction F as [|(x,v)].
auto.
simpl. case_if~.
Qed.
Lemma dom_empty :
dom (@empty A) = \{}.
Proof using. rew_env_defs. auto. Qed.
Lemma dom_single : forall x v,
dom (x ~p v) = \{x}.
Proof using.
intros. rew_env_defs.
rew_list. rewrite~ union_empty_r.
Qed.
Lemma dom_concat : forall E F,
dom (E &p F) = dom E \u dom F.
Proof using.
intros. rew_env_defs.
gen E. induction F as [|(x,v)]; intros.
rew_list. rewrite~ union_empty_r.
rew_app. rewrite fold_right_cons. rewrite IHF.
rewrite~ union_comm_assoc.
Qed.
Lemma dom_push : forall x v E,
dom (E &p x ~p v) = \{x} \u dom E.
Proof using.
intros. rewrite dom_concat. rewrite dom_single.
rewrite~ union_comm.
Qed.
End Properties.
Section SinglesProperties.
Variable A B : Type.
Implicit Types x : varpath.
Implicit Types v : A.
Implicit Types xs : list varpath.
Implicit Types vs : list A.
Implicit Types E F : varpathenv A.
Lemma singles_nil :
nil ~p* nil = (@empty A).
Proof using. intros. rew_env_defs. auto. Qed.
Lemma singles_cons : forall x v xs vs,
(x::xs) ~p* (v::vs) = (xs ~p* vs) &p (x ~p v).
Proof using. intros. rew_env_defs. simpl. rew_env_defs. fequals. Qed.
Lemma singles_one : forall x v,
(x::nil) ~p* (v::nil) = (x ~p v).
Proof using. intros. rew_env_defs. simpl. rew_env_defs. fequals. Qed.
Lemma singles_two : forall x1 x2 v1 v2,
(x1::x2::nil) ~p* (v1::v2::nil) = (x2 ~p v2 &p x1 ~p v1).
Proof using. intros. rew_env_defs. simpl. rew_env_defs. fequals. Qed.
Lemma keys_singles : forall xs vs,
length xs = length vs ->
keys (xs ~p* vs) = xs.
Proof using.
intros. rew_env_defs. gen vs. induction xs; introv E.
rew_list~.
destruct vs. false. simpl. rew_env_defs. rew_list. fequals.
rew_list in E. applys IHxs. inverts E. auto.
Qed.
Lemma values_singles : forall xs vs,
length xs = length vs ->
values (xs ~p* vs) = vs.
Proof using.
intros. rew_env_defs. gen vs.
induction xs; destruct vs; introv E; tryfalse.
auto.
rew_list in E. simpl. rew_env_defs. rew_list. fequals.
applys IHxs. inverts E. auto.
Qed.
Lemma dom_singles : forall xs vs,
length xs = length vs ->
dom (xs ~p* vs) = from_list xs.
Proof using.
intros. rew_env_defs. gen vs.
induction xs; destruct vs; introv E; tryfalse.
simpl. rewrite~ from_list_nil.
simpl. rew_env_defs. rew_list. rewrite from_list_cons. fequals.
applys IHxs. inverts~ E.
Qed.
Lemma map_singles : forall (f : A -> B) xs vs,
length xs = length vs ->
map f (xs ~p* vs) = xs ~p* (LibList.map f vs).
Proof using.
intros. rew_env_defs. gen vs.
induction xs; destruct vs; introv E; tryfalse.
rew_list~.
rew_list. simpl. rew_list. fequals~.
Qed.
Lemma map_keys_singles : forall f xs vs,
length xs = length vs ->
map_keys f (xs ~p* vs) = (LibList.map f xs) ~p* vs.
Proof using.
intros. rew_env_defs. gen vs.
induction xs; destruct vs; introv E; tryfalse.
rew_list~.
rew_list. simpl. rew_list. fequals~.
Qed.
Lemma concat_singles : forall xs1 xs2 vs1 vs2,
length xs1 = length vs1 ->
length xs2 = length vs2 ->
(xs2 ~p* vs2) &p (xs1 ~p* vs1) = (xs1 ++ xs2) ~p* (vs1 ++ vs2).
Proof using.
introv E1 E2. rew_env_defs. gen vs1.
induction xs1; destruct vs1; intros; tryfalse.
rew_list~.
rew_list. simpl. rew_list. fequals~.
Qed.
Lemma singles_keys_values : forall E,
E = (keys E) ~p* (values E).
Proof using.
intros. rew_env_defs. induction E as [|[x v] E'].
auto.
rew_list. simpl. rew_env_defs. rew_list. fequals.
Qed.
End SinglesProperties.
(* ---------------------------------------------------------------------- *)
(** ** Structural properties *)
Section StructProperties.
Variable A : Type.
Implicit Types x : varpath.
Implicit Types v : A.
Implicit Types E F : varpathenv A.
Lemma env_case : forall E,
E = empty \/ exists x v E', E = E' &p x ~p v.
Proof using. intros. induction E using env_ind; autos*. Qed.
Lemma concat_empty_r : forall E,
E &p empty = E.
Proof using. intros. rew_env_defs. rew_app~. Qed.
Lemma concat_empty_l : forall E,
empty &p E = E.
Proof using. intros. rew_env_defs. rew_app~. Qed.
Lemma concat_assoc : forall E F G,
E &p (F &p G) = (E &p F) &p G.
Proof using. intros. rew_env_defs. rew_app~. Qed.
Lemma empty_single_inv : forall x v,
empty = x ~p v -> False.
Proof using. introv H. rew_env_defs. inverts H. Qed.
Lemma empty_concat_inv : forall E F,
empty = E &p F -> empty = E /\ empty = F.
Proof using. introv H. rew_env_defs. forwards*: nil_eq_app_inv H. Qed.
Lemma empty_push_inv : forall E x v,
empty = E &p x ~p v -> False.
Proof using. introv H. rew_env_defs. inverts H. Qed.
Lemma empty_middle_inv : forall E F x v,
empty = E &p x ~p v &p F -> False.
Proof using. introv H. rew_env_defs. forwards* [_ ?]: nil_eq_app_inv H. false. Qed.
Lemma eq_single_inv : forall x1 x2 v1 v2,
x1 ~p v1 = x2 ~p v2 ->
x1 = x2 /\ v1 = v2.
Proof using. introv H. rew_env_defs. inverts~ H. Qed.
Lemma eq_push_inv : forall x1 x2 v1 v2 E1 E2,
E1 &p x1 ~p v1 = E2 &p x2 ~p v2 ->
x1 = x2 /\ v1 = v2 /\ E1 = E2.
Proof using. introv H. rew_env_defs. inverts~ H. Qed.
End StructProperties.
(* ---------------------------------------------------------------------- *)
(** ** More properties *)
Section MoreProperties.
Variable A : Type.
Implicit Types x : varpath.
Implicit Types v : A.
Implicit Types E F : varpathenv A.
Lemma dom_map : forall (B:Type) (f:A->B) E,
dom (map f E) = dom E.
Proof using.
induction E using env_ind.
rewrite map_empty. do 2 rewrite dom_empty. auto.
rewrite map_concat. rewrite map_single.
rewrite_all dom_concat. rewrite_all dom_single. congruence.
Qed.
Lemma concat_assoc_map_push : forall f E F x v,
E &p (map f (F &p x ~p v)) = (E &p map f F) &p x ~p (f v).
Proof using.
intros. rewrite map_concat. rewrite map_single.
rewrite~ concat_assoc.
Qed.
Lemma get_push : forall k x v E,
get k (E &p x ~p v) =
If k = x
then Some v
else get k E.
Proof using.
intros. rewrite get_concat. rewrite get_single. case_if~.
Qed.
Ltac simpl_dom :=
rewrite_all dom_push in *;
rewrite_all dom_empty in *.
Lemma get_some : forall x E,
x \in dom E -> exists v, get x E = Some v.
(* i.e. [x \in dom E -> exists v, binds x v E] *)
Proof using. (* beautify *)
unfold binds.
introv H. rew_env_defs. induction E as [|[y v] E'].
rew_list in H. rewrite* in_empty in H.
unfold get_impl. case_if.
eauto.
rew_list in H. rewrite in_union in H. destruct H as [H|H].
rewrite in_singleton in H. simpls. false.
forwards* [v' ?]: IHE'.
Qed.
Lemma get_some_inv : forall x v E,
get x E = Some v -> x \in dom E.
(* i.e. [binds x v E -> x \in dom E] *)
Proof using. (* beautify *)
unfold binds.
introv H. rew_env_defs. unfolds get_impl. induction E as [|[y v'] E'].
false.
rew_list. rewrite in_union. simpl. case_if.
inverts H. subst. left. rewrite~ in_singleton.
forwards*: IHE'.
Qed.
Lemma get_none : forall x E,
x #p E -> get x E = None.
Proof using.
induction E using env_ind; introv In.
rewrite~ get_empty.
rewrite~ get_push. case_if.
simpl_dom. subst.
(* BUG: admitted until LibGenEnv works.
notin_false. simpl_dom. auto.
Qed.
*)
Admitted.
Lemma get_none_inv : forall x E,
get x E = None -> x #p E.
Proof using.
induction E using env_ind; introv Eq.
simpl_dom. auto.
rewrite get_push in Eq. case_if~.
simpl_dom. auto.
(* BUG: admitted until LibGenEnv works. *)
Admitted.
End MoreProperties.
Lemma binds_get_or_arbitrary : forall `{Inhab A} x v (E:varpathenv A),
binds x v E -> get_or_arbitrary x E = v.
Proof using. introv M. unfold get_or_arbitrary. rewrite~ M. Qed.
Definition indom_dec A (E:varpathenv A) x :=
match get x E with None => false | Some _ => true end.
Global Instance indom_decidable : forall A (E:varpathenv A) x,
Decidable (x \in dom E).
Proof using.
intros. applys decidable_make (indom_dec E x).
unfold indom_dec. cases (get x E) as C.
lets: (get_some_inv C). rewrite~ isTrue_true.
lets: (get_none_inv C). rewrite~ isTrue_false.
Qed.
(* ---------------------------------------------------------------------- *)
(** ** Hints and rewriting tactics *)
Hint Constructors okp.
Hint Rewrite dom_empty dom_single dom_concat : rew_env_dom.
Hint Rewrite map_empty map_single map_concat : rew_env_map.
Hint Rewrite map_keys_empty map_keys_single
map_keys_concat : rew_env_map_keys.
Hint Rewrite get_empty get_single get_concat : rew_env_get.
Hint Rewrite concat_empty_r concat_empty_l concat_assoc : rew_env_concat.
Tactic Notation "rew_env_concat" :=
autorewrite with rew_env_concat.
Tactic Notation "rew_env_concat" "in" hyp(H) :=
autorewrite with rew_env_concat in H.
Tactic Notation "rew_env_concat" "in" "*" :=
autorewrite_in_star_patch ltac:(fun tt => autorewrite with rew_env_concat).
(* autorewrite with rew_env_concat in *. *)
Ltac simpl_dom :=
rewrite_all dom_map in *;
rewrite_all dom_push in *;
rewrite_all dom_concat in *;
rewrite_all dom_single in *;
rewrite_all dom_empty in *.
Hint Extern 1 (_ #p _) => (* idtac "(_ #p _)";*) trace_goal; simpl_dom; notin_solve.
(* ---------------------------------------------------------------------- *)
(** ** Properties of well-formedness and freshness *)
Section OkpProperties.
Variable A B : Type.
Implicit Types k x : varpath.
Implicit Types v : A.
Implicit Types E F : varpathenv A.
Lemma okp_push_inv : forall E x v,
okp (E &p x ~p v) -> okp E /\ x #p E.
Proof using.
intros. inverts H as H1 H2.
false (empty_push_inv H1).
destructs 3 (eq_push_inv H). subst~.
Qed.
Lemma okp_push_inv_okp : forall E x v,
okp (E &p x ~p v) -> okp E.
Proof using. introv H. destruct* (okp_push_inv H). Qed.
Lemma okp_concat_inv : forall E F,
okp (E &p F) -> okp E /\ okp F.
Proof using.
induction F using env_ind; rew_env_concat; introv Okp. auto.
destruct (okp_push_inv Okp).
destruct IHF.
auto.
(* BUG: admitted until LibGenEnv works. *)
(*
Qed.
*)
Admitted.
Lemma okp_concat_inv_l : forall E F,
okp (E &p F) -> okp E.
Proof using. introv H. lets*: okp_concat_inv H. Qed.
Lemma okp_concat_inv_r : forall E F,
okp (E &p F) -> okp F.
Proof using. introv H. lets*: okp_concat_inv H. Qed.
Lemma okp_middle_change : forall E F x v1 v2,
okp (E &p x ~p v1 &p F) -> okp (E &p x ~p v2 &p F).
Proof using.
induction F using env_ind; introv; rew_env_concat; introv Okp.
destruct* (okp_push_inv Okp).
destruct* (okp_push_inv Okp).
Qed.
Lemma okp_middle_inv : forall E F x v,
okp (E &p x ~p v &p F) -> x #p E /\ x #p F.
Proof using.
induction F using env_ind; introv; rew_env_concat; intros Okp;
destruct (okp_push_inv Okp).
split~.
forwards~ [? ?]: IHF H.
(* BUG: admitted until LibGenEnv works. *)
Admitted.
Lemma okp_middle_inv_l : forall E F x v,
okp (E &p x ~p v &p F) -> x #p E.
Proof using. introv H. forwards~ [? _]: okp_middle_inv H. Qed.
Lemma okp_middle_inv_r : forall E F x v,
okp (E &p x ~p v &p F) -> x #p F.
Proof using. introv H. forwards~ [_ ?]: okp_middle_inv H. Qed.
Lemma okp_remove : forall F E G,
okp (E &p F &p G) -> okp (E &p G).
Proof using.
induction G using env_ind; rew_env_concat; introv Okp.
lets*: okp_concat_inv Okp.
lets*: okp_push_inv Okp.
(* BUG: admitted until LibGenEnv works. *)
Admitted.
Lemma okp_map : forall E (f : A -> B),
okp E -> okp (map f E).
Proof using.
induction E using env_ind; introv;
autorewrite with rew_env_map; rew_env_concat; intros Okp.
auto. destruct* (okp_push_inv Okp).
Qed.
Lemma okp_concat_map: forall E F (f : A -> A),
okp (E &p F) -> okp (E &p map f F).
Proof using.
induction F using env_ind; introv;
autorewrite with rew_env_map; rew_env_concat; intros Okp.
auto. destruct* (okp_push_inv Okp).
(* BUG: admitted until LibGenEnv works. *)
Admitted.
Fixpoint freshp (L : varpaths) (n : nat) (xs : list varpath) {struct xs} : Prop :=
match xs, n with
| nil, O => True
| x::xs', S n' => x \notin L /\ freshp (L \u \{x}) n' xs'
| _,_ => False
end.
Hint Extern 1 (freshp _ _ _) => (* idtac "(freshp _ _ _)";*) trace_goal; simpl.
Lemma okp_singles : forall n E xs (vs:list A),
freshp (dom E) n xs ->
length xs = length vs ->
okp E ->
okp (E &p xs ~p* vs).
Proof using.
introv F EQ O. gen E n vs.
induction xs; destruct vs; destruct n; intros; tryfalse.
rewrite singles_nil. rewrite~ concat_empty_r.
rew_length in EQ. inverts EQ.
(*
simpl in F. destruct F as [Fr F']. lets [? M]: (fresh_union_r F').
rewrite singles_cons. rewrite concat_assoc. applys okp_push.
applys~p IHxs n.
simpl_dom. rewrite~ dom_singles. lets~: fresh_single_notin M.
Qed.
*)
Admitted.
(* LATER: not used
Lemma singles_okp : forall xs vs E,
okp E ->
fresh (dom E) (length xs) xs ->
okp (E &p xs ~p* vs).
Proof using. ...
induction xs; simpl; intros. auto.
destruct H0. destruct Us; simpls. auto.
rewrite iter_push_cons. rewrite* <- concat_assoc.
*)
(* LATER: not used;
missing a precondition unless nil~p*vs returns nil
Lemma okp_concat_singles : forall n xs vs E,
okp E ->
fresh (dom E) n xs ->
okp (E &p xs ~p* vs).
Proof using.
intros. rew_env_defs. gen vs n E.
induction xs; introv Okp Fr; destruct n; tryfalse.
auto.
destruct vs. simpl.
rew_list in Fr. destruct Fr as [F Fr'].
applys_to Fr notin_union_r...
Qed.
*)
(* LATER: not used
Lemma okp_concat : forall E F,
okp E -> okp F -> disjoint (dom E) (dom F) ->
okp (E &p F).
*)
End OkpProperties.
Implicit Arguments okp_push_inv [A E x v].
Implicit Arguments okp_concat_inv [A E F].
Implicit Arguments okp_remove [A F E G].
Implicit Arguments okp_map [A E f].
Implicit Arguments okp_middle_inv_l [A E F x v].
Implicit Arguments okp_middle_inv_r [A E F x v].
Implicit Arguments okp_middle_inv [A E F x v].
(** Automation *)
Hint Resolve okp_middle_inv_l okp_map okp_concat_map okp_singles.
Hint Extern 1 (okp (?E &p ?G)) => (* idtac "(okp (?E &p ?G))";*) trace_goal;
match goal with H: okp (E &p ?F &p G) |- _ =>
apply (okp_remove H) end.
Hint Extern 1 (okp (?E)) => (* idtac "(okp (?E))";*) trace_goal;
match goal with H: okp (E &p _ ~p _) |- _ =>
apply (okp_push_inv_okp H) end.
Hint Extern 1 (okp (?E)) => (* idtac "(okp (?E))";*) trace_goal;
match goal with H: okp (E &p _) |- _ =>
apply (okp_concat_inv_l H) end.
Hint Extern 1 (okp (?E)) => (* idtac "(okp (?E))";*) trace_goal;
match goal with H: okp (_ &p E) |- _ =>
apply (okp_concat_inv_r H) end.
(* not used
Hint Extern 1 (okp (_ &p ?xs ~p* ?vs)) =>
match goal with H: fresh _ ?n xs |- _ =>
match type of vs with list ?A =>
apply (@okp_concat_singles A n)
end end.
*)
(* ---------------------------------------------------------------------- *)
(** ** Properties of the binds relation *)
Section BindsProperties.
Variable A B : Type.
Implicit Types E F : varpathenv A.
Implicit Types x : varpath.
Implicit Types v : A.
Lemma binds_get : forall x v E,
binds x v E -> get x E = Some v.
Proof using. auto. Qed.
(** Constructor forms *)
Lemma binds_empty_inv : forall x v,
binds x v empty -> False.
Proof using.
unfold binds. introv H. rewrite get_empty in H. false.
Qed.
Lemma binds_single_eq : forall x v,
binds x v (x ~p v).
Proof using.
intros. unfold binds. rewrite get_single. case_if~.
Qed.
Lemma binds_single_inv : forall x1 x2 v1 v2,
binds x1 v1 (x2 ~p v2) ->
x1 = x2 /\ v1 = v2.
Proof using.
unfold binds. introv H. rewrite get_single in H.
case_if; inversions~ H.
Qed.
Lemma binds_push_inv : forall x1 v1 x2 v2 E,
binds x1 v1 (E &p x2 ~p v2) ->
(x1 = x2 /\ v1 = v2)
\/ (x1 <> x2 /\ binds x1 v1 E).
Proof using.
introv H. unfolds binds. rewrite get_push in H. case_if.
inverts~ H. auto.
Qed.
Lemma binds_push_eq : forall x v E,
binds x v (E &p x ~p v).
Proof using. intros. unfolds binds. rewrite get_push. case_if~. Qed.
Lemma binds_push_eq_inv : forall x v1 v2 E,
binds x v1 (E &p x ~p v2) -> v1 = v2.
Proof using.
introv H. forwards [|]: binds_push_inv H. autos*. intros [? _]. false.
Qed.
Lemma binds_push_neq_inv : forall x1 x2 v1 v2 E,
binds x1 v1 (E &p x2 ~p v2) -> x1 <> x2 -> binds x1 v1 E.
Proof using.
introv H. forwards [|]: binds_push_inv H.
intros [? ?] ?. false. autos*.
Qed.
Lemma binds_tail : forall x v E,
binds x v (E &p x ~p v).
Proof using. intros. unfold binds. rewrite get_push. cases_if~. Qed.
Lemma binds_push_neq : forall x1 x2 v1 v2 E,
binds x1 v1 E -> x1 <> x2 -> binds x1 v1 (E &p x2 ~p v2).
Proof using.
introv H N. unfolds binds. rewrite get_push. case_if~.
Qed.
Lemma binds_concat_inv : forall x v E1 E2,
binds x v (E1 &p E2) ->
(binds x v E2)
\/ (x #p E2 /\ binds x v E1).
Proof using.
introv H. induction E2 using env_ind.
rewrite~ concat_empty_r in H.
rewrite concat_assoc in H.
forwards [[? ?]|[? M]]: binds_push_inv H.
subst. left. apply binds_tail.
forwards [?|[? ?]]: IHE2 M.
left. applys~ binds_push_neq.
right~.
(*
Qed.
*)
Admitted.
Lemma binds_map : forall x v (f : A -> B) E,
binds x v E -> binds x (f v) (map f E).
Proof using.
introv H. unfolds binds. rew_env_defs.
induction E as [|[x' v'] E']; simpls.
false.
cases_if~. inverts~ H.
Qed.
(** Basic forms *)
Lemma binds_func : forall x v1 v2 E,
binds x v1 E -> binds x v2 E -> v1 = v2.
Proof using.
introv H1 H2. unfolds binds.
induction E as [|E' x' v'] using env_ind.
rewrite get_empty in H1. false.
rewrite get_push in H1,H2. case_if~.
inverts H1. inverts~ H2.
Qed.
Lemma binds_fresh_inv : forall x v E,
binds x v E -> x #p E -> False.
Proof using.
introv H F. unfolds binds.
induction E as [|E' x' v'] using env_ind.
rewrite get_empty in H. false.
rewrite get_push in H. (*
case_if~. subst.
simpl_dom; notin_false.
Qed.
*)
Admitted.
(** Derived forms *)
Lemma binds_single_eq_inv : forall x v1 v2,
binds x v1 (x ~p v2) ->
v1 = v2.
Proof using.
introv H. unfolds binds. rewrite get_single in H.
case_if. inverts~ H.
Qed.
Lemma binds_concat_left : forall x v E1 E2,
binds x v E1 ->
x #p E2 ->
binds x v (E1 &p E2).
Proof using.
introv H F. induction E2 using env_ind.
rewrite~ concat_empty_r.
rewrite concat_assoc. (*
applys~ binds_push_neq.
simpl_dom. auto.
Qed.
*)
Admitted.
Lemma binds_concat_left_okp : forall x v E1 E2,
okp (E1 &p E2) ->
binds x v E1 ->
binds x v (E1 &p E2).
Proof using.
introv O H. induction E2 using env_ind.
rewrite~ concat_empty_r.
rewrite concat_assoc in O|-*. lets [_ ?]: okp_push_inv O.
applys~ binds_push_neq. intro_subst. (*
applys~ binds_fresh_inv H.
Qed.
*)
Admitted.
Lemma binds_concat_left_inv : forall x v E1 E2,
binds x v (E1 &p E2) ->
x #p E2 ->
binds x v E1.
Proof using.
introv H F. lets~ [M|[? ?]]: binds_concat_inv H.
false. applys~ binds_fresh_inv M.
Qed.
Lemma binds_concat_right : forall x v E1 E2,
binds x v E2 ->
binds x v (E1 &p E2).
Proof using.
introv H. induction E2 using env_ind.
false. applys* binds_empty_inv.
rewrite concat_assoc. lets [[? ?]|[? ?]]: binds_push_inv H.
subst. applys binds_tail.
applys~ binds_push_neq.
Qed.
Lemma binds_concat_right_inv : forall x v E1 E2,
binds x v (E1 &p E2) ->
x #p E1 ->
binds x v E2.
Proof using.
introv H F. lets~ [?|[? M]]: binds_concat_inv H.
false. applys~ binds_fresh_inv M.
Qed.
Lemma binds_middle_eq : forall x E1 E2 v,
x #p E2 ->
binds x v (E1 &p x ~p v &p E2).
Proof using.
introv F. applys~ binds_concat_left. applys binds_tail.
Qed.
(** Metatheory proof forms *)
(** Interaction between binds and the insertion of bindings.
In theory we don't need this lemma since it would suffice
to use the binds_cases tactics, but since weakening is a
very common operation we provide a lemma for it. *)
Lemma binds_weaken : forall x a E F G,
binds x a (E &p G) -> okp (E &p F &p G) ->
binds x a (E &p F &p G).
Proof using.
introv H O. lets [?|[? ?]]: binds_concat_inv H.
applys~ binds_concat_right.
applys~ binds_concat_left. applys~ binds_concat_left_okp.
Qed.
Lemma binds_remove : forall E2 E1 E3 x v,
binds x v (E1 &p E2 &p E3) ->
x #p E2 ->
binds x v (E1 &p E3).
Proof using.
introv H F. lets [?|[? M]]: binds_concat_inv H.
applys~ binds_concat_right.
forwards~: binds_concat_left_inv M. applys~ binds_concat_left.
Qed.
Lemma binds_subst : forall x2 v2 x1 v1 E1 E2,
binds x1 v1 (E1 &p x2 ~p v2 &p E2) ->
x1 <> x2 ->
binds x1 v1 (E1 &p E2).
Proof using. introv H N. applys~ binds_remove H.
(*
Qed.
*)
Admitted.
Lemma binds_middle_eq_inv : forall x E1 E2 v1 v2,
binds x v1 (E1 &p x ~p v2 &p E2) ->
okp (E1 &p x ~p v2 &p E2) ->
v1 = v2.
Proof using.
introv H O. lets [? ?]: okp_middle_inv O.
forwards~ M: binds_concat_left_inv H.
applys~ binds_push_eq_inv M.
Qed.
Lemma binds_middle_inv : forall x1 v1 x2 v2 E1 E2,
binds x1 v1 (E1 &p x2 ~p v2 &p E2) ->
(binds x1 v1 E2)
\/ (x1 #p E2 /\ x1 = x2 /\ v1 = v2)
\/ (x1 #p E2 /\ x1 <> x2 /\ binds x1 v1 E1).
Proof using.
introv H. lets [?|[? M]]: (binds_concat_inv H).
left~.
right. lets [N|[? N]]: (binds_concat_inv M).
lets [? ?]: (binds_single_inv N). subst~.
right. simpl_dom. split~.
(*
Qed.
*)
Admitted.
Lemma binds_not_middle_inv : forall x v E1 E2 E3,
binds x v (E1 &p E2 &p E3) ->
x #p E2 ->
(binds x v E3)
\/ (x #p E3 /\ binds x v E1).
Proof using.
introv H F. lets [?|[? M]]: (binds_concat_inv H).
left~.
right. forwards~ N: (binds_concat_left_inv M).
Qed.
Lemma fv_in_values_binds : forall y fv x v E,
binds x v E -> y \notin fv_in_values fv E -> y \notin fv v.
Proof using.
unfold fv_in_values. introv H.
induction E using env_ind; introv M.
false. applys* binds_empty_inv.
rewrite values_def in M,IHE.
rewrite concat_def, single_def in M. rew_list in M. simpl in M.
lets [[? ?]|[? ?]]: (binds_push_inv H); subst~.
Qed.
(* unused -- requires a precondition on f
Lemma binds_keys : forall x v f E,
binds x v E -> binds (f x) v (map_keys f E).
Proof using.
Qed.
*)
(* unused
Lemma binds_concat_inv_okp : forall x v E1 E2,
okp (E1 &p E2) ->
binds x v (E1 &p E2) ->
(x #p E2 /\ binds x v E1)
\/ (x #p E1 /\ binds x v E2).
Proof using.
introv O H. induction E2 using env_ind.
rewrite~ concat_empty_r in H.
rewrite concat_assoc in O,H.
forwards [[? ?]|[? M]]: binds_push_inv H.
subst. left. apply conj_dup_r.
split.
apply binds_tail.
forwards [?|[? ?]]: IHE2 M.
left. applys~ binds_push_neq.
right~....
Qed.
*)
End BindsProperties.
(* ---------------------------------------------------------------------- *)
(** ** Tactics *)
Hint Resolve binds_push_eq binds_push_neq
binds_map binds_concat_left binds_concat_right.
Tactic Notation "binds_mid" :=
match goal with H: binds ?x ?v1 (_ &p ?x ~p ?v2 &p _) |- _ =>
asserts: (v1 = v2); [ apply (binds_middle_eq_inv H) | subst; clear H ]
end.
Tactic Notation "binds_mid" "~" :=
binds_mid; auto_tilde.
Tactic Notation "binds_mid" "*" :=
binds_mid; auto_star.
Tactic Notation "binds_push" constr(H) :=
match type of H with binds ?x1 ?v1 (_ &p ?x2 ~p ?v2) =>
destruct (binds_push_inv H) as [[? ?]|[? ?]]; [ subst x2 v2 | ]
end.
Tactic Notation "binds_push" "~" constr(H) :=
binds_push H; auto_tilde.
Tactic Notation "binds_push" "*" constr(H) :=
binds_push H; auto_star.
(* ---------------------------------------------------------------------- *)
(** ** Properties of environment inclusion *)
Section ExtendsProperties.
Variable A : Type.
Implicit Types x : varpath.
Implicit Types v : A.
Implicit Types E F : varpathenv A.
Lemma extends_refl : forall E,
extends E E.
Proof using. intros_all~. Qed.
Lemma extends_push : forall E x v,
x #p E -> extends E (E &p x ~p v).
Proof using.
introv Fr. intros x' v' B. unfolds binds.
rewrite get_push. case_if~.
lets: get_none Fr. false.
Qed.
Lemma extends_concat_l : forall E F,
extends F (E &p F).
Proof using.
introv B. unfolds binds.
rewrite get_concat. rewrite~ B.
Qed.
Lemma extends_concat_r : forall E F,
disjoint (dom E) (dom F) ->
extends E (E &p F).
Proof using.
introv D B. unfolds binds.
lets: get_some_inv A B.
forwards M: get_none A x F.
applys~ disjoint_in_notin D.
rewrite get_concat. rewrite~ M.
Qed.
(* not used
Lemma extends_push_reoccur : forall E x v,
binds x v E -> extends E (E &p x ~p v).
*)
End ExtendsProperties.
Hint Resolve extends_refl extends_push.
(* ********************************************************************** *)
(** ** Tactics for case analysis on binding relations *)
(** [binds_get_nosubst H as EQ] produces from an hypothesis [H] of
the form [binds x a (E &p x ~p b &p F)] the equality [EQ: a = b]. *)
Ltac binds_get_nosubst_base H EQ :=
match type of H with
| binds ?x ?v1 (?E1 &p ?x ~p ?v2 &p ?E2) =>
forwards EQ: (@binds_middle_eq_inv _ x E1 E2 v1 v2 H); [ auto | ]
(* | binds ?x1 ?v1 (?E1 &p ?x2 ~p ?v2 &p ?E2) =>
forwards EQ: (@binds_middle_inv _ x1 v1 x2 v2 E1 E2); [ | auto ] *)
end.
Tactic Notation "binds_get_nosubst" constr(H) "as" ident(EQ) :=
binds_get_nosubst_base H EQ.
Tactic Notation "binds_get_nosubst" constr(H) :=
let EQ := fresh "EQ" in binds_get_nosubst H as EQ.
(** [binds_get H] expects an hypothesis [H] of the form
[binds x a (E &p x ~p b &p F)] and substitute [a] for [b] in the goal. *)
Ltac binds_get H :=
let EQ := fresh in binds_get_nosubst H as EQ;
try match type of EQ with
| ?f _ = ?f _ => inversions EQ
| ?x = ?y => subst x end.
(** [binds_single H] derives from an hypothesis [H] of the form
[binds x a (y ~p b)] the equalities [x = y] and [a = b], then
it substitutes [x] for [y] in the goal or deduce a contradiction
if [x <> y] can be found in the context. *)
Ltac binds_single H :=
match type of H with binds ?x ?a (?y ~p ?b) =>
let EQ := fresh "EQ" in
destruct (binds_single_inv H) as [? EQ];
try discriminate; try subst y;
try match goal with N: ?x <> ?x |- _ =>
false; apply N; reflexivity end end.
(** [binds_case H as B1 B2] derives from an hypothesis [H] of the form
[binds x a (E &p F)] two subcases: [B1: binds x a E] (with a freshness
condition [x #p F]) and [B2: binds x a F]. *)
Lemma binds_concat_inv' : forall A, forall x (v:A) E1 E2,
binds x v (E1 &p E2) ->
(x #p E2 /\ binds x v E1)
\/ (binds x v E2).
Proof using. intros. forwards K: binds_concat_inv A H. destruct* K. Qed.
Tactic Notation "binds_case" constr(H) "as" ident(B1) ident(B2) :=
let Fr := fresh "Fr" in
destruct (binds_concat_inv' H) as [[Fr B1] | B2].
(** [binds_case H] makes a case analysis on an hypothesis [H] of the form
[binds x a E] where E can be constructed using concatenation and
singletons. It calls [binds_single] when reaching a singleton. *)
Ltac binds_cases H :=
let go B := clear H;
first [ binds_single B | binds_cases B | idtac ] in
let B1 := fresh "B" in let B2 := fresh "B" in
binds_case H as B1 B2; (*fix_env;*) [ go B1 | go B2 ].
(* TODO: add support for binds_empty_inv *)
(* LATER: improve the above tactic using pattern matching
Ltac binds_cases_base H :=
match H with
| binds _ _ empty => false (binds_empty_inv H)
| binds _ _ (_ &p _) =>
let H1 := fresh "B" in
destruct (binds_concat_inv H) as [H1|H1];
clear H; binds_cases_base H1
| binds _ _ (_ ~p _) =>
| _ => idtac
end.
*)
End LVPE.
|
module Time
import Coda.Range
%access public export
%default total
{-
ISO 8601 Time Package
Time Representation includes:
Hour : Minute : Second (+/-) Offset
Where the (+/-) Offset is represented as a TimeZone (a named Offset),
Hour : Minute : Second is represented as an Instant,
and the combination of both is represented as a LocalTime
-}
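{-
Illustrative sketch (assuming the bound proofs are found by the auto prf
arguments): the local time 13:45:30+02:00 could be written as
MkLocalTime (MkTimeZone "UTC+2" (MkOffset (+) (MkInstant (MkHour 2) (MkMin 0) (MkSec 0))))
(MkInstant (MkHour 13) (MkMin 45) (MkSec 30))
where the zone name "UTC+2" is only an example.
-}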
{-
Type Definitions
-}
data Second : Type where
MkSec : (s : Nat) -> { auto prf : (Dec (0 <= s = s < 60) ) } -> Second
%name Second s,s1,s2,s3
data Minute : Type where
MkMin : (m : Nat) -> { auto prf : (Dec (0 <= m = m < 60) ) } -> Minute
%name Minute m,m1,m2,m3
data Hour : Type where
MkHour : (h : Nat) -> { auto prf : (Dec (0 <= h = h < 24) ) } -> Hour
%name Hour h,h1,h2,h3
data Instant : Type where
MkInstant : (h : Hour) -> (m : Minute) -> (s : Second) -> Instant
%name Instant i,i1,i2,i3
data Sign : Type where
(+) : Sign
(-) : Sign
%name Sign s,s1,s2,s3
data Offset : Type where
MkOffset : (sign : Sign) -> (i : Instant) -> Offset
%name Offset off,off1,off2,off3
data TimeZone : Offset -> Type where
MkTimeZone : (name : String) -> (offset : Offset) -> TimeZone offset
%name TimeZone tz,tz1,tz2,tz3
data LocalTime : TimeZone offset -> Type where
MkLocalTime : (tz : TimeZone offset) -> (i : Instant) -> LocalTime tz
%name LocalTime lt,lt1,lt2,lt3
{-
Standard Interface Implementations
-}
Eq Second where
(==) (MkSec s) (MkSec s') = s == s'
Ord Second where
compare (MkSec s) (MkSec s') = compare s s'
Show Second where
show (MkSec s) = if s < 10 then "0"++show s else show s
Eq Minute where
(==) (MkMin s) (MkMin s') = s == s'
Ord Minute where
compare (MkMin s) (MkMin s') = compare s s'
Show Minute where
show (MkMin m) = if m < 10 then "0"++show m else show m
Eq Hour where
(==) (MkHour s) (MkHour s') = s == s'
Ord Hour where
compare (MkHour s) (MkHour s') = compare s s'
Show Hour where
show (MkHour h) = if h < 10 then "0"++show h else show h
Eq Instant where
(==) (MkInstant h m s) (MkInstant h' m' s') = h == h' && m == m' && s == s'
Ord Instant where
compare (MkInstant h m s) (MkInstant h' m' s') =
case compare h h' of
EQ => case compare m m' of
EQ => compare s s'
r => r
r => r
Show Instant where
show (MkInstant h m s) = "" ++ show h ++ ":" ++ show m ++ ":" ++ show s
Eq Sign where
(==) (+) (+) = True
(==) (-) (-) = True
(==) _ _ = False
Ord Sign where
compare (+) (-) = GT
compare (-) (+) = LT
compare (+) (+) = EQ
compare (-) (-) = EQ
Show Sign where
show (+) = "+"
show (-) = "-"
Eq Offset where
(==) (MkOffset sign i) (MkOffset sign' i') = sign == sign' && i == i'
Ord Offset where
compare (MkOffset sign i) (MkOffset sign' i') =
case compare sign sign' of
EQ => compare i i'
r => r
Show Offset where
show (MkOffset sign i@(MkInstant h m _)) =
if isZero i
then "Z"
else show sign ++ show h ++ ":" ++ show m
where isZero : Instant -> Bool
isZero (MkInstant (MkHour Z) (MkMin Z) (MkSec Z)) = True
isZero _ = False
Eq (TimeZone offset) where
(==) (MkTimeZone name _) (MkTimeZone name' _) = name == name'
Ord (TimeZone offset) where
compare (MkTimeZone name _) (MkTimeZone name' _) = compare name name'
Show (TimeZone offset) where
show (MkTimeZone name offset) = show name ++ show offset
Eq (LocalTime tz) where
(==) (MkLocalTime _ i) (MkLocalTime _ i') = i == i'
Ord (LocalTime tz) where
compare (MkLocalTime _ i) (MkLocalTime _ i') = compare i i'
Show (LocalTime (MkTimeZone name offset)) where
show (MkLocalTime (MkTimeZone name offset) i) = show i ++ show offset
{-
Utility Functions
-}
minuteToSeconds : Minute -> Integer
minuteToSeconds (MkMin m) = (toIntegerNat m) * 60
hourToSeconds : Hour -> Integer
hourToSeconds (MkHour h) = (toIntegerNat h) * 60 * 60
instantToSeconds : Instant -> Integer
instantToSeconds (MkInstant h m (MkSec s)) = (hourToSeconds h) +
(minuteToSeconds m) +
(toIntegerNat s)
Distance Instant Integer where
distance (MkRange x y) = (instantToSeconds y) - (instantToSeconds x)
instantZero : Instant
instantZero = (MkInstant (MkHour 0) (MkMin 0) (MkSec 0))
UTC : TimeZone (MkOffset (+) Time.instantZero)
UTC = MkTimeZone "UTC" (MkOffset (+) instantZero)
|
(** * Commutativity *)
From Coq Require Import
Classes.Morphisms.
From DEZ Require Export
Init.
(** ** Commutative Elements of a Form *)
Class IsCommElemsForm (A B : Type) (X : A -> A -> Prop)
(s : B -> B -> A) (a b : B) : Prop :=
comm_elems_form : X (s a b) (s b a).
(** ** Commutative Form *)
Class IsCommForm (A B : Type) (X : A -> A -> Prop)
(s : B -> B -> A) : Prop :=
comm_form (a b : B) : X (s a b) (s b a).
Section Context.
Context (A B : Type) (X : A -> A -> Prop)
(s : B -> B -> A).
(** Commutative forms are forms with commutative elements. *)
#[export] Instance comm_form_is_comm_elems_form
`{!IsCommForm X s} (a b : B) : IsCommElemsForm X s a b.
Proof. apply comm_form. Qed.
#[local] Instance comm_elems_form_is_comm_form
`{!forall a b : B, IsCommElemsForm X s a b} : IsCommForm X s.
Proof. intros a b. apply comm_elems_form. Qed.
End Context.
(** ** Commutative Elements of a Binary Operation *)
Class IsCommElemsBinOp (A : Type) (X : A -> A -> Prop)
(k : A -> A -> A) (x y : A) : Prop :=
comm_elems_bin_op : X (k x y) (k y x).
(** ** Commutative Binary Operation *)
(** This has the same shape as [Z.mul_comm]. *)
Class IsCommBinOp (A : Type) (X : A -> A -> Prop)
(k : A -> A -> A) : Prop :=
comm_bin_op (x y : A) : X (k x y) (k y x).
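(** For instance, integer addition is commutative in this sense:
[Z.add_comm] provides exactly the field [comm_bin_op] for
[k := Z.add] and [X := eq] (illustrative; [Z] is not imported here). *)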
Section Context.
Context (A : Type) (X : A -> A -> Prop)
(k : A -> A -> A).
(** Commutative binary operations
are binary operations with commutative elements. *)
#[export] Instance comm_bin_op_is_comm_elems_bin_op
`{!IsCommBinOp X k} (x y : A) : IsCommElemsBinOp X k x y.
Proof. apply comm_bin_op. Qed.
#[local] Instance comm_elems_bin_op_is_comm_bin_op
`{!forall x y : A, IsCommElemsBinOp X k x y} : IsCommBinOp X k.
Proof. intros x y. apply comm_elems_bin_op. Qed.
End Context.
(** ** Coherent Unary Functions *)
Class IsCommUnFns (A B0 B1 C : Type) (X : C -> C -> Prop)
(f : A -> B0) (g : A -> B1) (h : B1 -> C) (i : B0 -> C) : Prop :=
comm_un_fns (x : A) : X (h (g x)) (i (f x)).
(** ** Commutative Unary Operations *)
Class IsCommUnOps (A : Type) (X : A -> A -> Prop)
(f g : A -> A) : Prop :=
comm_un_ops (x : A) : X (f (g x)) (g (f x)).
Section Context.
Context (A : Type) (X : A -> A -> Prop)
(f g : A -> A).
(** Commutativity of unary operations
is a special case of their commutativity as unary functions. *)
#[export] Instance comm_un_ops_is_comm_un_fns
`{!IsCommUnOps X f g} : IsCommUnFns X f g f g.
Proof. auto. Qed.
#[local] Instance comm_un_fns_is_comm_un_ops
`{!IsCommUnFns X f g f g} : IsCommUnOps X f g.
Proof. auto. Qed.
End Context.
Section Context.
Context (A : Type) (X : A -> A -> Prop)
(f g : A -> A).
(** Commutative unary operations are commutative elements
of the endofunction monoid. *)
#[export] Instance comm_un_ops_is_comm_elems_bin_op_compose
`{!IsCommUnOps X f g} : IsCommElemsBinOp (pointwise_relation _ X) _o_ f g.
Proof. intros x. unfold compose. apply comm_un_ops. Qed.
#[local] Instance comm_elems_bin_op_compose_is_comm_un_ops
`{!IsCommElemsBinOp (pointwise_relation _ X) _o_ f g} : IsCommUnOps X f g.
Proof.
intros x.
change (f (g x)) with ((f o g) x).
change (g (f x)) with ((g o f) x).
pose proof comm_elems_bin_op x as a.
apply a.
Qed.
End Context.
(** ** Binary Functions Left-Commuting over Unary Functions *)
Class IsCommBinFnsL (A0 A1 B0 B1 C : Type) (X : C -> C -> Prop)
(k : A0 -> A1 -> B1) (f : A0 -> B0)
(m : B0 -> A1 -> C) (g : B1 -> C) : Prop :=
comm_bin_fns_l (x : A0) (y : A1) : X (m (f x) y) (g (k x y)).
(** ** Binary Functions Right-Commuting over Unary Functions *)
Class IsCommBinFnsR (A0 A1 B0 B1 C : Type) (X : C -> C -> Prop)
(k : A0 -> A1 -> B0) (f : A1 -> B1)
(m : A0 -> B1 -> C) (g : B0 -> C) : Prop :=
comm_bin_fns_r (x : A0) (y : A1) : X (m x (f y)) (g (k x y)).
Section Context.
Context (A0 A1 B0 B1 C : Type) (X : C -> C -> Prop)
(k : A0 -> A1 -> B1) (f : A0 -> B0) (m : B0 -> A1 -> C) (g : B1 -> C).
(** Left-commutativity of binary functions over unary functions
is a special case of the right-commutativity of their flipped versions. *)
#[local] Instance comm_bin_fns_l_is_comm_bin_fns_r_flip
`{!IsCommBinFnsL X k f m g} : IsCommBinFnsR X (flip k) f (flip m) g.
Proof. intros y x. unfold flip in *. eauto. Qed.
#[local] Instance comm_bin_fns_r_flip_is_comm_bin_fns_l
`{!IsCommBinFnsR X (flip k) f (flip m) g} : IsCommBinFnsL X k f m g.
Proof. intros x y. unfold flip in *. eauto. Qed.
End Context.
(** ** Right Action Left-Commuting over a Unary Function *)
Class IsCommActRL (A B : Type) (X : B -> B -> Prop)
(ar : B -> A -> B) (f : B -> B) : Prop :=
comm_act_r_l (a : B) (x : A) : X (ar (f a) x) (f (ar a x)).
Section Context.
Context (A B : Type) (X : B -> B -> Prop)
(f : B -> B) (ar : B -> A -> B).
(** Left-commutativity of a right action over a unary function
is a special case of its left-commutativity
as a binary function over a unary function. *)
#[export] Instance comm_act_r_l_is_comm_bin_fns_l
`{!IsCommActRL X ar f} : IsCommBinFnsL X ar f ar f.
Proof. auto. Qed.
#[local] Instance comm_bin_fns_l_is_comm_act_r_l
`{!IsCommBinFnsL X ar f ar f} : IsCommActRL X ar f.
Proof. auto. Qed.
End Context.
(** ** Left Action Right-Commuting over a Unary Function *)
Class IsCommActLR (A B : Type) (X : B -> B -> Prop)
(al : A -> B -> B) (f : B -> B) : Prop :=
comm_act_l_r (x : A) (a : B) : X (al x (f a)) (f (al x a)).
Section Context.
Context (A B : Type) (X : B -> B -> Prop)
(f : B -> B) (al : A -> B -> B).
(** Right-commutativity of a left action over a unary function
is a special case of its right-commutativity
as a binary function over a unary function. *)
#[export] Instance comm_act_l_r_is_comm_bin_fns_r
`{!IsCommActLR X al f} : IsCommBinFnsR X al f al f.
Proof. auto. Qed.
#[local] Instance comm_bin_fns_r_is_comm_act_l_r
`{!IsCommBinFnsR X al f al f} : IsCommActLR X al f.
Proof. auto. Qed.
End Context.
(** ** Binary Operation Left-Commuting over a Unary Operation *)
(** This has the same shape as [Z.mul_opp_l]. *)
Class IsCommL (A : Type) (X : A -> A -> Prop)
(k : A -> A -> A) (f : A -> A) : Prop :=
comm_l (x y : A) : X (k (f x) y) (f (k x y)).
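(** For instance, with [X := eq], [k := Z.mul] and [f := Z.opp], [comm_l]
specializes to [(- x) * y = - (x * y)], which is [Z.mul_opp_l]
(illustrative; [Z] is not imported here). *)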
Section Context.
Context (A : Type) (X : A -> A -> Prop)
(k : A -> A -> A) (f : A -> A).
(** Left-commutativity of a binary operation over a unary operation
is a special case of its left-commutativity
as a binary function over a unary function. *)
#[export] Instance comm_l_is_comm_bin_fns_l
`{!IsCommL X k f} : IsCommBinFnsL X k f k f.
Proof. auto. Qed.
#[local] Instance comm_bin_fns_l_is_comm_l
`{!IsCommBinFnsL X k f k f} : IsCommL X k f.
Proof. auto. Qed.
(** Left-commutativity of a binary operation over a unary operation
is a special case of the commutativity
of its flipped partial application over the unary operation. *)
#[export] Instance comm_l_is_comm_un_ops_flip
`{!IsCommL X k f} (x : A) : IsCommUnOps X (flip k x) f.
Proof. intros y. unfold flip. apply comm_l. Qed.
#[local] Instance comm_un_ops_flip_is_comm_l
`{!forall x : A, IsCommUnOps X (flip k x) f} : IsCommL X k f.
Proof.
intros x y.
change (k x y) with (flip k y x).
change (k (f x) y) with (flip k y (f x)).
apply comm_un_ops.
Qed.
End Context.
(** ** Binary Operation Right-Commuting over a Unary Operation *)
(** This has the same shape as [Z.mul_opp_r]. *)
Class IsCommR (A : Type) (X : A -> A -> Prop)
(k : A -> A -> A) (f : A -> A) : Prop :=
comm_r (x y : A) : X (k x (f y)) (f (k x y)).
Section Context.
Context (A : Type) (X : A -> A -> Prop)
(k : A -> A -> A) (f : A -> A).
(** Right-commutativity of a binary operation over a unary operation
is a special case of its right-commutativity
as a binary function over a unary function. *)
#[export] Instance comm_r_is_comm_bin_fns_r
`{!IsCommR X k f} : IsCommBinFnsR X k f k f.
Proof. auto. Qed.
#[local] Instance comm_bin_fns_r_is_comm_r
`{!IsCommBinFnsR X k f k f} : IsCommR X k f.
Proof. auto. Qed.
(** Right-commutativity of a binary operation over a unary operation
is a special case of the commutativity
of its partial application over the unary operation. *)
#[export] Instance comm_r_is_comm_un_ops
`{!IsCommR X k f} (x : A) : IsCommUnOps X (k x) f.
Proof. intros y. apply comm_r. Qed.
#[local] Instance comm_un_ops_is_comm_r
`{!forall x : A, IsCommUnOps X (k x) f} : IsCommR X k f.
Proof. intros x y. apply comm_un_ops. Qed.
End Context.
(** ** Commutative Binary Operation over a Unary Operation *)
Class IsComm (A : Type) (X : A -> A -> Prop)
(k : A -> A -> A) (f : A -> A) : Prop := {
comm_is_comm_l :> IsCommL X k f;
comm_is_comm_r :> IsCommR X k f;
}.
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Algebra.Monoid where
open import Cubical.Algebra.Monoid.Base public
|
[STATEMENT]
lemma hext_heap_copy_loc:
"heap_copy_loc a a' al h obs h' \<Longrightarrow> h \<unlhd> h'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. heap_copy_loc a a' al h obs h' \<Longrightarrow> h \<unlhd> h'
[PROOF STEP]
by(blast elim: heap_copy_loc.cases dest: hext_heap_ops) |
[STATEMENT]
lemma rel_pmf_return_pmfI: "P x y \<Longrightarrow> rel_pmf P (return_pmf x) (return_pmf y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P x y \<Longrightarrow> rel_pmf P (return_pmf x) (return_pmf y)
[PROOF STEP]
by(rule rel_pmf.intros[where pq="return_pmf (x, y)"])(simp_all) |
/-
This defines the ArithM monad basic operations used to generate a system of
numerical equations from a Lean local context.
-/
import ClausalExtraction.Basic
import ClausalExtraction.ArithTheory.Int
open Lean -- (Expr levelZero levelOne mkApp mkAppN mkConst mkRawNatLit)
open Std (HashMap)
namespace ClausalExtraction
namespace ArithTheory
section ExpressionUtils
def natExpr : Expr := mkConst ``Nat
def intExpr := mkConst ``Int
private def notExpr : Expr := mkConst ``Not
def intAddConst : Expr :=
let f := mkConst ``HAdd.hAdd [levelZero, levelZero, levelZero]
let inst := mkAppN (mkConst ``instHAdd [levelZero]) #[intExpr, mkConst ``Int.instAddInt]
mkAppN f #[intExpr, intExpr, intExpr, inst]
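-- Illustratively, `intAddConst` is the unapplied function
-- `@HAdd.hAdd Int Int Int (instHAdd Int Int.instAddInt)`, so
-- `mkAppN intAddConst #[x, y]` denotes the term `x + y` at type `Int`.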
def intSubConst : Expr :=
let f := mkConst ``HSub.hSub [levelZero, levelZero, levelZero]
let inst := mkAppN (mkConst ``instHSub [levelZero]) #[intExpr, mkConst ``Int.instSubInt]
mkAppN f #[intExpr, intExpr, intExpr, inst]
def intMulConst : Expr :=
let f := mkConst ``HMul.hMul [levelZero, levelZero, levelZero]
let inst := mkAppN (mkConst ``instHMul [levelZero]) #[intExpr, mkConst ``Int.instMulInt]
mkAppN f #[intExpr, intExpr, intExpr, inst]
def intNegConst : Expr :=
let f := mkConst ``Neg.neg [levelZero]
let inst := mkConst ``Int.instNegInt
mkAppN f #[intExpr, inst]
-- A structure used to denote an integer expression
structure IntExpr where
toExpr : Expr
deriving BEq, Hashable, Inhabited
namespace IntExpr
instance : Coe IntExpr Expr where
coe := IntExpr.toExpr
instance : Add IntExpr where
add := λx y => IntExpr.mk (mkAppN intAddConst #[x, y])
end IntExpr
-- Create a nat as an int
def mkOfNat (n:Expr) : IntExpr :=
let ofNat := mkConst ``OfNat.ofNat [levelZero]
let inst := mkConst ``Int.instOfNatInt
IntExpr.mk (mkAppN ofNat #[intExpr, n, mkApp inst n])
-- Create a nat lit as an int
def natLitAsIntExpr (n:Nat) : IntExpr := mkOfNat (mkRawNatLit n)
def mkIntLit : Int → IntExpr
| Int.ofNat n => natLitAsIntExpr n
| Int.negSucc n => IntExpr.mk (mkApp intNegConst (natLitAsIntExpr (n+1)))
instance : Coe Int IntExpr where
coe := mkIntLit
def intZeroExpr : IntExpr := mkIntLit 0
private def intNonNegExpr : Expr := mkConst ``Int.NonNeg
def mkIntNonNegExpr (e:Expr) : Expr := mkApp intNonNegExpr e
private def intEqExpr : Expr := mkApp (mkConst ``Eq [levelOne]) (mkConst ``Int)
def mkIntEq0Expr (e:Expr) : Expr := mkAppN intEqExpr #[e, intZeroExpr]
end ExpressionUtils
-- Represents a polynomial.
structure Poly where
-- elements should be an array of (coefficient, variable) pairs sorted by
-- variable, where index 0 holds the constant term and all later
-- coefficients are non-zero.
elements : Array (Int × TheoryVar)
deriving BEq, Hashable, Repr
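-- For instance, the polynomial 3 + 2*v₅ + 7*v₉ (where v₅ and v₉ are theory
-- variables) would be stored as ⟨#[(3, ⟨0⟩), (2, ⟨5⟩), (7, ⟨9⟩)]⟩: index 0
-- carries the constant term, and the remaining entries are sorted by
-- variable (illustrative).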
namespace Poly
-- | Create polynomial denoting the constant z.
protected def const (z:Int) : Poly := ⟨#[(z, ⟨0⟩)]⟩
-- | Create polynomial denoting constant zero.
protected def zero : Poly := Poly.const 0
-- | Create polynomial denoting constant one.
protected def one : Poly := Poly.const 1
instance : Inhabited Poly := ⟨Poly.zero⟩
-- | Create polynomial denoting constant one.
-- protected def one : Poly := ⟨#[(1, ⟨0⟩)]⟩
def addc : Poly → Int → Poly
| ⟨a⟩, q =>
let (p,v) := a.get! 0
⟨a.set! 0 (p+q, v)⟩
-- | @add p i _ v@ returns poly denoting @p + i*v@.
def add : Poly → Int → TheoryVar → Poly
| ⟨a⟩, q, v =>
let rec loop : ∀(i : Nat), Poly
| 0 => ⟨a.insertAt 1 (q, v)⟩
| Nat.succ i =>
let (p,u) := a[i+1]
if v < u then
loop i
else if u < v then
⟨a.insertAt (i+2) (q,v)⟩
else -- v = u
let q := p+q
if q = 0 then
⟨a.eraseIdx (i+1)⟩
else
⟨a.set! (i+1) (q,v)⟩
loop (a.size-1)
protected def toString : Poly → String
| ⟨a⟩ =>
let scalarProd : Int × TheoryVar → String
| (m,v) => s!"{m}*{v}"
let firstScalarProd : Int × TheoryVar → String
| (m, _) => toString m
let polyIns := λ(e:String) h => s!"{e} + {scalarProd h}"
a[1:].foldl polyIns (firstScalarProd a[0])
instance : ToString Poly where
toString := Poly.toString
def scalarProd (f: v → IO IntExpr) : Int × v → IO IntExpr
| (m, v) => do IntExpr.mk (mkAppN intMulConst #[m, ← f v])
-- | Create an reflexivity proof from the int expression.
def mkIntRefl (e:IntExpr) : Expr := mkApp (mkApp (mkConst ``rfl [levelOne]) intExpr) e
-- | Map polynomial to expression given mapping from variables
-- to expressions.
-- The optional parameter allows this to take only the first n elements.
protected
def expr (poly:Poly) (f: TheoryVar → IO IntExpr) (limit: optParam Nat (poly.elements.size - 1)) : IO IntExpr := do
if poly.elements.size = 0 then
panic! "Empty polyExpr"
if limit ≥ poly.elements.size then
panic! "polyExpr given bad limit."
let mut e : IntExpr ← poly.elements[0].fst
for p in poly.elements[1:limit+1] do
e := e + (← scalarProd f p)
pure e
private
theorem polyProofAddContextLemma {c x a:Int} (h:x + c = a) (y:Int)
: (x + y) + c = a + y := by
simp [h.symm, Int.add_assoc, Int.add_comm y c]
-- polyProofAddContext s x c a h poly idx where h is a proof of "x + c = a" returns
-- a proof "(x + poly[idx] + poly[idx+1] + ..) + c = a + poly[idx] + poly[idx+1] + .."
private
def polyProofAddContext (f:TheoryVar → IO IntExpr) (x c a:IntExpr) (h:Expr) (poly:Poly) (idx:Nat) : IO Expr := do
let mut x := x
let mut a := a
let mut h := h
let pr := mkApp (mkConst ``polyProofAddContextLemma) c
for p in poly.elements[idx:] do
let y ← scalarProd f p
h := mkAppN pr #[x, a, h, y]
x := x + y
a := a + y
pure h
section Lemmas
private
theorem sum0Lemma (p q v:Int) : p*v + q*v = (p+q)*v := Eq.symm (Int.add_mul _ _ _)
private
theorem sumLemma (r p q v:Int) : (r + p*v) + q*v = r + (p+q)*v := by
apply Eq.trans (Int.add_assoc r _ _)
apply congrArg (fun y => r + y)
exact sum0Lemma _ _ _
private
theorem cancel0Lemma {p q:Int} (h : p+q = 0) (v:Int) : p*v + q*v = 0 := by
apply Eq.trans (sum0Lemma p q v)
exact @Eq.substr Int (λx => x * v = 0) _ _ h (Int.zero_mul v)
example : (64:Int) + -64 = 0 := @cancel0Lemma (64) (-64) (@rfl Int 0) 1
example : (-64:Int) + 64 = 0 := @cancel0Lemma (-64) (64) (@rfl Int 0) 1
example (v:Int): -64 * v + 64 * v = 0 := @cancel0Lemma (-64) 64 (@rfl Int 0) v
example (v:Int): 64 * v + -64 * v = 0 := @cancel0Lemma (64) (-64) (@rfl Int 0) v
private
theorem cancelLemma (r p q v:Int) (h : p+q = 0) : (r + p*v) + q*v = r := by
apply Eq.trans (Int.add_assoc r _ _)
exact Eq.trans (cancel0Lemma h v ▸ rfl) (Int.add_zero r)
end Lemmas
def addcProof (f:TheoryVar → IO IntExpr) (poly:Poly) (c:Int) (g:c ≠ 0) : IO Expr := do
let x := poly.elements[0].fst
let a := x + c
let h := mkIntRefl a
polyProofAddContext f x c a h poly 1
-- | @addProof f p m v@ returns proof showing that
-- @p.expr + scalarProd f (m, v) = (p.add m v).expr@.
def addProof (f:TheoryVar → IO IntExpr) : ∀(poly:Poly) (q:Int), q ≠ 0 → TheoryVar → IO Expr
| poly, q, g, v => do
let c ← scalarProd f (q, v)
let rec loop : ∀(i : Nat), IO Expr
| 0 => do
-- Reached the constant slot: v is smaller than every variable in the polynomial.
let x : IntExpr := poly.elements[0].fst
let a := x + c
let h := mkIntRefl a
polyProofAddContext f x c a h poly 1
| Nat.succ i => do
let (p,u) := poly.elements[i+1]
if v < u then
loop i
else if u < v then
let x ← poly.expr f (limit := i+1)
let a := x + c
let h := mkIntRefl a
polyProofAddContext f x c a h poly (i+2)
else -- v = u
if p+q = 0 then
let a ← poly.expr f (limit := i)
let x := a + (← scalarProd f (p,u))
let rflExpr := mkIntRefl intZeroExpr
-- Create proof: (a + -q*v) + q*v = a.
let h := mkAppN (mkConst ``cancelLemma) #[a, (-q : Int), q, ← f v, rflExpr]
polyProofAddContext f x c a h poly (i+2)
else
let r ← poly.expr f (limit := i)
let x := r + (←scalarProd f (p, u))
let a := r + (←scalarProd f (p+q, u))
let h := mkAppN (mkConst ``sumLemma) #[r, p, q, ←f v]
polyProofAddContext f x c a h poly (i+2)
loop (poly.elements.size - 1)
end Poly
-- Definition associated with a variable.
inductive Decl
-- A int variable from another theory.
| uninterpInt : Var → Decl
-- A nat variable from another theory.
| uninterpNat : Var → Decl
-- Theory variable is equal to polynomial.
| poly : Poly → Decl
deriving BEq, Hashable
namespace Decl
protected def toString : Decl → String
| uninterpInt v => s!"{v}"
| uninterpNat v => s!"ofNat {v}"
| poly p => s!"poly {p}"
instance : ToString Decl where
toString := Decl.toString
instance : Inhabited Decl := ⟨uninterpInt arbitrary⟩
end Decl
-- | An atomic predicate
inductive Pred where
-- This denotes a proof of the form (v = 0)
| IsEq0 : TheoryVar → Pred
-- This denotes a proof of the form (Not (v = 0))
| IsNe0 : TheoryVar → Pred
-- This denotes a proof of the form (Int.NonNeg v)
| IsGe0 : TheoryVar → Pred
deriving Inhabited
namespace Pred
protected def toString : Pred → String
| IsEq0 v => s!"IsEq0 {v}"
| IsNe0 v => s!"IsNe0 {v}"
| IsGe0 v => s!"IsGe0 {v}"
instance : ToString Pred := ⟨Pred.toString⟩
end Pred
def oneVar : TheoryVar := ⟨0⟩
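-- Theory variable 0 is reserved for the constant-one polynomial (see the
-- initial State below), matching the constant slot at index 0 of Poly.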
structure State : Type where
exprMap : HashMap Decl TheoryVar := Std.mkHashMap.insert (Decl.poly Poly.one) oneVar
vars : Array Decl := #[Decl.poly Poly.one]
preds : Array Pred := #[]
section
variable (r:IO.Ref State)
variable (f: Var → IO Expr)
-- | Return Lean expression associated with IntExpr
partial def thvarExpr (v:TheoryVar) : IO IntExpr := do
let s ← r.get
if p : v.toNat < s.vars.size then
match s.vars.get ⟨v.toNat, p⟩ with
| Decl.uninterpInt v => do
IntExpr.mk <$> f v
| Decl.uninterpNat v => do
mkOfNat <$> f v
| Decl.poly p => p.expr (thvarExpr)
else
panic! s!"Invalid theory variable index {v} (max = {s.vars.size})"
end
abbrev ArithM := ReaderT (IO.Ref State) SolverM
-- | Return a theory variable associated with the given uninterpreted Lean expression.
def getTheoryVar (d:Decl) : ArithM TheoryVar := do
let r ← read
let s ← r.get
match s.exprMap.find? d with
| none => pure ()
| some v => return v
let newVar := ⟨OfNat.ofNat s.vars.size⟩
if (newVar.index : UInt32) = 0 then
throwError m!"Only 2^32 arithmetic variables allowed."
r.set
{ s with exprMap := s.exprMap.insert d newVar,
vars := s.vars.push d }
return newVar
-- | Return a theory variable associated with the given uninterpreted Lean expression.
def getPolyVar (p:Poly) : ArithM TheoryVar := getTheoryVar (Decl.poly p)
def getThvarExpr (v:TheoryVar) : ArithM IntExpr := do
let svc ← (read : SolverM _)
let r ← read
thvarExpr r svc.varExpr v
-- | Return expression associated with in solver.
def getPolyExpr (poly:Poly) : ArithM IntExpr := do
let svc ← (read : SolverM _)
let r ← read
let f (v:TheoryVar) : IO IntExpr := thvarExpr r svc.varExpr v
poly.expr f
def getTheoryPred (p:Pred) : ArithM TheoryPred := do
let r ← read
let s ← r.get
if TheoryPred.max ≤ s.preds.size then
throwError "Only 2^32 arithmetic variables allowed."
let n := TheoryPred.ofNat (s.preds.size)
r.set { s with preds := s.preds.push p }
pure n
def mthvarExpr (r: IO.Ref State) (f : Var → IO Expr) (thv : TheoryVar) : IO Expr := do
IntExpr.toExpr <$> thvarExpr r f thv
def predExpr (r : IO.Ref State) (f : Var → IO Expr) (idx : TheoryPred) : IO Expr := do
let s ← r.get
if lt : idx.toNat < s.preds.size then
match s.preds.get ⟨idx.toNat, lt⟩ with
| Pred.IsEq0 v => mkIntEq0Expr <$> mthvarExpr r f v
| Pred.IsNe0 v => mkApp notExpr <$> (mkIntEq0Expr <$> mthvarExpr r f v)
| Pred.IsGe0 v => mkIntNonNegExpr <$> mthvarExpr r f v
else
panic s!"Invalid predicate index {idx} (max = {s.preds.size})"
end ArithTheory
end ClausalExtraction |
Formal statement is: lemma smallo_imp_eventually_sgn: fixes f g :: "real \<Rightarrow> real" assumes "g \<in> o(f)" shows "eventually (\<lambda>x. sgn (f x + g x) = sgn (f x)) at_top" Informal statement is: If $g(x)$ is $o(f(x))$, then eventually $sgn(f(x) + g(x)) = sgn(f(x))$. |
Formal statement is: lemma measurable_translation: "S \<in> lmeasurable \<Longrightarrow> ((+) a ` S) \<in> lmeasurable" Informal statement is: If $S$ is a Lebesgue measurable set, then $S + a$ is also Lebesgue measurable. |
!*robodoc*u* tests/testABH
! NAME
! testABH
! SYNOPSIS
!$Id: testABH.f90 389 2017-03-22 16:31:21Z mexas $
program testABH
! PURPOSE
! Checking: cgca_redand, part of cgca_m2red
! DESCRIPTION
! Checking collective AND reduction over a logical coarray.
! Works only when the number of images is 2**p,
! where p is an integer, so use 2, 4, 8, 16, 32, etc. images.
! NOTES
! The program must be called with 2 command line arguments,
! both positive integers. These are codimensions along 1 and 2.
! The number of images must be such that
! codimension3 = num_images()/( codimension1 * codimension2 )
! is a positive integer. Example:
! cafrun -np 16 ./testABH.x 2 2 ! OpenCoarrays
! or
! ./testABH.x 2 2 ! Intel, Cray
! which will make the third codimension equal to 16/(2*2)=4.
! AUTHOR
! Anton Shterenlikht
! COPYRIGHT
! See LICENSE
! USES
! cgca testaux
! USED BY
! Part of CGPACK test suite
! SOURCE
use testaux
implicit none
real, parameter :: l2 = log(real(2))
logical, parameter :: nodebug = .false.
real :: num
integer(kind=idef) :: p, nimages, img, codim(3)[*]
logical(kind=ldef) :: z[*]
!*********************************************************************72
! first executable statement
nimages=num_images()
img = this_image()
! check than n is a power of 2
p = nint(log(real(nimages))/l2)
if ( 2**p .ne. nimages) &
error stop "number of images is not a power of 2"
! do a check on image 1
if (img .eq. 1) then
call getcodim(nimages,codim)
! print a banner
call banner("ABH")
write (*,'(a,i0,a)') "running on ", nimages, " images in a 3D grid"
end if
! Trying to separate the output
sync all
! initialise random number seed
call cgca_irs(nodebug)
! assign z
call random_number(num)
if (num .gt. 0.5) then
z = .true.
else
z = .false.
end if
z = .true.
if (img .eq. nimages) z = .false.
write (*,*) "image", img, "z", z
! Trying to separate the output
sync all
! call collective AND
call cgca_redand(z,p)
write (*,*) "image", img, "answer", z
end program testABH
!*roboend*
|
[STATEMENT]
lemma rem_implicit_pres_ops_valid_plan:
assumes "wf_ast_problem prob"
"(\<And>op. op \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> consistent_pres_op op)"
"(\<And>op. op \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> is_standard_operator op)"
shows "ast_problem.valid_plan (rem_implicit_pres_ops prob) \<pi>s = ast_problem.valid_plan prob \<pi>s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ast_problem.valid_plan (rem_implicit_pres_ops prob) \<pi>s = ast_problem.valid_plan prob \<pi>s
[PROOF STEP]
using wf_ast_problem.I_valid[OF assms(1)] rem_implicit_pres_ops_path_to[OF assms]
[PROOF STATE]
proof (prove)
using this:
ast_problem.I prob \<in> ast_problem.valid_states prob
\<lbrakk>\<And>op. op \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> op \<in> set (ast_problem.ast\<delta> prob); \<And>op. op \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> op \<in> set (ast_problem.ast\<delta> prob); ?s \<in> ast_problem.valid_states prob\<rbrakk> \<Longrightarrow> ast_problem.path_to (rem_implicit_pres_ops prob) ?s ?\<pi>s ?s' = ast_problem.path_to prob ?s ?\<pi>s ?s'
goal (1 subgoal):
1. ast_problem.valid_plan (rem_implicit_pres_ops prob) \<pi>s = ast_problem.valid_plan prob \<pi>s
[PROOF STEP]
by (simp add: ast_problem.valid_plan_def rem_implicit_pres_ops_goal rem_implicit_pres_ops_init) |
lemma LIM_def: "f \<midarrow>a\<rightarrow> L \<longleftrightarrow> (\<forall>r > 0. \<exists>s > 0. \<forall>x. x \<noteq> a \<and> dist x a < s \<longrightarrow> dist (f x) L < r)" for a :: "'a::metric_space" and L :: "'b::metric_space" |
------------------------------------------------------------------------
-- The Agda standard library
--
-- Finite maps with indexed keys and values, based on AVL trees
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
open import Data.Product as Prod
open import Relation.Binary
open import Relation.Binary.PropositionalEquality using (_≡_; cong; subst)
import Data.AVL.Value
module Data.AVL.IndexedMap
{i k v ℓ}
{Index : Set i} {Key : Index → Set k} (Value : Index → Set v)
{_<_ : Rel (∃ Key) ℓ}
(isStrictTotalOrder : IsStrictTotalOrder _≡_ _<_)
where
import Data.AVL
open import Data.Bool
open import Data.List.Base as List using (List)
open import Data.Maybe.Base as Maybe
open import Function
open import Level
-- Key/value pairs.
KV : Set (i ⊔ k ⊔ v)
KV = ∃ λ i → Key i × Value i
-- Conversions.
private
fromKV : KV → Σ[ ik ∈ ∃ Key ] Value (proj₁ ik)
fromKV (i , k , v) = ((i , k) , v)
toKV : Σ[ ik ∈ ∃ Key ] Value (proj₁ ik) → KV
toKV ((i , k) , v) = (i , k , v)
-- The map type.
private
open module AVL =
Data.AVL (record { isStrictTotalOrder = isStrictTotalOrder })
using () renaming (Tree to Map')
Map = Map' (AVL.MkValue (Value ∘ proj₁) (subst Value ∘′ cong proj₁))
-- Repackaged functions.
empty : Map
empty = AVL.empty
singleton : ∀ {i} → Key i → Value i → Map
singleton k v = AVL.singleton (-, k) v
insert : ∀ {i} → Key i → Value i → Map → Map
insert k v = AVL.insert (-, k) v
delete : ∀ {i} → Key i → Map → Map
delete k = AVL.delete (-, k)
lookup : ∀ {i} → Key i → Map → Maybe (Value i)
lookup k m = AVL.lookup (-, k) m
infix 4 _∈?_
_∈?_ : ∀ {i} → Key i → Map → Bool
_∈?_ k = AVL._∈?_ (-, k)
headTail : Map → Maybe (KV × Map)
headTail m = Maybe.map (Prod.map toKV id) (AVL.headTail m)
initLast : Map → Maybe (Map × KV)
initLast m = Maybe.map (Prod.map id toKV) (AVL.initLast m)
fromList : List KV → Map
fromList = AVL.fromList ∘ List.map fromKV
toList : Map → List KV
toList = List.map toKV ∘ AVL.toList
|
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
s✝ t✝ s : Set α
t : Set β
inst✝² : Fintype ↑s
inst✝¹ : Fintype ↑t
inst✝ : Fintype ↑(s ×ˢ t)
⊢ toFinset (s ×ˢ t) = toFinset s ×ˢ toFinset t
[PROOFSTEP]
ext
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
s✝ t✝ s : Set α
t : Set β
inst✝² : Fintype ↑s
inst✝¹ : Fintype ↑t
inst✝ : Fintype ↑(s ×ˢ t)
a✝ : α × β
⊢ a✝ ∈ toFinset (s ×ˢ t) ↔ a✝ ∈ toFinset s ×ˢ toFinset t
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
s✝ t s : Set α
inst✝² : DecidableEq α
inst✝¹ : Fintype ↑s
inst✝ : Fintype ↑(offDiag s)
⊢ ∀ (a : α × α), a ∈ toFinset (offDiag s) ↔ a ∈ Finset.offDiag (toFinset s)
[PROOFSTEP]
simp
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
α : Type u_4
β : Type u_5
inst✝¹ : Fintype α
inst✝ : Fintype β
x✝ : α × β
a : α
b : β
⊢ (a, b) ∈ univ ×ˢ univ
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
⊢ Infinite (α × β) ↔ Infinite α ∧ Nonempty β ∨ Nonempty α ∧ Infinite β
[PROOFSTEP]
refine'
⟨fun H => _, fun H => H.elim (and_imp.2 <| @Prod.infinite_of_left α β) (and_imp.2 <| @Prod.infinite_of_right α β)⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
H : Infinite (α × β)
⊢ Infinite α ∧ Nonempty β ∨ Nonempty α ∧ Infinite β
[PROOFSTEP]
rw [and_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
H : Infinite (α × β)
⊢ Nonempty β ∧ Infinite α ∨ Nonempty α ∧ Infinite β
[PROOFSTEP]
contrapose! H
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
H : (Nonempty β → ¬Infinite α) ∧ (Nonempty α → ¬Infinite β)
⊢ ¬Infinite (α × β)
[PROOFSTEP]
intro H'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
H : (Nonempty β → ¬Infinite α) ∧ (Nonempty α → ¬Infinite β)
H' : Infinite (α × β)
⊢ False
[PROOFSTEP]
rcases Infinite.nonempty (α × β) with ⟨a, b⟩
[GOAL]
case intro.mk
α : Type u_1
β : Type u_2
γ : Type u_3
H : (Nonempty β → ¬Infinite α) ∧ (Nonempty α → ¬Infinite β)
H' : Infinite (α × β)
a : α
b : β
⊢ False
[PROOFSTEP]
haveI := fintypeOfNotInfinite (H.1 ⟨b⟩)
[GOAL]
case intro.mk
α : Type u_1
β : Type u_2
γ : Type u_3
H : (Nonempty β → ¬Infinite α) ∧ (Nonempty α → ¬Infinite β)
H' : Infinite (α × β)
a : α
b : β
this : Fintype α
⊢ False
[PROOFSTEP]
haveI := fintypeOfNotInfinite (H.2 ⟨a⟩)
[GOAL]
case intro.mk
α : Type u_1
β : Type u_2
γ : Type u_3
H : (Nonempty β → ¬Infinite α) ∧ (Nonempty α → ¬Infinite β)
H' : Infinite (α × β)
a : α
b : β
this✝ : Fintype α
this : Fintype β
⊢ False
[PROOFSTEP]
exact H'.false
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Sort u_4
π : ι → Type ?u.4746
inst✝¹ : ∀ (i : ι), Nontrivial (π i)
inst✝ : Infinite ι
⊢ Infinite ((i : ι) → π i)
[PROOFSTEP]
choose m n hm using fun i => exists_pair_ne (π i)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Sort u_4
π : ι → Type ?u.4746
inst✝¹ : ∀ (i : ι), Nontrivial (π i)
inst✝ : Infinite ι
m n : (i : ι) → π i
hm : ∀ (i : ι), m i ≠ n i
⊢ Infinite ((i : ι) → π i)
[PROOFSTEP]
refine' Infinite.of_injective (fun i => update m i (n i)) fun x y h => of_not_not fun hne => _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Sort u_4
π : ι → Type ?u.4746
inst✝¹ : ∀ (i : ι), Nontrivial (π i)
inst✝ : Infinite ι
m n : (i : ι) → π i
hm : ∀ (i : ι), m i ≠ n i
x y : ι
h : (fun i => update m i (n i)) x = (fun i => update m i (n i)) y
hne : ¬x = y
⊢ False
[PROOFSTEP]
simp_rw [update_eq_iff, update_noteq hne] at h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Sort u_4
π : ι → Type ?u.4746
inst✝¹ : ∀ (i : ι), Nontrivial (π i)
inst✝ : Infinite ι
m n : (i : ι) → π i
hm : ∀ (i : ι), m i ≠ n i
x y : ι
hne : ¬x = y
h : n x = m x ∧ ∀ (x_1 : ι), x_1 ≠ x → m x_1 = update m y (n y) x_1
⊢ False
[PROOFSTEP]
exact (hm x h.1.symm).elim
|
An angiographic catheter is described with a valve covering the end-hole on the distal end. The valve will allow the catheter to be passed over a guidewire but will prevent an injected fluid (e.g., a contrast medium) from being discharged from the end hole. The valved-end catheter has side-holes near the distal end that provide for lateral discharge of the fluid thereby preventing the creation of an end-hole jet and the resulting undesirable effects of the jet.
Angiographic catheters are very small but long thin-walled tubes that are inserted into the human vascular system for diagnostic or therapeutic purposes. At the present time, almost all such catheters have an end-hole at the distal end so that the catheter can be passed over and guided by a wire which has been inserted into the vascular system through a hollow needle placed in a blood vessel. Also, because these catheters are open at both ends to allow passage over the guidewire, one catheter can be exchanged for another by replacing the wire and withdrawing the catheter over the wire.
When used in diagnostic procedures, the primary purpose of these catheters is to allow injection of radiopaque contrast material into the blood stream so as to produce an image of the blood vessel (an angiogram) on x-ray film. During the process of diagnostic angiography, the contrast medium is usually injected at a rapid rate using a power injector, and the contrast medium is forcefully discharged from the distal end-hole of the catheter creating a jet effect. This produces undesirable recoil of the catheter and can also produce a dangerous complication, subintimal injection of the contrast medium, in which the jet tunnels into the wall of the blood vessel sometimes resulting in acute occlusion of the vessel.
To minimize the undesirable effect of recoil and the potential complication of subintimal injection, some early catheter designs had sealed distal ends with side-holes near the distal end to allow injection of the contrast medium into the blood vessel laterally and symmetrically thereby reducing subintimal injections. Of course, with the sealed distal end, these catheters cannot be inserted into a blood vessel over a guidewire, and therefore this design is not widely used. Most catheters presently used for rapid flush angiography are configured with a circular loop or "pigtail" at the distal end. Although the end-hole is open, these pigtail type catheters are provided with side-holes through which approximately 40% of the contrast medium is discharged. Although the looped end of the catheter decreases somewhat the chance of subintimal injection, the open end-hole still allows approximately 60% of the contrast medium to exit the end-hole in a strong jet. During left ventriculography, this jet has been associated with the production of ventricular arrhythmias which can be dangerous to the patient and which can lessen the accuracy of the acquired physiologic data. To overcome the limitations of the pigtail catheter in cardiac angiography, various modifications have been attempted to the pigtail configuration, such as a multiple loop configuration or formation of a bend at an acute angle in the distal portion of the catheter. However, these modifications have not satisfactorily alleviated the problems associated with the use of any catheter which has an open end-hole.
Moreover, when catheters of this type are used during abdominal aortography, it is sometimes desirable to have the contrast medium injected in a lateral fashion to opacify the renal arteries which arise at right angles to the abdominal aorta. The pigtail catheter, including known modifications of it, tends to inject the contrast medium in a superior direction through the superiorally directed end-hole. This produces the undesirable effect of filling the blood vessels superior to the renal arteries which can obscure visualization of the anatomic structures being observed.
There is therefore a need for a catheter which can be inserted into the vascular system by passing it over a guidewire but which catheter will have the advantages of a closed-end catheter.
The catheter of the invention has formed over the end-hole at its distal end a two-way valve which opens to allow passage of a guidewire and closes upon removal of the guidewire to prevent discharge of fluid from the end-hole of the catheter. In one embodiment, the catheter is formed with side-holes for discharge of the fluid, such as a contrast medium, and the principles of the invention can be utilized in a balloon occlusion catheter because all of the fluid will be discharged proximally to the balloon through the side-holes. When used with a balloon for coronary or small vessel peripheral angioplasty, no side holes are formed in the catheter, thus creating a single lumen over-the-wire angioplasty balloon catheter. The valve at the distal end of the catheter may be formed by several slits along radial lines in a solid tubular piece of material similar to the material used in the catheter itself. However, the material must have sufficient elasticity so that the valve remains normally closed and is opened only when a guidewire is advanced in either direction against the valve.
FIG. 4 is a perspective view that illustrates the principles of the invention as applied to a balloon occlusion catheter.
The intravascular catheter of the invention has an elongated tubular wall 10 that defines a longitudinally extending lumen or passageway 12 extending throughout the length of the catheter. The catheter is constructed of any suitable material that has the required strength and flexibility which will permit the catheter to be inserted into a blood vessel for use in various diagnostic and therapeutic applications where it is necessary to inject a fluid, such as a contrast medium, into the body. There are a number of plastic materials presently available that are suitable for this use, such as various forms of soft polyurethanes, soft polyesters and low density polyethylenes. The passageway 12 normally terminates in an end-hole or discharge opening at the distal end 14 of the catheter, but the invention provides a valve, indicated generally by the reference numeral 16, that covers the end-hole. Valve 16 may be formed in any suitable manner. For example, valve 16 may be formed at the outer end of a solid tubular end piece 20 of relatively short length which is fused or otherwise suitably attached at the distal end 14 of the wall 10 of the catheter.
The desired action of valve 16 may be created by a plurality of flaps 22 formed by one or more cuts, such as radially extending slits, in the outer end of end piece 20. In the alternative, a pin hole could be formed in the outer end of the end piece 20, the pin hole being just large enough to permit the passage of a guidewire. The flaps 22 are preferably formed of the same basic material as the material that forms the wall 10 of the catheter, but the material forming flaps 22 must have sufficient elasticity to perform the valve function in the manner described herein.
In one embodiment of the invention, upstream from the valve 16 are a plurality of side-holes 18 formed in the wall 10 of the catheter to provide for lateral discharge of fluid flowing through the passageway 12 of the catheter. The flaps 22 normally close the end-hole at distal end 14 and substantially seal off distal end 14 so that any fluid introduced into the passageway 12 is discharged through the side-holes 18. However, when the end of a guidewire (not shown) strikes the flaps 22 from either direction, the flaps 22 will bend a sufficient amount to permit the passage of the guidewire. When the guidewire is removed, the resiliency of the flaps 22 will once again close off the distal end 14.
The distal end 14 may be either normally straight as shown in the drawings, or distal end 14 may be shaped into a circular pigtail configuration similar to that of presently used catheters which have a closed distal end. The principles of the invention therefore can be applied to pigtail catheters which are used extensively in ventriculography.
In FIG. 4 there is illustrated a balloon occlusion catheter to which the principles of the invention have been applied by positioning valve 16 at the distal end. In this balloon occlusion catheter, there is in addition to the outer wall 10 an interior tube 24 that provides a passageway or lumen 26 which carries fluid, such as air, to an expandable wall or balloon 28 formed in the wall 10 near its distal end 14 downstream from the side-holes 18. The expandable wall or balloon 28 can therefore be inflated using lumen 26 so as to occlude flow through the blood vessel. As in the other embodiments of the invention, when the catheter is inserted into the blood vessel using a guidewire, as soon as the end of the guidewire engages the flaps 22 of valve 16, the flaps 22 will flex to permit passage of the guidewire until such time as it is withdrawn. When the guidewire is withdrawn, valve 16 will close and remain closed, thus allowing for all of the fluid flowing through passageway 12 to be discharged through side-holes 18.
The principles of the invention also can be applied to balloon catheters with no side holes, thereby allowing guidewire insertion and balloon inflation through a single passageway. Similarly, the valved end construction utilizing the principles of the invention will allow an angioplasty balloon to be inflated through the same lumen used to pass the catheter over a guidewire thus creating a single lumen over-the-wire angioplasty balloon. This would allow the catheter diameter to be reduced significantly which would be useful in coronary angioplasty and small vessel peripheral angioplasty.
The operation and use of the catheter of the invention is evident from the foregoing description. When the catheter of the invention is properly used, catheter recoil will be greatly reduced if not eliminated since the contrast medium, for example, will be discharged from the catheter laterally in a symmetrical fashion through the side-holes. Also, use of the valved distal end will greatly reduce the chance of subintimal contrast injections since there will no longer be any jet effect from the end-hole. The catheter of the invention will also provide more accurate contrast injection in the aorta, since contrast medium is injected laterally through the side-holes. Also, infusion of other fluids, such as thrombolytic agents, is improved since these fluids can be infused evenly through multiple side-holes rather than a single end-hole of the catheter. Moreover, the catheter of the invention may be useful in nonvascular catheter applications, such as in biliary and renal catheterization procedures.
It will be thus evident to those skilled in the art that there are numerous applications for the principles of the invention and that various revisions and modifications can be made to the preferred embodiments disclosed herein without departing from the spirit and scope of the invention. It is my intention, however, that all such revisions and modifications as are obvious to those skilled in the art will be included within the scope of the following claims.
1. A catheter insertable over a guidewire for introducing a fluid into a vessel of the human body, said catheter comprising a long hollow flexible tube of the desired length having a small diameter and a proximal end and a distal end, said tube having a thin wall extending between said ends to define a passageway extending throughout the length of the tube and terminating at the distal end, a plurality of side holes extending through the wall of the tube near its distal end to provide for the discharge of fluid from the passageway through the holes in the wall and into the vessel, and means closing the distal end and preventing the discharge of fluid from the passageway through the distal end, said means opening during the passage of the guidewire through the distal end.
2. The catheter of claim 1 in which the means closing the distal end is comprised of a plurality of resilient flaps that normally close off the distal end but which will flex to allow the passage of the guidewire.
3. The catheter of claim 2 in which the flaps are formed by a plurality of radial slits.
4. The catheter of claim 1 in which there is an opening in the means closing the distal end which opening is small enough to allow the passage of the guidewire without allowing the passage of fluid from the distal end.
5. A catheter for introducing a fluid into the body, said catheter comprising a tube of the desired length having a proximal end and a distal end, a thin wall extending between said ends to define a passageway extending throughout the length of the tube, the distal end terminating in an end hole that is in communication with the passageway, a plurality of side holes extending through the wall of the tube near its distal end to provide for the flow of fluid from the passageway through the wall, an inflatable means near the distal end between the side holes and the end hole, said inflatable means being normally not inflated, pressure means to controllably inflate the inflatable means from the proximal end, and valve means comprised of a plurality of resilient flaps formed by a plurality of radial slits that normally close off the end hole to prevent the discharge of fluid from the passageway through the end hole but which will flex to allow the passage of a guidewire through the valve means.
6. The catheter of claim 5 in which the pressure means includes a tube positioned inside of the passageway and extending from the proximal end terminating at the inflatable means.
EP0608609A3 (en) * 1992-12-01 1995-05-24 Cardiac Pathways Corp Catheter for RF ablation with cooled electrode and method.
EP0599065A3 (en) * 1992-10-29 1994-08-17 Sachse Hans Tubelar ureteral conduit. |
Formal statement is: lemma open_neg_translation: fixes S :: "'a::real_normed_vector set" assumes "open S" shows "open((\<lambda>x. a - x) ` S)" Informal statement is: If $S$ is an open set, then the set of all points $a - x$ for $x \in S$ is also open. |
corollary continuous_at_eps_delta: "continuous (at x) f \<longleftrightarrow> (\<forall>e > 0. \<exists>d > 0. \<forall>x'. dist x' x < d \<longrightarrow> dist (f x') (f x) < e)" |
10) can only be formed by oxidation by concentrated nitric acid. Antimony also forms a mixed-valence oxide, antimony tetroxide (Sb
|
(* Title: HOL/Auth/n_germanSymIndex_lemma_inv__46_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSymIndex Protocol Case Study*}
theory n_germanSymIndex_lemma_inv__46_on_rules imports n_germanSymIndex_lemma_on_inv__46
begin
section{*All lemmas on causal relation between inv__46*}
lemma lemma_inv__46_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__46 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqSVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__0Vsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__1Vsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqEVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__46) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__46) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
Formal statement is: lemma Janiszewski_weak: fixes a b::complex assumes "compact S" "compact T" and conST: "connected(S \<inter> T)" and ccS: "connected_component (- S) a b" and ccT: "connected_component (- T) a b" shows "connected_component (- (S \<union> T)) a b" Informal statement is: If $S$ and $T$ are compact sets in the complex plane, and $S \cap T$ is connected, then the connected component of $a$ and $b$ in the complement of $S \cup T$ is the same as the connected component of $a$ and $b$ in the complement of $S$ or the complement of $T$. |
lemma sgn_zero_iff: "sgn x = 0 \<longleftrightarrow> x = 0" for x :: "'a::real_normed_vector" |
\section{Overview}
\label{sec:inference:overview}
Figure \ref{fig:overview_app} shows a high-level overview of the entire application consisting of these four parts:
\begin{enumerate}
\item Camera Library (\texttt{libcamera.so})
\item Python Package (\texttt{fhnwtoys})
\item User Interface Class (\texttt{ui.py})
\item Inference Application (\texttt{aionfpga.py})
\end{enumerate}
The camera interface is written in C++ and compiled into a shared library (\texttt{.so}).
This allows the inference application to load the camera library into its memory space (see section \ref{sec:inference:camera_library}).
The \texttt{fhnwtoys} Python package provides access to various settings and constants.
The \acrlong{ui} class serves as an interface between the inference application and the display.
It features two methods to easily update the user interface screens.
The inference application itself uses the other three components to acquire frames, run inference and display the classification results on the screen.
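As an illustrative sketch (not the actual implementation; the symbol name \texttt{get\_frame} and its signature are assumptions), the inference application could load the shared camera library from Python via \texttt{ctypes}:
\begin{verbatim}
# Illustrative sketch: symbol names and signatures are assumptions.
import ctypes

libcamera = ctypes.CDLL("./libcamera.so")   # load the shared camera library
libcamera.get_frame.restype = ctypes.c_int  # declare the assumed return type

status = libcamera.get_frame()              # call into the C++ library
print("get_frame returned", status)
\end{verbatim}
The actual interface exposed by \texttt{libcamera.so} is described in section \ref{sec:inference:camera_library}.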
\begin{figure}
\centering
\includegraphics[width=\textwidth]{overview_app}
\caption{High-level overview of the entire application}
\label{fig:overview_app}
\end{figure}
|
function zz=v_lpccw2zz(cw)
%V_LPCCW2ZZ LPC: Power spectrum roots to LPC poles ZZ=(CW)
% cw are the roots of the power spectrum polynomial pp(cos(w))
% Copyright (C) Mike Brookes 1998
% Version: $Id: v_lpccw2zz.m 10865 2018-09-21 17:22:45Z dmb $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
zs=sqrt(cw.^2-1);                       % square-root term: the candidate poles are cw+zs and cw-zs (their product is 1)
zz=cw-sign(real(conj(cw).*zs)).*zs;     % keep the candidate with the smaller magnitude
|
module Hedgehog.Internal.Property
import Control.Monad.Either
import Control.Monad.Identity
import Control.Monad.Trans
import Control.Monad.Writer
import Data.DPair
import Data.Lazy
import Data.SortedMap
import Generics.Derive
import Hedgehog.Internal.Gen
import Hedgehog.Internal.Util
import Text.Show.Diff
import Text.Show.Pretty
%language ElabReflection
%default total
--------------------------------------------------------------------------------
-- Tagged Primitives
--------------------------------------------------------------------------------
public export
data Tag = ConfidenceTag
| CoverCountTag
| CoverPercentageTag
| GroupNameTag
| LabelNameTag
| PropertyCountTag
| PropertyNameTag
| ShrinkCountTag
| ShrinkLimitTag
| TestCountTag
| TestLimitTag
public export
record Tagged (tag : Tag) (t : Type) where
constructor MkTagged
unTag : t
public export %inline
Show t => Show (Tagged tag t) where
show = show . unTag
public export %inline
Eq t => Eq (Tagged tag t) where
(==) = (==) `on` unTag
public export %inline
Ord t => Ord (Tagged tag t) where
compare = compare `on` unTag
public export %inline
Num t => Num (Tagged tag t) where
fromInteger = MkTagged . fromInteger
MkTagged x + MkTagged y = MkTagged (x + y)
MkTagged x * MkTagged y = MkTagged (x * y)
public export %inline
FromString t => FromString (Tagged tag t) where
fromString = MkTagged . fromString
public export %inline
Semigroup (Tagged tag Nat) where (<+>) = (+)
public export %inline
Monoid (Tagged tag Nat) where neutral = 0
||| The total number of tests which are covered by a classifier.
|||
||| Can be constructed using numeric literals.
public export
0 CoverCount : Type
CoverCount = Tagged CoverCountTag Nat
||| The name of a group of properties.
public export
0 GroupName : Type
GroupName = Tagged GroupNameTag String
||| The number of properties in a group.
public export
0 PropertyCount : Type
PropertyCount = Tagged PropertyCountTag Nat
||| The number of times a property was able to shrink after a failing test.
public export
0 ShrinkCount : Type
ShrinkCount = Tagged ShrinkCountTag Nat
||| The number of shrinks to try before giving up on shrinking.
|||
||| Can be constructed using numeric literals:
public export
0 ShrinkLimit : Type
ShrinkLimit = Tagged ShrinkLimitTag Nat
||| The number of tests a property ran successfully.
public export
0 TestCount : Type
TestCount = Tagged TestCountTag Nat
||| The number of successful tests that need to be run before a property test
||| is considered successful.
|||
||| Can be constructed using numeric literals.
public export
0 TestLimit : Type
TestLimit = Tagged TestLimitTag Nat
||| The name of a property.
public export
0 PropertyName : Type
PropertyName = Tagged PropertyNameTag String
||| The acceptable occurrence of false positives
|||
||| For example, `the Confidence 1000000000` means that you'd accept
||| a false positive for 1 in 10^9 tests.
public export
record Confidence where
constructor MkConfidence
confidence : Bits64
0 inBound : confidence >= 2 = True
public export %inline
Eq Confidence where (==) = (==) `on` confidence
public export %inline
Ord Confidence where compare = compare `on` confidence
public export %inline
Show Confidence where showPrec p = showPrec p . confidence
namespace Confidence
public export
fromInteger : (n : Integer)
-> {auto 0 prf : the Bits64 (fromInteger n) >= 2 = True}
-> Confidence
fromInteger n = MkConfidence (fromInteger n) prf
||| The relative number of tests which are covered by a classifier.
public export
0 CoverPercentage : Type
CoverPercentage = Tagged CoverPercentageTag Double
||| The name of a classifier.
public export
0 LabelName : Type
LabelName = Tagged LabelNameTag String
--------------------------------------------------------------------------------
-- Journal
--------------------------------------------------------------------------------
||| The difference between some expected and actual value.
public export
record Diff where
constructor MkDiff
diffPrefix : String
diffRemoved : String
diffInfix : String
diffAdded : String
diffSuffix : String
diffValue : ValueDiff
%runElab derive "Hedgehog.Internal.Property.Diff" [Generic,Meta,Show,Eq]
||| Whether a test is covered by a classifier, and therefore belongs to a
||| 'Class'.
public export
data Cover = NotCovered | Covered
%runElab derive "Cover" [Generic,Meta,Show,Eq,Ord]
public export
Semigroup Cover where
NotCovered <+> NotCovered = NotCovered
_ <+> _ = Covered
public export
Monoid Cover where
neutral = NotCovered
public export
toCoverCount : Cover -> CoverCount
toCoverCount NotCovered = 0
toCoverCount Covered = 1
||| The extent to which a test is covered by a classifier.
|||
||| When a classifier's coverage does not exceed the required minimum, the
||| test will be failed.
public export
record Label a where
constructor MkLabel
labelName : LabelName
labelMinimum : CoverPercentage
labelAnnotation : a
%runElab derive "Label" [Generic,Meta,Show,Eq]
public export
Functor Label where
map f = {labelAnnotation $= f}
public export
Foldable Label where
foldl f a l = f a l.labelAnnotation
foldr f a l = f l.labelAnnotation a
null _ = False
public export
Traversable Label where
traverse f l = (\v => {labelAnnotation := v} l) <$>
f l.labelAnnotation
||| This semigroup is right biased. The name, location and percentage from the
||| rightmost `Label` will be kept. This shouldn't be a problem since the
||| library doesn't allow setting multiple classes with the same 'ClassifierName'.
export
Semigroup a => Semigroup (Label a) where
ll <+> lr = { labelAnnotation $= (ll.labelAnnotation <+>) } lr
||| Log messages which are recorded during a test run.
public export
data Log = Annotation (Lazy String)
| Footnote (Lazy String)
| LogLabel (Label Cover)
%runElab derive "Log" [Generic,Meta,Show,Eq]
||| A record containing the details of a test run.
public export
record Journal where
constructor MkJournal
journalLogs : List (Lazy Log)
%runElab derive "Journal" [Generic,Meta,Show,Eq,Semigroup,Monoid]
||| Details on where and why a test failed.
public export
record Failure where
constructor MkFailure
message : String
diff : Maybe Diff
%runElab derive "Failure" [Generic,Meta,Show,Eq]
||| The extent to which all classifiers cover a test.
|||
||| When a given classification's coverage does not exceed the required
||| minimum, the test will be failed.
public export
record Coverage a where
constructor MkCoverage
coverageLabels : SortedMap LabelName (Label a)
%runElab derive "Coverage" [Generic,Meta,Show,Eq]
export
Functor Coverage where
map f = {coverageLabels $= map (map f) }
export
Foldable Coverage where
foldl f acc (MkCoverage sm) = foldl (foldl f) acc sm
foldr f acc (MkCoverage sm) = foldr (\l,a => foldr f a l) acc sm
null = null . coverageLabels
export
Traversable Coverage where
traverse f (MkCoverage sm) = MkCoverage <$> traverse (traverse f) sm
export
Semigroup a => Semigroup (Coverage a) where
MkCoverage c0 <+> MkCoverage c1 = MkCoverage $ c0 <+> c1
export
Semigroup a => Monoid (Coverage a) where
neutral = MkCoverage empty
--------------------------------------------------------------------------------
-- Config
--------------------------------------------------------------------------------
public export
data TerminationCriteria =
EarlyTermination Confidence TestLimit
| NoEarlyTermination Confidence TestLimit
| NoConfidenceTermination TestLimit
%runElab derive "TerminationCriteria" [Generic,Meta,Show,Eq]
public export
unCriteria : TerminationCriteria -> (Maybe Confidence, TestLimit)
unCriteria (EarlyTermination c t) = (Just c, t)
unCriteria (NoEarlyTermination c t) = (Just c, t)
unCriteria (NoConfidenceTermination t) = (Nothing, t)
||| Configuration for a property test.
public export
record PropertyConfig where
constructor MkPropertyConfig
shrinkLimit : ShrinkLimit
terminationCriteria : TerminationCriteria
%runElab derive "PropertyConfig" [Generic,Meta,Show,Eq]
||| The minimum amount of tests to run for a 'Property'
public export
defaultMinTests : TestLimit
defaultMinTests = 100
||| The default confidence allows one false positive in 10^9 tests
public export
defaultConfidence : Confidence
defaultConfidence = Confidence.fromInteger 1000000000
||| The default configuration for a property test.
public export
defaultConfig : PropertyConfig
defaultConfig =
MkPropertyConfig {
shrinkLimit = 1000
, terminationCriteria = NoConfidenceTermination defaultMinTests
}
--------------------------------------------------------------------------------
-- Test
--------------------------------------------------------------------------------
||| A test monad transformer allows the assertion of expectations.
public export
0 TestT : (Type -> Type) -> Type -> Type
TestT m = EitherT Failure (WriterT Journal m)
public export
0 Test : Type -> Type
Test = TestT Identity
export
mkTestT : Functor m => m (Either Failure a, Journal) -> TestT m a
mkTestT = MkEitherT . writerT
export
mkTest : (Either Failure a, Journal) -> Test a
mkTest = mkTestT . Id
export
runTestT : TestT m a -> m (Either Failure a, Journal)
runTestT = runWriterT . runEitherT
export
runTest : Test a -> (Either Failure a, Journal)
runTest = runIdentity . runTestT
||| Log some information which might be relevant to a potential test failure.
export
writeLog : Applicative m => Lazy Log -> TestT m ()
writeLog x = mkTestT $ pure (Right (), MkJournal [x])
||| Fail the test with an error message, useful for building other failure
||| combinators.
export
failWith : Applicative m => Maybe Diff -> String -> TestT m a
failWith diff msg = mkTestT $ pure (Left $ MkFailure msg diff, neutral)
||| Annotates the source code with a message that might be useful for
||| debugging a test failure.
export
annotate : Applicative m => Lazy String -> TestT m ()
annotate v = writeLog $ Annotation v
||| Annotates the source code with a value that might be useful for
||| debugging a test failure.
export covering
annotateShow : (Applicative m, Show a) => a -> TestT m ()
annotateShow v = annotate $ ppShow v
||| Logs a message to be displayed as additional information in the footer of
||| the failure report.
export
footnote : Applicative m => Lazy String -> TestT m ()
footnote v = writeLog $ Footnote v
||| Logs a value to be displayed as additional information in the footer of
||| the failure report.
export covering
footnoteShow : (Applicative m, Show a) => a -> TestT m ()
footnoteShow v = writeLog (Footnote $ ppShow v)
||| Fails with an error that shows the difference between two values.
export covering
failDiff : (Applicative m, Show a, Show b) => a -> b -> TestT m ()
failDiff x y =
case valueDiff <$> reify x <*> reify y of
Nothing =>
failWith Nothing $
unlines $ [
"Failed"
, "━━ lhs ━━"
, ppShow x
, "━━ rhs ━━"
, ppShow y
]
Just vdiff@(Same _) =>
failWith (Just $
MkDiff "━━━ Failed (" "" "no differences" "" ") ━━━" vdiff) ""
Just vdiff =>
failWith (Just $
MkDiff "━━━ Failed (" "- lhs" ") (" "+ rhs" ") ━━━" vdiff) ""
||| Causes a test to fail.
export
failure : Applicative m => TestT m a
failure = failWith Nothing ""
||| Another name for `pure ()`.
export
success : Monad m => TestT m ()
success = pure ()
||| Fails the test if the condition provided is 'False'.
export
assert : Monad m => Bool -> TestT m ()
assert ok = if ok then success else failure
||| Fails the test and shows a git-like diff if the comparison operation
||| evaluates to 'False' when applied to its arguments.
|||
||| The comparison function is the second argument, which may be
||| counter-intuitive to Haskell programmers. However, it allows operators to
||| be written infix for easy reading:
|||
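||| For instance (a usage sketch; `y` is assumed to be a numeric value in scope):
|||
||| ```idris
||| diff y (<) 87
||| ```
|||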
||| This function behaves like the unix @diff@ tool, which gives a 0 exit
||| code if the compared files are identical, or a 1 exit code
||| otherwise. Like unix @diff@, if the arguments fail the comparison, a
||| diff is shown.
|||
export covering
diff : (Monad m, Show a, Show b)
=> a -> (a -> b -> Bool) -> b -> TestT m ()
diff x op y = if x `op` y then success
else failDiff x y
infix 4 ===
||| Fails the test if the two arguments provided are not equal.
export covering
(===) : (Monad m, Eq a, Show a) => a -> a -> TestT m ()
(===) x y = diff x (==) y
infix 4 /==
||| Fails the test if the two arguments provided are equal.
export covering
(/==) : (Monad m, Eq a, Show a) => a -> a -> TestT m ()
(/==) x y = diff x (/=) y
||| Fails the test if the 'Either' is 'Left', otherwise returns the value in
||| the 'Right'.
export covering
evalEither : (Monad m, Show x) => Either x a -> TestT m a
evalEither (Left x) = failWith Nothing (ppShow x)
evalEither (Right x) = pure x
||| Fails the test if the 'Maybe' is 'Nothing', otherwise returns the value in
||| the 'Just'.
export
evalMaybe : Monad m => Maybe a -> TestT m a
evalMaybe Nothing = failWith Nothing "the value was Nothing"
evalMaybe (Just x) = pure x
--------------------------------------------------------------------------------
-- PropertyT
--------------------------------------------------------------------------------
||| The property monad allows both the generation of test inputs
||| and the assertion of expectations.
public export
0 PropertyT : Type -> Type
PropertyT = TestT Gen
||| Generates a random input for the test by running the provided generator.
|||
||| This is the same as 'forAll' but allows the user to provide a custom
||| rendering function. This is useful for values which don't have a
||| 'Show' instance.
export
forAllWith : (a -> String) -> Gen a -> PropertyT a
forAllWith render gen = do x <- lift (lift gen)
annotate (render x)
pure x
||| Generates a random input for the test by running the provided generator.
export covering
forAll : Show a => Gen a -> PropertyT a
forAll = forAllWith ppShow
||| Lift a test in to a property.
export
test : Test a -> PropertyT a
test = mapEitherT $ mapWriterT (pure . runIdentity)
--------------------------------------------------------------------------------
-- Property
--------------------------------------------------------------------------------
||| A property test, along with some configurable limits like how many times
||| to run the test.
public export
record Property where
constructor MkProperty
config : PropertyConfig
test : PropertyT ()
namespace Property
||| Map a config modification function over a property.
export
mapConfig : (PropertyConfig -> PropertyConfig) -> Property -> Property
mapConfig f p = { config $= f } p
verifiedTermination : Property -> Property
verifiedTermination =
mapConfig $ \config =>
let
newTerminationCriteria = case config.terminationCriteria of
NoEarlyTermination c tests => EarlyTermination c tests
NoConfidenceTermination tests => EarlyTermination defaultConfidence tests
EarlyTermination c tests => EarlyTermination c tests
in { terminationCriteria := newTerminationCriteria } config
||| Adjust the number of times a property should be executed before it is considered
||| successful.
export
mapTests : (TestLimit -> TestLimit) -> Property -> Property
mapTests f = mapConfig {terminationCriteria $= setLimit}
where setLimit : TerminationCriteria -> TerminationCriteria
setLimit (NoEarlyTermination c n) = NoEarlyTermination c (f n)
setLimit (NoConfidenceTermination n) = NoConfidenceTermination (f n)
setLimit (EarlyTermination c n) = EarlyTermination c (f n)
||| Set the number of times a property should be executed before it is considered
||| successful.
|||
||| If you have a test that does not involve any generators and thus does not
||| need to run repeatedly, you can use @withTests 1@ to define a property that
||| will only be checked once.
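|||
||| For instance (a minimal sketch; `pure ()` stands in for an actual test body):
|||
||| ```idris
||| checkedOnce : Property
||| checkedOnce = withTests 1 $ property (pure ())
||| ```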
export
withTests : TestLimit -> Property -> Property
withTests = mapTests . const
||| Set the number of times a property is allowed to shrink before the test
||| runner gives up and prints the counterexample.
export
withShrinks : ShrinkLimit -> Property -> Property
withShrinks n = mapConfig { shrinkLimit := n }
||| Make sure that the result is statistically significant in accordance to
||| the passed 'Confidence'
export
withConfidence : Confidence -> Property -> Property
withConfidence c = mapConfig { terminationCriteria $= setConfidence }
where setConfidence : TerminationCriteria -> TerminationCriteria
setConfidence (NoEarlyTermination _ n) = NoEarlyTermination c n
setConfidence (NoConfidenceTermination n) = NoConfidenceTermination n
setConfidence (EarlyTermination _ n) = EarlyTermination c n
||| Creates a property with the default configuration.
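|||
||| A minimal usage sketch (reusing the `Gen.bool` generator from the examples below):
|||
||| ```idris
||| prop_example : Property
||| prop_example =
|||   property $ do
|||     b <- forAll Gen.bool
|||     b === b
||| ```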
export
property : PropertyT () -> Property
property = MkProperty defaultConfig
||| A named collection of property tests.
public export
record Group where
constructor MkGroup
name : GroupName
properties : List (PropertyName, Property)
namespace Group
export
mapProperty : (Property -> Property) -> Group -> Group
mapProperty f = { properties $= map (mapSnd f) }
||| Map a config modification function over all
||| properties in a `Group`.
export
mapConfig : (PropertyConfig -> PropertyConfig) -> Group -> Group
mapConfig = mapProperty . mapConfig
||| Set the number of times the properties in a `Group`
||| should be executed before they are considered
||| successful.
export
withTests : TestLimit -> Group -> Group
withTests = mapProperty . withTests
||| Set the number of times the properties in a `Group`
||| are allowed to shrink before the test
||| runner gives up and prints the counterexample.
export
withShrinks : ShrinkLimit -> Group -> Group
withShrinks = mapProperty . withShrinks
||| Make sure that the results of a `Group` are statistically
||| significant in accordance to the passed 'Confidence'
export
withConfidence : Confidence -> Group -> Group
withConfidence = mapProperty . withConfidence
--------------------------------------------------------------------------------
-- Coverage
--------------------------------------------------------------------------------
export
coverPercentage : TestCount -> CoverCount -> CoverPercentage
coverPercentage (MkTagged tests) (MkTagged count) =
let percentage = the Double (cast count / cast tests * 100)
thousandths = round {a = Double} $ percentage * 10
in MkTagged (thousandths / 10)
export
labelCovered : TestCount -> Label CoverCount -> Bool
labelCovered tests (MkLabel _ min population) =
coverPercentage tests population >= min
export
coverageFailures : TestCount -> Coverage CoverCount -> List $ Label CoverCount
coverageFailures tests (MkCoverage kvs) =
filter (not . labelCovered tests) (values kvs)
||| All labels are covered
export
coverageSuccess : TestCount -> Coverage CoverCount -> Bool
coverageSuccess tests c = null $ coverageFailures tests c
||| Require a certain percentage of the tests to be covered by the
||| classifier.
|||
||| ```idris
||| prop_with_coverage : Property
||| prop_with_coverage =
||| property $ do
||| match <- forAll Gen.bool
||| cover 30 "True" $ match
||| cover 30 "False" $ not match
||| ```
|||
||| The example above requires a minimum of 30% coverage for both
||| classifiers. If these requirements are not met, it will fail the test.
export
cover : Monad m => CoverPercentage -> LabelName -> Bool -> TestT m ()
cover min name covered =
let cover = if covered then Covered else NotCovered
in writeLog $ LogLabel (MkLabel name min cover)
||| Records the proportion of tests which satisfy a given condition.
|||
||| ```idris example
||| prop_with_classifier : Property
||| prop_with_classifier =
||| property $ do
||| xs <- forAll $ Gen.list (Range.linear 0 100) Gen.alpha
||| for_ xs $ \\x -> do
||| classify "newborns" $ x == 0
||| classify "children" $ x > 0 && x < 13
||| classify "teens" $ x > 12 && x < 20
||| ```
export
classify : Monad m => LabelName -> Bool -> TestT m ()
classify name covered = cover 0 name covered
||| Add a label for each test run. It produces a table showing the percentage
||| of test runs that produced each label.
export
label : Monad m => LabelName -> TestT m ()
label name = cover 0 name True
||| Like 'label', but uses 'Show' to render its argument for display.
export
collect : (Monad m, Show a) => a -> TestT m ()
collect x = cover 0 (MkTagged $ show x) True
fromLabel : Label a -> Coverage a
fromLabel x = MkCoverage $ singleton (labelName x) x
unionsCoverage : Semigroup a => List (Coverage a) -> Coverage a
unionsCoverage = MkCoverage . concatMap coverageLabels
export
journalCoverage : Journal -> Coverage CoverCount
journalCoverage = map toCoverCount
. unionsCoverage
. (>>= fromLog)
. journalLogs
where fromLog : Lazy Log -> List (Coverage Cover)
fromLog (LogLabel x) = [fromLabel x]
fromLog (Footnote _) = []
fromLog (Annotation _) = []
--------------------------------------------------------------------------------
-- Confidence
--------------------------------------------------------------------------------
-- not strictly true, but holds for the values we generate
-- in this module
times : InUnit -> InUnit -> InUnit
times (Element x _) (Element y _) =
Element (x * y) (believe_me (Refl {x = True}))
oneMin : InUnit -> InUnit
oneMin (Element v _) = Element (1.0 - v) (believe_me (Refl {x = True} ))
half : InUnit -> InUnit
half = times (Element 0.5 Refl)
-- In order to get an accurate measurement with small sample sizes, we're
-- using the Wilson score interval
-- (<https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Wilson_score_interval wikipedia>) instead of a normal approximation interval.
wilsonBounds : Nat -> Nat -> InUnit -> (Double, Double)
wilsonBounds positives count acceptance =
let
p = the Double (cast positives / cast count)
n = the Double (cast count)
z = invnormcdf $ oneMin (half acceptance)
midpoint = p + z * z / (2 * n)
offset = z / (1 + z * z / n) * sqrt (p * (1 - p) / n + z * z / (4 * n * n))
denominator = 1 + z * z / n
low = (midpoint - offset) / denominator
high = (midpoint + offset) / denominator
in (low, high)
boundsForLabel : TestCount -> Confidence -> Label CoverCount -> (Double, Double)
boundsForLabel (MkTagged tests) (MkConfidence c ib) lbl =
wilsonBounds (unTag lbl.labelAnnotation) tests (recipBits64 (Element c ib))
||| Is true when the test coverage satisfies the specified 'Confidence'
||| constraint for all 'Coverage CoverCount's
export
confidenceSuccess : TestCount -> Confidence -> Coverage CoverCount -> Bool
confidenceSuccess tests confidence =
all assertLow . values . coverageLabels
where assertLow : Label CoverCount -> Bool
assertLow cc =
fst (boundsForLabel tests confidence cc) >=
unTag cc.labelMinimum / 100.0
||| Is true when there exists a label that is sure to have failed according to
||| the 'Confidence' constraint
export
confidenceFailure : TestCount -> Confidence -> Coverage CoverCount -> Bool
confidenceFailure tests confidence =
any assertHigh . values . coverageLabels
where assertHigh : Label CoverCount -> Bool
assertHigh cc =
snd (boundsForLabel tests confidence cc) <
(unTag cc.labelMinimum / 100.0)
export
multOf100 : TestCount -> Bool
multOf100 (MkTagged n) = natToInteger n `mod` 100 == 0
export
failureVerified : TestCount -> Coverage CoverCount -> Maybe Confidence -> Bool
failureVerified count cover conf =
multOf100 count &&
maybe False (\c => confidenceFailure count c cover) conf
export
successVerified : TestCount -> Coverage CoverCount -> Maybe Confidence -> Bool
successVerified count cover conf =
multOf100 count &&
maybe False (\c => confidenceSuccess count c cover) conf
export
abortEarly : TerminationCriteria
-> TestCount
-> Coverage CoverCount
-> Maybe Confidence
-> Bool
abortEarly (EarlyTermination _ _) tests cover conf =
let coverageReached = successVerified tests cover conf
coverageUnreachable = failureVerified tests cover conf
in unTag tests >= unTag defaultMinTests &&
(coverageReached || coverageUnreachable)
abortEarly _ _ _ _ = False
|
program assignboolean; {sample22}
var x, y : boolean;
begin
x := true;
y := false;
end.
|
Formal statement is: lemma lborel_real_affine: "c \<noteq> 0 \<Longrightarrow> lborel = density (distr lborel borel (\<lambda>x. t + c * x)) (\<lambda>_. ennreal (abs c))" Informal statement is: The Lebesgue measure on the real line is the pushforward of the Lebesgue measure on the real line under the affine transformation $x \mapsto t + c x$. |
State Before: C : Type u
inst✝⁸ : Category C
J : GrothendieckTopology C
D : Type w₁
inst✝⁷ : Category D
E : Type w₂
inst✝⁶ : Category E
F : D ⥤ E
inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D
inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E
inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D
inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E
inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F
inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F
P : Cᵒᵖ ⥤ D
⊢ (sheafificationWhiskerRightIso J F).hom.app P = (sheafifyCompIso J F P).hom State After: C : Type u
inst✝⁸ : Category C
J : GrothendieckTopology C
D : Type w₁
inst✝⁷ : Category D
E : Type w₂
inst✝⁶ : Category E
F : D ⥤ E
inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D
inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E
inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D
inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E
inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F
inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F
P : Cᵒᵖ ⥤ D
⊢ 𝟙 (plusObj J (plusObj J P) ⋙ F) ≫
(plusCompIso J F (plusObj J P)).hom ≫
(𝟙 (plusObj J (plusObj J P ⋙ F)) ≫ plusMap J (plusCompIso J F P).hom) ≫ 𝟙 (plusObj J (plusObj J (P ⋙ F))) =
(plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom Tactic: dsimp [sheafificationWhiskerRightIso, sheafifyCompIso] State Before: C : Type u
inst✝⁸ : Category C
J : GrothendieckTopology C
D : Type w₁
inst✝⁷ : Category D
E : Type w₂
inst✝⁶ : Category E
F : D ⥤ E
inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D
inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E
inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D
inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E
inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F
inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F
P : Cᵒᵖ ⥤ D
⊢ 𝟙 (plusObj J (plusObj J P) ⋙ F) ≫
(plusCompIso J F (plusObj J P)).hom ≫
(𝟙 (plusObj J (plusObj J P ⋙ F)) ≫ plusMap J (plusCompIso J F P).hom) ≫ 𝟙 (plusObj J (plusObj J (P ⋙ F))) =
(plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom State After: C : Type u
inst✝⁸ : Category C
J : GrothendieckTopology C
D : Type w₁
inst✝⁷ : Category D
E : Type w₂
inst✝⁶ : Category E
F : D ⥤ E
inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D
inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E
inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D
inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E
inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F
inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F
P : Cᵒᵖ ⥤ D
⊢ 𝟙 (plusObj J (plusObj J P) ⋙ F) ≫ (plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom =
(plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom Tactic: simp only [Category.id_comp, Category.comp_id] State Before: C : Type u
inst✝⁸ : Category C
J : GrothendieckTopology C
D : Type w₁
inst✝⁷ : Category D
E : Type w₂
inst✝⁶ : Category E
F : D ⥤ E
inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D
inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E
inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D
inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E
inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F
inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F
P : Cᵒᵖ ⥤ D
⊢ 𝟙 (plusObj J (plusObj J P) ⋙ F) ≫ (plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom =
(plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom State After: no goals Tactic: erw [Category.id_comp] |
Formal statement is: lemma smallo_real_nat_transfer: "(f :: real \<Rightarrow> real) \<in> o(g) \<Longrightarrow> (\<lambda>x::nat. f (real x)) \<in> o(\<lambda>x. g (real x))" Informal statement is: If $f(x)$ is $o(g(x))$ as $x \to \infty$, then $f(n)$ is $o(g(n))$ as $n \to \infty$. |
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_monte.h>
#include <gsl/gsl_monte_plain.h>
#include <gsl/gsl_monte_miser.h>
#include <gsl/gsl_monte_vegas.h>
double calculateIntegralValue(int sampleCount)
{
int correctSamples = 0;
srand(time(NULL));
FILE* file = fopen("sqrt.txt", "w");
for (int i = 0; i < sampleCount; ++i){
double x = (double)rand() / RAND_MAX; // double [0, 1]
double y = (double)rand() / RAND_MAX; // double [0, 1000]
if (y <= 1.0/sqrt(x)){
fprintf (file,"%g %g\n", x, y); //Save correct samples to file
correctSamples++;
}
    }
    fclose(file); // flush the sample dump before returning
    return (double)correctSamples / (double)sampleCount; // (b-a) is (1.0-0.0) so we can ignore this multiply
}
void calculateIntegralError(int maxSamples)
{
srand(time(NULL));
FILE* file = fopen("sqrt_err.txt", "w");
for (int sampleCount = 1; sampleCount < maxSamples; ++sampleCount){
int correctSamples = 0;
for (int i = 0; i < sampleCount; ++i){
double x = (double)rand() / RAND_MAX; // double [0, 1]
double y = (double)rand() / RAND_MAX; // double [0, 1]
if (y <= x * x){
correctSamples++;
}
}
double sum = (double)correctSamples / (double)sampleCount; // (b-a) is (1.0-0.0) so we can ignore this multiply
double sumError = sum - 1.0/3.0;
fprintf(file, "%d %g\n", sampleCount, sqrt(sumError * sumError));
    }
    fclose(file); // flush the error log
}
// Incomplete in the original: only the objects needed for a GSL "plain"
// Monte Carlo integration over [0,pi]^3 are declared; no integrand is set up
// and gsl_monte_plain_integrate is never called (see the sketch below).
void monteCarloPLAIN()
{
    double res, err;                    // result and estimated error
    double xl[3] = {0,0,0};             // lower integration limits
    double xu[3] = {M_PI, M_PI, M_PI};  // upper integration limits
    const gsl_rng_type *T;              // RNG type
    gsl_rng *r;                         // RNG instance
    gsl_monte_function G;               // integrand wrapper {f, dim, params}
    (void)res; (void)err; (void)xl; (void)xu; (void)T; (void)r; (void)G; // unused in this stub
}
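// A minimal sketch (not part of the original program) of how the stub above
// could be completed with GSL's plain Monte Carlo driver.  The integrand g3,
// the call count and the function name are illustrative assumptions; the GSL
// calls are the standard gsl_monte_plain_* API.  Depending on the GSL version,
// #include <gsl/gsl_rng.h> may also be needed.
static double g3(double *x, size_t dim, void *params)
{
    (void)dim; (void)params;
    return cos(x[0]) * cos(x[1]) * cos(x[2]); // any smooth integrand works here
}

void monteCarloPLAINSketch(void)
{
    double res, err;
    double xl[3] = {0, 0, 0};
    double xu[3] = {M_PI, M_PI, M_PI};
    gsl_monte_function G = { &g3, 3, NULL };

    gsl_rng_env_setup();                          // honour GSL_RNG_* environment variables
    gsl_rng *r = gsl_rng_alloc(gsl_rng_default);  // default generator

    gsl_monte_plain_state *s = gsl_monte_plain_alloc(3);
    gsl_monte_plain_integrate(&G, xl, xu, 3, 500000, r, s, &res, &err);
    gsl_monte_plain_free(s);
    gsl_rng_free(r);

    printf("plain MC result = %g +/- %g\n", res, err);
}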
int main(void)
{
    // Estimate the (clipped) hit-or-miss integral of 1/sqrt(x) on [0,1]
    double sum = calculateIntegralValue(100000);
    printf("Sum: %g\n", sum);
    // Write |estimate - 1/3| for the x^2 integral as the sample count grows
    calculateIntegralError(1000);
    return 0;
} |
section\<open>Model of the negation of the Continuum Hypothesis\<close>
theory Not_CH
imports
Cardinal_Preservation
begin
text\<open>We are taking advantage of the fact that the poset of finite functions is absolute,
and thus we work with the unrelativized \<^term>\<open>Fn\<close>. But it would have been more
appropriate to do the following using the relative \<^term>\<open>Fn_rel\<close>. As it turns
out, the present theory was developed prior to having \<^term>\<open>Fn\<close> relativized!
We also note that \<^term>\<open>Fn(\<omega>,\<kappa>\<times>\<omega>,2)\<close> is separative, i.e. each \<^term>\<open>X \<in> Fn(\<omega>,\<kappa>\<times>\<omega>,2)\<close>
has two incompatible extensions; therefore we may recover part of our previous theorem
@{thm [source] extensions_of_ctms_ZF}. But that result also included the possibility
of not having $\AC$ in the ground model, which would not be sensible in a context
where the cardinality of the continuum is under discussion. It is also the case that
@{thm [source] extensions_of_ctms_ZF} was historically our first formalized result
(with a different proof) that showed the forcing machinery had all of its elements
in place.\<close>
abbreviation
Add_subs :: "i \<Rightarrow> i" where
"Add_subs(\<kappa>) \<equiv> Fn(\<omega>,\<kappa>\<times>\<omega>,2)"
abbreviation
Add_le :: "i \<Rightarrow> i" where
"Add_le(\<kappa>) \<equiv> Fnle(\<omega>,\<kappa> \<times> \<omega>,2)"
lemma (in M_aleph) Aleph_rel2_closed[intro,simp]: "M(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>)"
using nat_into_Ord by simp
locale M_master = M_cohen + M_library +
assumes
UN_lepoll_assumptions:
"M(A) \<Longrightarrow> M(b) \<Longrightarrow> M(f) \<Longrightarrow> M(A') \<Longrightarrow> separation(M, \<lambda>y. \<exists>x\<in>A'. y = \<langle>x, \<mu> i. x\<in>if_range_F_else_F((`)(A), b, f, i)\<rangle>)"
subsection\<open>Non-absolute concepts between extensions\<close>
sublocale M_master \<subseteq> M_Pi_replacement
by unfold_locales
locale M_master_sub = M_master + N:M_aleph N for N +
assumes
M_imp_N: "M(x) \<Longrightarrow> N(x)" and
Ord_iff: "Ord(x) \<Longrightarrow> M(x) \<longleftrightarrow> N(x)"
sublocale M_master_sub \<subseteq> M_N_Perm
using M_imp_N by unfold_locales
context M_master_sub
begin
lemma cardinal_rel_le_cardinal_rel: "M(X) \<Longrightarrow> |X|\<^bsup>N\<^esup> \<le> |X|\<^bsup>M\<^esup>"
using M_imp_N N.lepoll_rel_cardinal_rel_le[OF lepoll_rel_transfer Card_rel_is_Ord]
cardinal_rel_eqpoll_rel[THEN eqpoll_rel_sym, THEN eqpoll_rel_imp_lepoll_rel]
by simp
lemma Aleph_rel_sub_closed: "Ord(\<alpha>) \<Longrightarrow> M(\<alpha>) \<Longrightarrow> N(\<aleph>\<^bsub>\<alpha>\<^esub>\<^bsup>M\<^esup>)"
using Ord_iff[THEN iffD1, OF Card_rel_Aleph_rel[THEN Card_rel_is_Ord]]
by simp
lemma Card_rel_imp_Card_rel: "Card\<^bsup>N\<^esup>(\<kappa>) \<Longrightarrow> M(\<kappa>) \<Longrightarrow> Card\<^bsup>M\<^esup>(\<kappa>)"
using N.Card_rel_is_Ord[of \<kappa>] M_imp_N Ord_cardinal_rel_le[of \<kappa>]
cardinal_rel_le_cardinal_rel[of \<kappa>] le_anti_sym
unfolding Card_rel_def by auto
lemma csucc_rel_le_csucc_rel:
assumes "Ord(\<kappa>)" "M(\<kappa>)"
shows "(\<kappa>\<^sup>+)\<^bsup>M\<^esup> \<le> (\<kappa>\<^sup>+)\<^bsup>N\<^esup>"
proof -
note assms
moreover from this
have "N(L) \<and> Card\<^bsup>N\<^esup>(L) \<and> \<kappa> < L \<Longrightarrow> M(L) \<and> Card\<^bsup>M\<^esup>(L) \<and> \<kappa> < L"
(is "?P(L) \<Longrightarrow> ?Q(L)") for L
using M_imp_N Ord_iff[THEN iffD2, of L] N.Card_rel_is_Ord lt_Ord
Card_rel_imp_Card_rel by auto
moreover from assms
have "N((\<kappa>\<^sup>+)\<^bsup>N\<^esup>)" "Card\<^bsup>N\<^esup>((\<kappa>\<^sup>+)\<^bsup>N\<^esup>)" "\<kappa> < (\<kappa>\<^sup>+)\<^bsup>N\<^esup>"
using N.lt_csucc_rel[of \<kappa>] N.Card_rel_csucc_rel[of \<kappa>] M_imp_N by simp_all
ultimately
show ?thesis
using M_imp_N Least_antitone[of _ ?P ?Q] unfolding csucc_rel_def by blast
qed
lemma Aleph_rel_le_Aleph_rel: "Ord(\<alpha>) \<Longrightarrow> M(\<alpha>) \<Longrightarrow> \<aleph>\<^bsub>\<alpha>\<^esub>\<^bsup>M\<^esup> \<le> \<aleph>\<^bsub>\<alpha>\<^esub>\<^bsup>N\<^esup>"
proof (induct rule:trans_induct3)
case 0
then
show ?case
using Aleph_rel_zero N.Aleph_rel_zero by simp
next
case (succ x)
then
have "\<aleph>\<^bsub>x\<^esub>\<^bsup>M\<^esup> \<le> \<aleph>\<^bsub>x\<^esub>\<^bsup>N\<^esup>" "Ord(x)" "M(x)" by simp_all
moreover from this
have "(\<aleph>\<^bsub>x\<^esub>\<^bsup>M\<^esup>\<^sup>+)\<^bsup>M\<^esup> \<le> (\<aleph>\<^bsub>x\<^esub>\<^bsup>N\<^esup>\<^sup>+)\<^bsup>M\<^esup>"
using M_imp_N Ord_iff[THEN iffD2, OF N.Card_rel_is_Ord]
by (intro csucc_rel_le_mono) simp_all
moreover from calculation
have "(\<aleph>\<^bsub>x\<^esub>\<^bsup>N\<^esup>\<^sup>+)\<^bsup>M\<^esup> \<le> (\<aleph>\<^bsub>x\<^esub>\<^bsup>N\<^esup>\<^sup>+)\<^bsup>N\<^esup>"
using M_imp_N N.Card_rel_is_Ord Ord_iff[THEN iffD2, OF N.Card_rel_is_Ord]
by (intro csucc_rel_le_csucc_rel) auto
ultimately
show ?case
using M_imp_N Aleph_rel_succ N.Aleph_rel_succ csucc_rel_le_csucc_rel
le_trans by auto
next
case (limit x)
then
show ?case
using M_imp_N Aleph_rel_limit N.Aleph_rel_limit
by simp (blast dest: transM intro!:le_implies_UN_le_UN)
qed
end \<comment> \<open>\<^locale>\<open>M_master_sub\<close>\<close>
lemmas (in M_ZF2_trans) sep_instances =
separation_ifrangeF_body separation_ifrangeF_body2 separation_ifrangeF_body3
separation_ifrangeF_body4 separation_ifrangeF_body5 separation_ifrangeF_body6
separation_ifrangeF_body7 separation_cardinal_rel_lesspoll_rel
separation_is_dcwit_body separation_cdltgamma separation_cdeqgamma
lemmas (in M_ZF2_trans) repl_instances = lam_replacement_inj_rel
sublocale M_ZFC2_ground_notCH_trans \<subseteq> M_master "##M"
using replacement_trans_apply_image
by unfold_locales (simp_all add:repl_instances sep_instances del:setclass_iff
add: transrec_replacement_def wfrec_replacement_def)
sublocale M_ZFC2_trans \<subseteq> M_Pi_replacement "##M"
by unfold_locales
subsection\<open>Cohen forcing is ccc\<close>
context M_ctm2_AC
begin
lemma ccc_Add_subs_Aleph_2: "ccc\<^bsup>M\<^esup>(Add_subs(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>),Add_le(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>))"
proof -
interpret M_add_reals "##M" "\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup> \<times> \<omega>"
by unfold_locales blast
show ?thesis
using ccc_rel_Fn_nat by fast
qed
end \<comment> \<open>\<^locale>\<open>M_ctm2_AC\<close>\<close>
sublocale G_generic3_AC \<subseteq> M_master_sub "##M" "##(M[G])"
using M_subset_MG[OF one_in_G] generic Ord_MG_iff
by unfold_locales auto
lemma (in M_trans) mem_F_bound4:
fixes F A
defines "F \<equiv> (`)"
shows "x\<in>F(A,c) \<Longrightarrow> c \<in> (range(f) \<union> domain(A))"
using apply_0 unfolding F_def
by (cases "M(c)", auto simp:F_def)
lemma (in M_trans) mem_F_bound5:
fixes F A
defines "F \<equiv> \<lambda>_ x. A`x "
shows "x\<in>F(A,c) \<Longrightarrow> c \<in> (range(f) \<union> domain(A))"
using apply_0 unfolding F_def
by (cases "M(c)", auto simp:F_def drSR_Y_def dC_F_def)
sublocale M_ctm2_AC \<subseteq> M_replacement_lepoll "##M" "(`)"
using UN_lepoll_assumptions lam_replacement_apply lam_replacement_inj_rel
mem_F_bound4 apply_0 lam_replacement_minimum
unfolding lepoll_assumptions_defs
proof (unfold_locales,
rule_tac [3] lam_Least_assumption_general[where U=domain, OF _ mem_F_bound4], simp_all)
fix A i x
assume "A \<in> M" "x \<in> M" "x \<in> A ` i"
then
show "i \<in> M"
using apply_0[of i A] transM[of _ "domain(A)", simplified]
by force
qed
context G_generic3_AC begin
context
includes G_generic1_lemmas
begin
lemma G_in_MG: "G \<in> M[G]"
using G_in_Gen_Ext
by blast
lemma ccc_preserves_Aleph_succ:
assumes "ccc\<^bsup>M\<^esup>(\<bbbP>,leq)" "Ord(z)" "z \<in> M"
shows "Card\<^bsup>M[G]\<^esup>(\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>)"
proof (rule ccontr)
assume "\<not> Card\<^bsup>M[G]\<^esup>(\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>)"
moreover
note \<open>z \<in> M\<close> \<open>Ord(z)\<close>
moreover from this
have "Ord(\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>)"
using Card_rel_is_Ord by fastforce
ultimately
obtain \<alpha> f where "\<alpha> < \<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>" "f \<in> surj\<^bsup>M[G]\<^esup>(\<alpha>, \<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>)"
using ext.lt_surj_rel_empty_imp_Card_rel M_subset_MG[OF one_in_G]
by force
moreover from this and \<open>z\<in>M\<close> \<open>Ord(z)\<close>
have "\<alpha> \<in> M" "f \<in> M[G]"
using ext.trans_surj_rel_closed
by (auto dest:transM ext.transM dest!:ltD)
moreover
note \<open>ccc\<^bsup>M\<^esup>(\<bbbP>,leq)\<close> \<open>z\<in>M\<close>
ultimately
obtain F where "F:\<alpha>\<rightarrow>Pow\<^bsup>M\<^esup>(\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>)" "\<forall>\<beta>\<in>\<alpha>. f`\<beta> \<in> F`\<beta>" "\<forall>\<beta>\<in>\<alpha>. |F`\<beta>|\<^bsup>M\<^esup> \<le> \<omega>"
"F \<in> M"
using ccc_fun_approximation_lemma[of \<alpha> "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>" f]
ext.mem_surj_abs[of f \<alpha> "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>"] \<open>Ord(z)\<close>
surj_is_fun[of f \<alpha> "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>"] by auto
then
have "\<beta> \<in> \<alpha> \<Longrightarrow> |F`\<beta>|\<^bsup>M\<^esup> \<le> \<aleph>\<^bsub>0\<^esub>\<^bsup>M\<^esup>" for \<beta>
using Aleph_rel_zero by simp
have "w \<in> F ` x \<Longrightarrow> x \<in> M" for w x
proof -
fix w x
assume "w \<in> F`x"
then
have "x \<in> domain(F)"
using apply_0 by auto
with \<open>F:\<alpha>\<rightarrow>Pow\<^bsup>M\<^esup>(\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>)\<close> \<open>\<alpha> \<in> M\<close>
show "x \<in> M" using domain_of_fun
by (auto dest:transM)
qed
with \<open>\<alpha> \<in> M\<close> \<open>F:\<alpha>\<rightarrow>Pow\<^bsup>M\<^esup>(\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>)\<close> \<open>F\<in>M\<close>
interpret M_cardinal_UN_lepoll "##M" "\<lambda>\<beta>. F`\<beta>" \<alpha>
using UN_lepoll_assumptions lepoll_assumptions
lam_replacement_apply lam_replacement_inj_rel lam_replacement_minimum
proof (unfold_locales, auto dest:transM simp del:if_range_F_else_F_def)
fix f b
assume "b\<in>M" "f\<in>M"
with \<open>F\<in>M\<close>
show "lam_replacement(##M, \<lambda>x. \<mu> i. x \<in> if_range_F_else_F((`)(F), b, f, i))"
using UN_lepoll_assumptions mem_F_bound5
by (rule_tac lam_Least_assumption_general[where U="domain", OF _ mem_F_bound5])
simp_all
qed
from \<open>\<alpha> < \<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>\<close> \<open>\<alpha> \<in> M\<close> \<open>Ord(z)\<close> \<open>z\<in>M\<close>
have "\<alpha> \<lesssim>\<^bsup>M\<^esup> \<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>"
using
cardinal_rel_lt_csucc_rel_iff[of "\<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>" \<alpha>]
le_Card_rel_iff[of "\<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>" \<alpha>]
Aleph_rel_succ[of z] Card_rel_lt_iff[of \<alpha> "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>"]
lt_Ord[of \<alpha> "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>"]
Card_rel_csucc_rel[of "\<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>"]
Card_rel_Aleph_rel[THEN Card_rel_is_Ord]
by simp
with \<open>\<alpha> < \<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>\<close> \<open>\<forall>\<beta>\<in>\<alpha>. |F`\<beta>|\<^bsup>M\<^esup> \<le> \<omega>\<close> \<open>\<alpha> \<in> M\<close> assms
have "|\<Union>\<beta>\<in>\<alpha>. F`\<beta>|\<^bsup>M\<^esup> \<le> \<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>"
using InfCard_rel_Aleph_rel[of z] Aleph_rel_zero
subset_imp_lepoll_rel[THEN lepoll_rel_imp_cardinal_rel_le,
of "\<Union>\<beta>\<in>\<alpha>. F`\<beta>" "\<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>"] Aleph_rel_succ
Aleph_rel_increasing[THEN leI, THEN [2] le_trans, of _ 0 z]
Ord_0_lt_iff[THEN iffD1, of z]
by (cases "0<z"; rule_tac lepoll_rel_imp_cardinal_rel_UN_le) (auto, force)
moreover
note \<open>z\<in>M\<close> \<open>Ord(z)\<close>
moreover from \<open>\<forall>\<beta>\<in>\<alpha>. f`\<beta> \<in> F`\<beta>\<close> \<open>f \<in> surj\<^bsup>M[G]\<^esup>(\<alpha>, \<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>)\<close>
\<open>\<alpha> \<in> M\<close> \<open>f \<in> M[G]\<close> and this
have "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup> \<subseteq> (\<Union>\<beta>\<in>\<alpha>. F`\<beta>)"
using ext.mem_surj_abs by (force simp add:surj_def)
moreover from \<open>F \<in> M\<close> \<open>\<alpha> \<in> M\<close>
have "(\<Union>x\<in>\<alpha>. F ` x) \<in> M"
using j.B_replacement
by (intro Union_closed[simplified] RepFun_closed[simplified])
(auto dest:transM)
ultimately
have "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup> \<le> \<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>"
using subset_imp_le_cardinal_rel[of "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>" "\<Union>\<beta>\<in>\<alpha>. F`\<beta>"]
le_trans by auto
with assms
show "False"
using Aleph_rel_increasing not_le_iff_lt[of "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>" "\<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>"]
Card_rel_Aleph_rel[THEN Card_rel_is_Ord]
by auto
qed
end \<comment> \<open>bundle G\_generic1\_lemmas\<close>
end \<comment> \<open>\<^locale>\<open>G_generic3_AC\<close>\<close>
context M_ctm1
begin
abbreviation
Add :: "i" where
"Add \<equiv> Fn(\<omega>, \<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup> \<times> \<omega>, 2)"
end \<comment> \<open>\<^locale>\<open>M_ctm1\<close>\<close>
locale add_generic3 = G_generic3_AC "Fn(\<omega>, \<aleph>\<^bsub>2\<^esub>\<^bsup>##M\<^esup> \<times> \<omega>, 2)" "Fnle(\<omega>, \<aleph>\<^bsub>2\<^esub>\<^bsup>##M\<^esup> \<times> \<omega>, 2)" 0
sublocale add_generic3 \<subseteq> cohen_data \<omega> "\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup> \<times> \<omega>" 2 by unfold_locales auto
context add_generic3
begin
notation Leq (infixl "\<preceq>" 50)
notation Incompatible (infixl "\<bottom>" 50)
lemma Add_subs_preserves_Aleph_succ: "Ord(z) \<Longrightarrow> z\<in>M \<Longrightarrow> Card\<^bsup>M[G]\<^esup>(\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>)"
using ccc_preserves_Aleph_succ ccc_Add_subs_Aleph_2
by auto
lemma Aleph_rel_nats_MG_eq_Aleph_rel_nats_M:
includes G_generic1_lemmas
assumes "z \<in> \<omega>"
shows "\<aleph>\<^bsub>z\<^esub>\<^bsup>M[G]\<^esup> = \<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>"
using assms
proof (induct)
case 0
show ?case
by(rule trans[OF ext.Aleph_rel_zero Aleph_rel_zero[symmetric]])
next
case (succ z)
then
have "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup> \<le> \<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M[G]\<^esup>"
using Aleph_rel_le_Aleph_rel nat_into_M by simp
moreover from \<open>z \<in> \<omega>\<close>
have "\<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup> \<in> M[G]" "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup> \<in> M[G]"
using nat_into_M by simp_all
moreover from this and \<open>\<aleph>\<^bsub>z\<^esub>\<^bsup>M[G]\<^esup> = \<aleph>\<^bsub>z\<^esub>\<^bsup>M\<^esup>\<close> \<open>z \<in> \<omega>\<close>
have "\<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M[G]\<^esup> \<le> \<aleph>\<^bsub>succ(z)\<^esub>\<^bsup>M\<^esup>"
using ext.Aleph_rel_succ nat_into_M
Add_subs_preserves_Aleph_succ[THEN ext.csucc_rel_le, of z]
Aleph_rel_increasing[of z "succ(z)"]
by simp
ultimately
show ?case using le_anti_sym by blast
qed
abbreviation
f_G :: "i" (\<open>f\<^bsub>G\<^esub>\<close>) where
"f\<^bsub>G\<^esub> \<equiv> \<Union>G"
abbreviation
dom_dense :: "i \<Rightarrow> i" where
"dom_dense(x) \<equiv> {p \<in> Add . x \<in> domain(p) }"
declare (in M_ctm2_AC) Fn_nat_closed[simplified setclass_iff, simp, intro]
declare (in M_ctm2_AC) Fnle_nat_closed[simp del, rule del,
simplified setclass_iff, simp, intro]
declare (in M_ctm2_AC) cexp_rel_closed[simplified setclass_iff, simp, intro]
declare (in G_generic3_AC) ext.cexp_rel_closed[simplified setclass_iff, simp, intro]
lemma dom_dense_closed[intro,simp]: "x \<in> \<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup> \<times> \<omega> \<Longrightarrow> dom_dense(x) \<in> M"
using separation_in_domain[of x] nat_into_M
by (rule_tac separation_closed[simplified], blast dest:transM) simp
lemma domain_f_G: assumes "x \<in> \<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>" "y \<in> \<omega>"
shows "\<langle>x, y\<rangle> \<in> domain(f\<^bsub>G\<^esub>)"
proof -
from assms
have "Add = Fn\<^bsup>M\<^esup>(\<omega>,\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>\<times>\<omega>,2)"
using Fn_nat_abs by auto
moreover from this
have "Fnle(\<omega>,\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>\<times>\<omega>,2) = Fnle\<^bsup>M\<^esup>(\<omega>,\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>\<times>\<omega>,2)"
unfolding Fnle_rel_def Fnle_def by auto
moreover from calculation assms
have "dense(dom_dense(\<langle>x, y\<rangle>))"
using dense_dom_dense[of "\<langle>x,y\<rangle>" "\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>\<times>\<omega>" \<omega> 2] InfCard_rel_nat
unfolding dense_def by auto
with assms
obtain p where "p\<in>dom_dense(\<langle>x, y\<rangle>)" "p\<in>G"
using M_generic_denseD[of "dom_dense(\<langle>x, y\<rangle>)"]
by auto
then
show "\<langle>x, y\<rangle> \<in> domain(f\<^bsub>G\<^esub>)" by blast
qed
lemma f_G_funtype:
includes G_generic1_lemmas
shows "f\<^bsub>G\<^esub> : \<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup> \<times> \<omega> \<rightarrow> 2"
using generic domain_f_G Pi_iff Un_filter_is_function generic
subset_trans[OF filter_subset_notion Fn_nat_subset_Pow]
by force
lemma inj_dense_closed[intro,simp]:
"w \<in> \<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup> \<Longrightarrow> x \<in> \<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup> \<Longrightarrow> inj_dense(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>,2,w,x) \<in> M"
using transM[OF _ Aleph_rel2_closed] separation_conj separation_bex
lam_replacement_product
separation_in lam_replacement_fst lam_replacement_snd lam_replacement_constant
lam_replacement_hcomp[OF lam_replacement_snd lam_replacement_restrict']
separation_bex separation_conj
by simp
lemma Aleph_rel2_new_reals:
assumes "w \<in> \<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>" "x \<in> \<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>" "w \<noteq> x"
shows "(\<lambda>n\<in>\<omega>. f\<^bsub>G\<^esub> ` \<langle>w, n\<rangle>) \<noteq> (\<lambda>n\<in>\<omega>. f\<^bsub>G\<^esub> ` \<langle>x, n\<rangle>)"
proof -
have "0\<in>2" by auto
with assms
have "dense(inj_dense(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>,2,w,x))"
unfolding dense_def using dense_inj_dense by auto
with assms
obtain p where "p\<in>inj_dense(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>,2,w,x)" "p\<in>G"
using M_generic_denseD[of "inj_dense(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>,2,w,x)"]
by blast
then
obtain n where "n \<in> \<omega>" "\<langle>\<langle>w, n\<rangle>, 1\<rangle> \<in> p" "\<langle>\<langle>x, n\<rangle>, 0\<rangle> \<in> p"
by blast
moreover from this and \<open>p\<in>G\<close>
have "\<langle>\<langle>w, n\<rangle>, 1\<rangle> \<in> f\<^bsub>G\<^esub>" "\<langle>\<langle>x, n\<rangle>, 0\<rangle> \<in> f\<^bsub>G\<^esub>" by auto
moreover from calculation
have "f\<^bsub>G\<^esub> ` \<langle>w, n\<rangle> = 1" "f\<^bsub>G\<^esub> ` \<langle>x, n\<rangle> = 0"
using f_G_funtype apply_equality
by auto
ultimately
have "(\<lambda>n\<in>\<omega>. f\<^bsub>G\<^esub> ` \<langle>w, n\<rangle>) ` n \<noteq> (\<lambda>n\<in>\<omega>. f\<^bsub>G\<^esub> ` \<langle>x, n\<rangle>) ` n"
by simp
then
show ?thesis by fastforce
qed
definition
h_G :: "i" (\<open>h\<^bsub>G\<^esub>\<close>) where
"h\<^bsub>G\<^esub> \<equiv> \<lambda>\<alpha>\<in>\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>. \<lambda>n\<in>\<omega>. f\<^bsub>G\<^esub>`\<langle>\<alpha>,n\<rangle>"
lemma h_G_in_MG[simp]:
includes G_generic1_lemmas
shows "h\<^bsub>G\<^esub> \<in> M[G]"
using ext.curry_closed[unfolded curry_def] G_in_MG
unfolding h_G_def
by simp
lemma h_G_inj_Aleph_rel2_reals: "h\<^bsub>G\<^esub> \<in> inj\<^bsup>M[G]\<^esup>(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>, \<omega> \<rightarrow>\<^bsup>M[G]\<^esup> 2)"
using Aleph_rel_sub_closed f_G_funtype G_in_MG Aleph_rel_sub_closed
ext.curry_rel_exp[unfolded curry_def] ext.curry_closed[unfolded curry_def]
ext.mem_function_space_rel_abs
by (intro ext.mem_inj_abs[THEN iffD2],simp_all)
(auto simp: inj_def h_G_def dest:Aleph_rel2_new_reals)
lemma Aleph2_extension_le_continuum_rel:
includes G_generic1_lemmas
shows "\<aleph>\<^bsub>2\<^esub>\<^bsup>M[G]\<^esup> \<le> 2\<^bsup>\<up>\<aleph>\<^bsub>0\<^esub>\<^bsup>M[G]\<^esup>,M[G]\<^esup>"
proof -
have "\<aleph>\<^bsub>2\<^esub>\<^bsup>M[G]\<^esup> \<lesssim>\<^bsup>M[G]\<^esup> \<omega> \<rightarrow>\<^bsup>M[G]\<^esup> 2"
using ext.def_lepoll_rel[of "\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>" "\<omega> \<rightarrow>\<^bsup>M[G]\<^esup> 2"]
h_G_inj_Aleph_rel2_reals Aleph_rel_nats_MG_eq_Aleph_rel_nats_M
by auto
moreover from calculation
have "\<aleph>\<^bsub>2\<^esub>\<^bsup>M[G]\<^esup> \<lesssim>\<^bsup>M[G]\<^esup> |\<omega> \<rightarrow>\<^bsup>M[G]\<^esup> 2|\<^bsup>M[G]\<^esup>"
using ext.lepoll_rel_imp_lepoll_rel_cardinal_rel by simp
ultimately
have "|\<aleph>\<^bsub>2\<^esub>\<^bsup>M[G]\<^esup>|\<^bsup>M[G]\<^esup> \<le> 2\<^bsup>\<up>\<aleph>\<^bsub>0\<^esub>\<^bsup>M[G]\<^esup>,M[G]\<^esup>"
using ext.lepoll_rel_imp_cardinal_rel_le[of "\<aleph>\<^bsub>2\<^esub>\<^bsup>M[G]\<^esup>" "\<omega> \<rightarrow>\<^bsup>M[G]\<^esup> 2",
OF _ _ ext.function_space_rel_closed]
ext.Aleph_rel_zero
unfolding cexp_rel_def by simp
then
show "\<aleph>\<^bsub>2\<^esub>\<^bsup>M[G]\<^esup> \<le> 2\<^bsup>\<up>\<aleph>\<^bsub>0\<^esub>\<^bsup>M[G]\<^esup>,M[G]\<^esup>"
using ext.Card_rel_Aleph_rel[of 2, THEN ext.Card_rel_cardinal_rel_eq]
by simp
qed
lemma Aleph_rel_lt_continuum_rel: "\<aleph>\<^bsub>1\<^esub>\<^bsup>M[G]\<^esup> < 2\<^bsup>\<up>\<aleph>\<^bsub>0\<^esub>\<^bsup>M[G]\<^esup>,M[G]\<^esup>"
using Aleph2_extension_le_continuum_rel
ext.Aleph_rel_increasing[of 1 2] le_trans by auto
corollary not_CH: "\<aleph>\<^bsub>1\<^esub>\<^bsup>M[G]\<^esup> \<noteq> 2\<^bsup>\<up>\<aleph>\<^bsub>0\<^esub>\<^bsup>M[G]\<^esup>,M[G]\<^esup>"
using Aleph_rel_lt_continuum_rel by auto
end \<comment> \<open>\<^locale>\<open>add_generic3\<close>\<close>
subsection\<open>Models of fragments of $\ZFC + \neg \CH$\<close>
definition
ContHyp :: "o" where
"ContHyp \<equiv> \<aleph>\<^bsub>1\<^esub> = 2\<^bsup>\<up>\<aleph>\<^bsub>0\<^esub>\<^esup>"
relativize functional "ContHyp" "ContHyp_rel"
notation ContHyp_rel (\<open>CH\<^bsup>_\<^esup>\<close>)
relationalize "ContHyp_rel" "is_ContHyp"
context M_ZF_library
begin
is_iff_rel for "ContHyp"
using is_cexp_iff is_Aleph_iff[of 0] is_Aleph_iff[of 1]
unfolding is_ContHyp_def ContHyp_rel_def
by (auto simp del:setclass_iff) (rule rexI[of _ _ M, OF _ nonempty], auto)
end \<comment> \<open>\<^locale>\<open>M_ZF_library\<close>\<close>
synthesize "is_ContHyp" from_definition assuming "nonempty"
arity_theorem for "is_ContHyp_fm"
notation is_ContHyp_fm (\<open>\<cdot>CH\<cdot>\<close>)
theorem ctm_of_not_CH:
assumes
"M \<approx> \<omega>" "Transset(M)" "M \<Turnstile> ZC \<union> {\<cdot>Replacement(p)\<cdot> . p \<in> overhead_notCH}"
"\<Phi> \<subseteq> formula" "M \<Turnstile> { \<cdot>Replacement(ground_repl_fm(\<phi>))\<cdot> . \<phi> \<in> \<Phi>}"
shows
"\<exists>N.
M \<subseteq> N \<and> N \<approx> \<omega> \<and> Transset(N) \<and> N \<Turnstile> ZC \<union> {\<cdot>\<not>\<cdot>CH\<cdot>\<cdot>} \<union> { \<cdot>Replacement(\<phi>)\<cdot> . \<phi> \<in> \<Phi>} \<and>
(\<forall>\<alpha>. Ord(\<alpha>) \<longrightarrow> (\<alpha> \<in> M \<longleftrightarrow> \<alpha> \<in> N))"
proof -
from \<open>M \<Turnstile> ZC \<union> {\<cdot>Replacement(p)\<cdot> . p \<in> overhead_notCH}\<close>
interpret M_ZFC3 M
using M_satT_overhead_imp_M_ZF3 unfolding overhead_notCH_def by force
from \<open>M \<Turnstile> ZC \<union> {\<cdot>Replacement(p)\<cdot> . p \<in> overhead_notCH}\<close> \<open>Transset(M)\<close>
interpret M_ZF_ground_notCH_trans M
using M_satT_imp_M_ZF_ground_notCH_trans
unfolding ZC_def by auto
from \<open>M \<approx> \<omega>\<close>
obtain enum where "enum \<in> bij(\<omega>,M)"
using eqpoll_sym unfolding eqpoll_def by blast
then
interpret M_ctm3_AC M enum by unfold_locales
interpret cohen_data \<omega> "\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup> \<times> \<omega>" 2 by unfold_locales auto
have "Add \<in> M" "Add_le(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>) \<in> M"
using nat_into_M Aleph_rel_closed M_nat cartprod_closed Fn_nat_closed Fnle_nat_closed
by simp_all
then
interpret forcing_data1 "Add" "Add_le(\<aleph>\<^bsub>2\<^esub>\<^bsup>M\<^esup>)" 0 M enum
by unfold_locales simp_all
obtain G where "M_generic(G)"
using generic_filter_existence[OF one_in_P]
by auto
moreover from this
interpret add_generic3 M enum G by unfold_locales
have "\<not> (\<aleph>\<^bsub>1\<^esub>\<^bsup>M[G]\<^esup> = 2\<^bsup>\<up>\<aleph>\<^bsub>0\<^esub>\<^bsup>M[G]\<^esup>,M[G]\<^esup>)"
using not_CH .
then
have "M[G], [] \<Turnstile> \<cdot>\<not>\<cdot>CH\<cdot>\<cdot>"
using ext.is_ContHyp_iff
by (simp add:ContHyp_rel_def)
then
have "M[G] \<Turnstile> ZC \<union> {\<cdot>\<not>\<cdot>CH\<cdot>\<cdot>}"
using ext.M_satT_ZC by auto
moreover
have "Transset(M[G])" using Transset_MG .
moreover
have "M \<subseteq> M[G]" using M_subset_MG[OF one_in_G] generic by simp
moreover
note \<open>M \<Turnstile> { \<cdot>Replacement(ground_repl_fm(\<phi>))\<cdot> . \<phi> \<in> \<Phi>}\<close> \<open>\<Phi> \<subseteq> formula\<close>
ultimately
show ?thesis
using Ord_MG_iff MG_eqpoll_nat satT_ground_repl_fm_imp_satT_ZF_replacement_fm[of \<Phi>]
by (rule_tac x="M[G]" in exI, blast)
qed
lemma ZF_replacement_overhead_sub_ZFC: "{\<cdot>Replacement(p)\<cdot> . p \<in> overhead} \<subseteq> ZFC"
using overhead_type unfolding ZFC_def ZF_def ZF_schemes_def by auto
lemma ZF_replacement_overhead_notCH_sub_ZFC: "{\<cdot>Replacement(p)\<cdot> . p \<in> overhead_notCH} \<subseteq> ZFC"
using overhead_notCH_type unfolding ZFC_def ZF_def ZF_schemes_def by auto
lemma ZF_replacement_overhead_CH_sub_ZFC: "{\<cdot>Replacement(p)\<cdot> . p \<in> overhead_CH} \<subseteq> ZFC"
using overhead_CH_type unfolding ZFC_def ZF_def ZF_schemes_def by auto
corollary ctm_ZFC_imp_ctm_not_CH:
assumes
"M \<approx> \<omega>" "Transset(M)" "M \<Turnstile> ZFC"
shows
"\<exists>N.
M \<subseteq> N \<and> N \<approx> \<omega> \<and> Transset(N) \<and> N \<Turnstile> ZFC \<union> {\<cdot>\<not>\<cdot>CH\<cdot>\<cdot>} \<and>
(\<forall>\<alpha>. Ord(\<alpha>) \<longrightarrow> (\<alpha> \<in> M \<longleftrightarrow> \<alpha> \<in> N))"
proof-
from assms
have "\<exists>N.
M \<subseteq> N \<and>
N \<approx> \<omega> \<and>
Transset(N) \<and>
N \<Turnstile> ZC \<and> N \<Turnstile> {\<cdot>\<not>\<cdot>CH\<cdot>\<cdot>} \<and> N \<Turnstile> {\<cdot>Replacement(x)\<cdot> . x \<in> formula} \<and> (\<forall>\<alpha>. Ord(\<alpha>) \<longrightarrow> \<alpha> \<in> M \<longleftrightarrow> \<alpha> \<in> N)"
using ctm_of_not_CH[of M formula] satT_ZFC_imp_satT_ZC[of M]
satT_mono[OF _ ground_repl_fm_sub_ZFC, of M]
satT_mono[OF _ ZF_replacement_overhead_notCH_sub_ZFC, of M]
satT_mono[OF _ ZF_replacement_fms_sub_ZFC, of M]
by (simp add: satT_Un_iff)
then
obtain N where "N \<Turnstile> ZC" "N \<Turnstile> {\<cdot>\<not>\<cdot>CH\<cdot>\<cdot>}" "N \<Turnstile> {\<cdot>Replacement(x)\<cdot> . x \<in> formula}"
"M \<subseteq> N" "N \<approx> \<omega>" "Transset(N)" "(\<forall>\<alpha>. Ord(\<alpha>) \<longrightarrow> \<alpha> \<in> M \<longleftrightarrow> \<alpha> \<in> N)"
by auto
moreover from this
have "N \<Turnstile> ZFC"
using satT_ZC_ZF_replacement_imp_satT_ZFC
by auto
moreover from this and \<open>N \<Turnstile> {\<cdot>\<not>\<cdot>CH\<cdot>\<cdot>}\<close>
have "N \<Turnstile> ZFC \<union> {\<cdot>\<not>\<cdot>CH\<cdot>\<cdot>}"
by auto
ultimately
show ?thesis by auto
qed
end |
C From: VEGA::SANDER "Chris Sander" 9-JUL-1987 23:21
C To: HOLM
C Subj: U3B - I have other routines that go with it + for testing
SUBROUTINE U3B(W,X,Y,N,MODE,RMS,U,T,IER)
C.... this version copied July 1986. DO NOT REDISTRIBUTE.
C.... If you want this routine, ask Wolfgang Kabsch !!
C**** CALCULATES A BEST ROTATION & TRANSLATION BETWEEN TWO VECTOR SETS
C**** SUCH THAT U*X+T IS THE CLOSEST APPROXIMATION TO Y.
C**** THE CALCULATED BEST SUPERPOSITION MAY NOT BE UNIQUE AS INDICATED
C**** BY A RESULT VALUE IER=-1. HOWEVER IT IS GARANTIED THAT WITHIN
C**** NUMERICAL TOLERANCES NO OTHER SUPERPOSITION EXISTS GIVING A
C**** SMALLER VALUE FOR RMS.
C**** THIS VERSION OF THE ALGORITHM IS OPTIMIZED FOR THREE-DIMENSIONAL
C**** REAL VECTOR SPACE.
C**** USE OF THIS ROUTINE IS RESTRICTED TO NON-PROFIT ACADEMIC
C**** APPLICATIONS.
C**** PLEASE REPORT ERRORS TO
C**** PROGRAMMER: W.KABSCH MAX-PLANCK-INSTITUTE FOR MEDICAL RESEARCH
C JAHNSTRASSE 29, 6900 HEIDELBERG, FRG.
C**** REFERENCES: W.KABSCH ACTA CRYST.(1978).A34,827-828
C W.KABSCH ACTA CRYST.(1976).A32,922-923
C
C W - W(M) IS WEIGHT FOR ATOM PAIR # M (GIVEN)
C X - X(I,M) ARE COORDINATES OF ATOM # M IN SET X (GIVEN)
C Y - Y(I,M) ARE COORDINATES OF ATOM # M IN SET Y (GIVEN)
C N - N IS NUMBER OF ATOM PAIRS (GIVEN)
C MODE - 0:CALCULATE RMS ONLY (GIVEN)
C 1:CALCULATE RMS,U,T (TAKES LONGER)
C RMS - SUM OF W*(UX+T-Y)**2 OVER ALL ATOM PAIRS (RESULT)
C U - U(I,J) IS ROTATION MATRIX FOR BEST SUPERPOSITION (RESULT)
C T - T(I) IS TRANSLATION VECTOR FOR BEST SUPERPOSITION (RESULT)
C IER - 0: A UNIQUE OPTIMAL SUPERPOSITION HAS BEEN DETERMINED(RESULT)
C -1: SUPERPOSITION IS NOT UNIQUE BUT OPTIMAL
C -2: NO RESULT OBTAINED BECAUSE OF NEGATIVE WEIGHTS W
C OR ALL WEIGHTS EQUAL TO ZERO.
C
C-----------------------------------------------------------------------
INTEGER IP(9),IP2312(4),I,J,K,L,M1,M,IER,MODE
REAL W(N),X(3,N),Y(3,N),U(3,3),T(3),RMS,SIGMA
REAL*8 H,G,D, SQRTH,CTH,STH, SS(6), A(3,3),
1 SS1, SS2, SS3, SS4, SS5, SS6, P
REAL*8 R(3,3),XC(3),YC(3),WC,B(3,3),E0,
1 E(3),E1,E2,E3,SPUR,DET,COF,TOL,
2 RR(6),RR1,RR2,RR3,RR4,RR5,RR6,
3 ZERO,ONE,TWO,THREE,SQRT3
EQUIVALENCE (RR1,RR(1)),(RR2,RR(2)),(RR3,RR(3)),
1 (RR4,RR(4)),(RR5,RR(5)),(RR6,RR(6)),
2 (SS1,SS(1)),(SS2,SS(2)),(SS3,SS(3)),
3 (SS4,SS(4)),(SS5,SS(5)),(SS6,SS(6)),
4 (E1,E(1)),(E2,E(2)),(E3,E(3))
DATA SQRT3,TOL/1.73205080756888D+00, 1.0D-2/
DATA ZERO,ONE,TWO,THREE/0.0D+00, 1.0D+00, 2.0D+00, 3.0D+00/
DATA IP/1,2,4, 2,3,5, 4,5,6/
DATA IP2312/2,3,1,2/
WC=ZERO
RMS=0.0
E0=ZERO
DO 1 I=1,3
XC(I)=ZERO
YC(I)=ZERO
T(I)=0.0
DO 1 J=1,3
D=ZERO
IF (I.EQ.J)D=ONE
U(I,J)=D
A(I,J)=D
1 R(I,J)=ZERO
IER=-1
IF (N.LT.1)RETURN
C**** DETERMINE CENTROIDS OF BOTH VECTOR SETS X AND Y
IER=-2
DO 2 M=1,N
IF (W(M).LT.0.0)RETURN
WC=WC+W(M)
DO 2 I=1,3
XC(I)=XC(I)+W(M)*X(I,M)
2 YC(I)=YC(I)+W(M)*Y(I,M)
IF (WC.LE.ZERO)RETURN
DO 3 I=1,3
XC(I)=XC(I)/WC
3 YC(I)=YC(I)/WC
C**** DETERMINE CORRELATION MATRIX R BETWEEN VECTOR SETS Y AND X
DO 4 M=1,N
DO 4 I=1,3
E0=E0+W(M)*((X(I,M)-XC(I))**2+(Y(I,M)-YC(I))**2)
D=W(M)*(Y(I,M)-YC(I))
DO 4 J=1,3
4 R(I,J)=R(I,J)+D*(X(J,M)-XC(J))
C**** CALCULATE DETERMINANT OF R(I,J)
DET=R(1,1)*(R(2,2)*R(3,3)-R(2,3)*R(3,2))
1 -R(1,2)*(R(2,1)*R(3,3)-R(2,3)*R(3,1))
2 +R(1,3)*(R(2,1)*R(3,2)-R(2,2)*R(3,1))
SIGMA=DET
C**** FORM UPPER TRIANGLE OF TRANSPOSED(R)*R
M=0
DO 5 J=1,3
DO 5 I=1,J
M=M+1
5 RR(M)=R(1,I)*R(1,J)+R(2,I)*R(2,J)+R(3,I)*R(3,J)
C***************** EIGENVALUES *****************************************
C**** FORM CHARACTERISTIC CUBIC X**3-3*SPUR*X**2+3*COF*X-DET=0
SPUR=(RR1+RR3+RR6)/THREE
COF=(RR3*RR6-RR5*RR5+RR1*RR6-RR4*RR4+RR1*RR3-RR2*RR2)/THREE
DET=DET*DET
DO 6 I=1,3
6 E(I)=SPUR
IF (SPUR.LE.ZERO)GO TO 40
C**** REDUCE CUBIC TO STANDARD FORM Y**3-3HY+2G=0 BY PUTTING X=Y+SPUR
D=SPUR*SPUR
H=D-COF
G=(SPUR*COF-DET)/TWO-SPUR*H
C**** SOLVE CUBIC. ROOTS ARE E1,E2,E3 IN DECREASING ORDER
IF (H.LE.ZERO)GO TO 8
SQRTH=SQRT(H)
C WRITE(*,*) H,G,SPUR, COF, DET
C IF (ABS(G).LT.1E12) GO TO 7
C WRITE(*,*) 'KUKKUU'
C D=H*H/G-G/H
C D=D*G*H
C GO TO 9
7 D=H*H*H-G*G
9 IF (D.LT.ZERO)D=ZERO
c the result of atan2 is undefined if both arguments are zero !
D=ATAN2(SQRT(D),-G)/THREE
CTH=SQRTH*COS(D)
STH=SQRTH*SQRT3*SIN(D)
E1=SPUR+CTH+CTH
E2=SPUR-CTH+STH
E3=SPUR-CTH-STH
IF (MODE)10,50,10
C.....HANDLE SPECIAL CASE OF 3 IDENTICAL ROOTS
8 IF (MODE)30,50,30
C**************** EIGENVECTORS *****************************************
10 DO 15 L=1,3,2
D=E(L)
SS1=(D-RR3)*(D-RR6)-RR5*RR5
SS2=(D-RR6)*RR2+RR4*RR5
SS3=(D-RR1)*(D-RR6)-RR4*RR4
SS4=(D-RR3)*RR4+RR2*RR5
SS5=(D-RR1)*RR5+RR2*RR4
SS6=(D-RR1)*(D-RR3)-RR2*RR2
J=1
IF (ABS(SS1).GE.ABS(SS3))GO TO 12
J=2
IF (ABS(SS3).GE.ABS(SS6))GO TO 13
11 J=3
GO TO 13
12 IF (ABS(SS1).LT.ABS(SS6))GO TO 11
13 D=ZERO
J=3*(J-1)
DO 14 I=1,3
K=IP(I+J)
A(I,L)=SS(K)
14 D=D+SS(K)*SS(K)
IF (D.GT.ZERO)D=ONE/SQRT(D)
DO 15 I=1,3
15 A(I,L)=A(I,L)*D
D=A(1,1)*A(1,3)+A(2,1)*A(2,3)+A(3,1)*A(3,3)
M1=3
M=1
IF ((E1-E2).GT.(E2-E3))GO TO 16
M1=1
M=3
16 P=ZERO
DO 17 I=1,3
A(I,M1)=A(I,M1)-D*A(I,M)
17 P=P+A(I,M1)**2
IF (P.LE.TOL)GO TO 19
P=ONE/SQRT(P)
DO 18 I=1,3
18 A(I,M1)=A(I,M1)*P
GO TO 21
19 P=ONE
DO 20 I=1,3
IF (P.LT.ABS(A(I,M)))GO TO 20
P=ABS(A(I,M))
J=I
20 CONTINUE
K=IP2312(J)
L=IP2312(J+1)
P=SQRT(A(K,M)**2+A(L,M)**2)
IF (P.LE.TOL)GO TO 40
A(J,M1)=ZERO
A(K,M1)=-A(L,M)/P
A(L,M1)= A(K,M)/P
21 A(1,2)=A(2,3)*A(3,1)-A(2,1)*A(3,3)
A(2,2)=A(3,3)*A(1,1)-A(3,1)*A(1,3)
A(3,2)=A(1,3)*A(2,1)-A(1,1)*A(2,3)
C****************** ROTATION MATRIX ************************************
30 DO 32 L=1,2
D=ZERO
DO 31 I=1,3
B(I,L)=R(I,1)*A(1,L)+R(I,2)*A(2,L)+R(I,3)*A(3,L)
31 D=D+B(I,L)**2
IF (D.GT.ZERO)D=ONE/SQRT(D)
DO 32 I=1,3
32 B(I,L)=B(I,L)*D
D=B(1,1)*B(1,2)+B(2,1)*B(2,2)+B(3,1)*B(3,2)
P=ZERO
DO 33 I=1,3
B(I,2)=B(I,2)-D*B(I,1)
33 P=P+B(I,2)**2
IF (P.LE.TOL)GO TO 35
P=ONE/SQRT(P)
DO 34 I=1,3
34 B(I,2)=B(I,2)*P
GO TO 37
35 P=ONE
DO 36 I=1,3
IF (P.LT.DABS(B(I,1)))GO TO 36
P=DABS(B(I,1))
J=I
36 CONTINUE
K=IP2312(J)
L=IP2312(J+1)
P=DSQRT(B(K,1)**2+B(L,1)**2)
IF (P.LE.TOL)GO TO 40
B(J,2)=ZERO
B(K,2)=-B(L,1)/P
B(L,2)= B(K,1)/P
37 B(1,3)=B(2,1)*B(3,2)-B(2,2)*B(3,1)
B(2,3)=B(3,1)*B(1,2)-B(3,2)*B(1,1)
B(3,3)=B(1,1)*B(2,2)-B(1,2)*B(2,1)
DO 39 I=1,3
DO 39 J=1,3
39 U(I,J)=B(I,1)*A(J,1)+B(I,2)*A(J,2)+B(I,3)*A(J,3)
C****************** TRANSLATION VECTOR *********************************
40 DO 41 I=1,3
41 T(I)=YC(I)-U(I,1)*XC(1)-U(I,2)*XC(2)-U(I,3)*XC(3)
C********************** RMS ERROR **************************************
50 DO 51 I=1,3
IF (E(I).LT.ZERO)E(I)=ZERO
51 E(I)=DSQRT(E(I))
IER=0
IF (E2.LE.(E1*1.0D-05))IER=-1
D=E3
IF (SIGMA.GE.0.0)GO TO 52
D=-D
IF ((E2-E3).LE.(E1*1.0D-05))IER=-1
52 D=D+E2+E1
RMS=E0-D-D
IF (RMS.LT.0.0)RMS=0.0
RETURN
END
C.....END U3B...........................................................
|
Romania abolished compulsory military service on October 23 , 2006 . This came about due to a 2003 constitutional amendment which allowed the parliament to make military service optional . The Romanian Parliament voted to abolish conscription in October 2005 , with the vote formalising one of many military modernisation and reform programmes that Romania agreed to when it joined NATO in March 2004 .
|
section "Arithmetic and Boolean Expressions"
theory AExp imports Main begin
subsection "Arithmetic Expressions"
type_synonym vname = string
type_synonym val = int
type_synonym state = "vname \<Rightarrow> val"
datatype aexp = N int | V vname | Plus aexp aexp
fun aval :: "aexp \<Rightarrow> state \<Rightarrow> val" where
"aval (N n) s = n" |
"aval (V x) s = s x" |
"aval (Plus a\<^sub>1 a\<^sub>2) s = aval a\<^sub>1 s + aval a\<^sub>2 s"
value "aval (Plus (V ''x'') (N 5)) (\<lambda>x. if x = ''x'' then 7 else 0)"
text \<open>The same state more concisely:\<close>
value "aval (Plus (V ''x'') (N 5)) ((\<lambda>x. 0) (''x'':= 7))"
text \<open>A little syntax magic to write larger states compactly:\<close>
definition null_state ("<>") where
"null_state \<equiv> \<lambda>x. 0"
syntax
"_State" :: "updbinds => 'a" ("<_>")
translations
"_State ms" == "_Update <> ms"
"_State (_updbinds b bs)" <= "_Update (_State b) bs"
text \<open>We can now write a series of updates to the function @{term "\<lambda>x. 0"} compactly:\<close>
lemma "<a := 1, b := 2> = (<> (a := 1)) (b := (2::int))"
by (rule refl)
value "aval (Plus (V ''x'') (N 5)) <''x'' := 7>"
text \<open>In the @{term "<a := b>"} syntax, variables that are not mentioned are 0 by default:\<close>
value "aval (Plus (V ''x'') (N 5)) <''y'' := 7>"
text \<open>Note that this \<open><\<dots>>\<close> syntax works for any function space \<open>\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2\<close> where \<open>\<tau>\<^sub>2\<close> has a 0.\<close>
subsection "Constant Folding"
text \<open>Evaluate constant subexpressions:\<close>
fun asimp_const :: "aexp \<Rightarrow> aexp" where
"asimp_const (N n) = N n" |
"asimp_const (V x) = V x" |
"asimp_const (Plus a\<^sub>1 a\<^sub>2) =
(case (asimp_const a\<^sub>1, asimp_const a\<^sub>2) of
(N n\<^sub>1, N n\<^sub>2) \<Rightarrow> N(n\<^sub>1+n\<^sub>2) |
(b\<^sub>1,b\<^sub>2) \<Rightarrow> Plus b\<^sub>1 b\<^sub>2)"
theorem aval_asimp_const:
"aval (asimp_const a) s = aval a s"
apply(induction a)
apply (auto split: aexp.split)
done
text \<open>Now we also eliminate all occurrences 0 in additions.
The standard method: optimized versions of the constructors:\<close>
fun plus :: "aexp \<Rightarrow> aexp \<Rightarrow> aexp" where
"plus (N i\<^sub>1) (N i\<^sub>2) = N(i\<^sub>1+i\<^sub>2)" |
"plus (N i) a = (if i=0 then a else Plus (N i) a)" |
"plus a (N i) = (if i=0 then a else Plus a (N i))" |
"plus a\<^sub>1 a\<^sub>2 = Plus a\<^sub>1 a\<^sub>2"
lemma aval_plus [simp]:
"aval (plus a1 a2) s = aval a1 s + aval a2 s"
apply(induction a1 a2 rule: plus.induct)
apply auto
done
fun asimp :: "aexp \<Rightarrow> aexp" where
"asimp (N n) = N n" |
"asimp (V x) = V x" |
"asimp (Plus a\<^sub>1 a\<^sub>2) = plus (asimp a\<^sub>1) (asimp a\<^sub>2)"
text \<open>Note that in @{const asimp_const} the optimized constructor was inlined.
Making it a separate function @{const plus} improves modularity of the code and the proofs.\<close>
value "asimp (Plus (Plus (N 0) (N 0)) (Plus (V ''x'') (N 0)))"
theorem aval_asimp[simp]:
"aval (asimp a) s = aval a s"
apply(induction a)
apply auto
done
end
|
%DEMO_PHASEPLOT Give demos of nice phaseplots
%
% This script creates a synthetic signal and then uses |phaseplot| on it,
% using several of the possible options.
%
% For real-life signal only small parts should be analyzed. In the chosen
% demo the fundamental frequency of the speaker can be nicely seen.
%
% .. figure::
%
% Synthetic signal
%
% Compare this to the pictures in reference 2 and 3. In
% the first two figures a synthetic signal is analyzed. It consists of a
% sinusoid, a small Delta peak, a periodic triangular function and a
% Gaussian. In the time-invariant version in the first part the periodicity
% of the sinusoid can be nicely seen also in the phase coefficients. Also
% the points of discontinuities can be seen as asymptotic lines approached
% by parabolic shapes. In the third part both properties, periodicity and
%   discontinuities can be nicely seen. A comparison to the spectrogram shows
% that the rectangular part in the middle of the signal can be seen by the
%   phase plot, but not by the spectrogram.
%
% In the frequency-invariant version, the fundamental frequency of the
%   sinusoid can still be guessed as the position of a horizontal
% asymptotic line.
%
% .. figure::
%
% Synthetic signal, thresholded.
%
% This figure shows the same as Figure 1, except that values with low
% magnitude has been removed.
%
% .. figure::
%
% Speech signal.
%
% The figure shows a part of the 'linus' signal. The fundamental
% frequency of the speaker can be nicely seen.
%
% References: carmonmultiridge1 Carmona98practical gross1
disp('Type "help demo_phaseplot" to see a description of how this demo works.');
tt=0:98;
f1=sin(2*pi*tt/33); % sinusoid
f2=zeros(1,100);
f2(50)=1; % delta-like
f3=fftshift(firwin('tria',32)).';
f4 = fftshift(pgauss(100)).';
f4 = f4/max(f4);
sig = 0.9*[f1 0 f2 f3 -f3 f3 f4 0 0 0 0];
figure(1);
sgram(sig,'lin','nf');
figure(2);
subplot(3,1,1);
plot(sig);
title('Synthetic signal');
subplot(3,1,2);
phaseplot(sig,'freqinv');
title('Phaseplot of synthetic signal - frequency-invariant phase');
subplot(3,1,3);
phaseplot(sig,'timeinv')
title('Phaseplot of synthetic signal - time-invariant phase');
figure(3);
subplot(3,1,1);
plot(sig);
title('Synthetic signal');
subplot(3,1,2);
phaseplot(sig,'freqinv','thr',0.001)
title('Phaseplot of synthetic signal - thresholded version, freq. inv. phase');
subplot(3,1,3);
phaseplot(sig,'thr',0.001)
title('Phaseplot of synthetic signal - thresholded version, time inv. phase');
figure(4);
f=linus;
f = f(4500:8000);
subplot(3,1,1);
plot(f);
axis tight;
title('Speech signal: linus');
subplot(3,1,2);
phaseplot(f)
title('Phaseplot of linus');
subplot(3,1,3);
phaseplot(f,'thr',.001)
title('Phaseplot of linus - thresholded version');
|
Subroutine rbox(x1,x2,y1,y2,r)
dimension x(2),y(2)
*
x(1) = x1
x(2) = x1
y(1) = y1+r
y(2) = y2-r
Call Ipl(2,x,y)
x(1) = x2
x(2) = x2
Call Ipl(2,x,y)
y(1) = y1
y(2) = y1
x(1) = x1+r
x(2) = x2-r
Call Ipl(2,x,y)
y(1) = y2
y(2) = y2
Call Ipl(2,x,y)
call Igarc(x1+r,y1+r,r,r,180.,270.)
call Igarc(x1+r,y2-r,r,r, 90.,180.)
call Igarc(x2-r,y2-r,r,r, 0., 90.)
call Igarc(x2-r,y1+r,r,r,270., 0.)
*
End
|
module Minecraft.Base.PreClassic.Cobblestone.Item.Export
import public Minecraft.Core.Entity.Pickup.Export
import public Minecraft.Base.PreClassic.Cobblestone.Block
import public Minecraft.Base.PreClassic.Cobblestone.Item
import public Minecraft.Base.PreClassic.Cobblestone.ItemEntity
%default total
[cobblestoneItem']
Item Cobblestone.Item where
id = "minecraft:cobblestone"
stackable = Just 64
givenName = \x => x.base.givenName
proj = \x => new Cobblestone.ItemEntity Cobblestone.MkItemEntity
[putCobblestone']
Put Cobblestone.Item where
putItem self = new Cobblestone.Block Cobblestone.MkBlock
[cobblestoneVtbl']
Vtbl Cobblestone.Item where
vtable (Item Cobblestone.Item) = Just cobblestoneItem
vtable (Put Cobblestone.Item) = Just putCobblestone
vtable _ = Nothing
cobblestoneItem = cobblestoneItem'
putCobblestone = putCobblestone'
cobblestoneVtbl = cobblestoneVtbl'
|
module Fin where
data Nat : Set where
zero : Nat
succ : Nat -> Nat
data Fin : Nat -> Set where
fzero : {n : Nat} -> Fin (succ n)
fsucc : {n : Nat} -> Fin n -> Fin (succ n)
|
\documentclass[10pt]{article}
\usepackage{vmargin}
\setpapersize{A4}
% {left}{top}{right}{bottom}{headheight}{headsep}{footheight}{footskip}
\setmarginsrb{1.5in}{1in}{1in}{1.5in}{0cm}{0cm}{0pt}{0pt}
\title{spConfig-0.1.3 API Reference}
\author{Christopher Wright, [email protected]\\
Steve Mokris, [email protected]}
\date{\today}
\begin{document}
\maketitle
\tableofcontents
\section{Introduction}
To visualize the way configuration files work in spConfig, one simply needs to
think of a hierarchical filesystem. Configuration files are arranged in a
tree-like structure, composed of `nodes' (analogous to directories),
`attributes' (files), and `text' (which is a sort of metadata). Additionally,
commands such as `notices' can be inserted into the config tree. There are
plans for simple logic commands and an `include' operation in the future.
The syntax is similar to XML, and the parser should be able to grok most XML
files. We will perform a more extensive review of this in the future. For a
fairly complete example and documentation of the syntax, see demo/demo.conf.
\pagebreak
\section{Structure}
The base of any config tree is a particular node, known as the \textbf{root
node}. Typical operation involves using the library to read from a config file
(on disk or in memory) into a memory structure, then using the library-provided
routines to glean information from this structure. It is also possible to use
the library-provided routines to construct such a structure in its entirety.
In the future, it will be possible to write out these memory structures to disk
in our config file format.
\subsection{struct spConfigNode}
\begin{verbatim}typedef struct spConfigNodeS
{
char *name;
char confFlag;
char *text;
int numAttributes;
int numChildren;
spConfigAttribute **attributes;
struct spConfigNodeS **children;
struct spConfigNodeS *parent;
spConfigMessage *message;
void*(*callback)(struct spConfigNodeS*node,char*string);
} spConfigNode;\end{verbatim}
\begin{itemize}
\item[name] is obviously the name of the node. When reading in a configuration file from disk, the root node's name points to NULL, but
this is not a requirement for structures created in memory (more on this below).
\item[text] stores the text that happened to be located in the node, between the opening tag and closing tag, if any. It does not include text within child nodes.
\item[numAttributes] number of attributes this node has
\item[numChildren] number of children this node has
\item[**attributes] one-dimensional array of attributes
\item[**children] one-dimensional array of child nodes.
\item[parent] is a pointer to the parent node of this node, if there is any (NULL if root node).
Unfortunately, sharing a node between two parents will result in a pointer to only one.
\item[message] is the beginning of a linked list of messages attached to this node.
\item[confFlag] can have any of the following set:
\begin{verbatim}
SP_CONFIG_NODE_ALLOW_TEXT - this node accepts text if parsed
SP_CONFIG_NODE_ALLOW_ATTRIBUTES - this node accepts additional attributes
SP_CONFIG_NODE_ALLOW_NODES - this node accepts additional child nodes
\end{verbatim}
These flag values are not yet implemented, however.
\item[callback] is a function used to handle include files. Because we don't support
include files just yet, this field is presently useless.
\end{itemize}
\subsection{struct spConfigAttribute}
\noindent\textbf{attributes} are name/value pairs defined within the node begin tag.
\begin{verbatim}
typedef struct
{
char *name;
char type;
union
{
int i;
double d;
char *string;
} value;
} spConfigAttribute;
\end{verbatim}
\begin{itemize}
\item[name] stores the attribute's name (this should always be set).
\item[type] determines which type of data this attribute holds, if any.\\
\begin{verbatim}SP_CONFIG_VALUE_NONE - no values are associated with this attribute
SP_CONFIG_VALUE_INT - the value is an int (use value.i)
SP_CONFIG_VALUE_FLOAT - the value is a double (use value.d)
SP_CONFIG_VALUE_STRING - the value is a string (use value.string)\end{verbatim}
\end{itemize}
\subsection{struct spConfigMessage}
\noindent And finally, we have \textbf{messages}.\\
\begin{verbatim}
typedef struct spConfigMessageS
{
int line,col;
char type;
char *text;
char *filename;
struct spConfigMessageS *next;
} spConfigMessage;
\end{verbatim}
\begin{itemize}
\item[line] number (of input file/string) upon which this message occurred
\item[col] column of message
\item[type] determines the severity of the message:\\
\begin{verbatim}
SP_CONFIG_MESSAGE_INFO - information, not a problem
SP_CONFIG_MESSAGE_NOTICE - small trivial warning, nothing too bad
SP_CONFIG_MESSAGE_WARNING - something may have been misparsed
but overall we're still on track.
SP_CONFIG_MESSAGE_ERROR - something is very wrong, parsing may
be wrong after this point.
\end{verbatim}
\item[text] stores the actual message.
\item[filename] stores which file the message
originated in (currently useless, we don't support include files yet).
\item[next] stores a pointer to the next message, or NULL of there
are no more messages.
\end{itemize}
\pagebreak
\section{API}
\subsection{Allocation / Duplication / Deallocation}
\subsubsection*{New}
\noindent\begin{tabular}{l l l @{}}
spConfigNode&\textbf{*spConfigNodeNew}&();\\
spConfigAttribute&\textbf{*spConfigAttributeNew}&();\\
spConfigMessage&\textbf{*spConfigMessageNew}&();\\
\end{tabular}
\noindent These functions return a newly allocated and initialized item, or
NULL on error (out of memory).
\subsubsection*{Clone}
\noindent\begin{tabular}{l l l @{}}
spConfigNode&\textbf{*spConfigNodeClone}&(spConfigNode *node);\\
spConfigAttribute&\textbf{*spConfigAttributeClone}&(spConfigAttribute *att);\\
spConfigMessage&\textbf{*spConfigMessageClone}&(spConfigMessage *msg);\\
\end{tabular}
\noindent These functions return a complete copy of the input item. Strings
are copied, child nodes and values are copied, and any other pointers are
duplicated, not shared. Thus it is safe to clone a tree and hack on one. The
other will be preserved.
\subsubsection*{Copy}
\noindent\begin{tabular}{l l l @{}}
spConfigNode&\textbf{*spConfigNodeCopy}&(spConfigNode *node);\\
spConfigAttribute&\textbf{*spConfigAttributeCopy}&(spConfigAttribute *att);\\
spConfigMessage&\textbf{*spConfigMessageCopy}&(spConfigMessage *msg);\\
\end{tabular}
\noindent These functions copy the values of the input item, returning a newly
allocated item with the same values. Pointers are shared, so copying a tree
and hacking on one will hack up the other as well.  Because shared pointers make
the copies very hard to destruct cleanly (memory may already have been freed),
these functions probably shouldn't be used unless you know what you're doing.
\subsubsection*{Free}
\noindent\begin{tabular}{l l l @{}}
void&\textbf{spConfigNodeFree}&(spConfigNode *node);\\
void&\textbf{spConfigAttributeFree}&(spConfigAttribute *att);\\
void&\textbf{spConfigMessageFree}&(spConfigMessage *msg);\\
\end{tabular}
\noindent These functions free the memory used by the item passed to them.
Messages and nodes have their children freed as well.
\subsection{Node Management}
\subsubsection*{AddChild}
\noindent\begin{tabular}{l l l @{}}
int&\textbf{spConfigNodeAddChild}&(spConfigNode *parent,spConfigNode *child);\\
\end{tabular}
\noindent This function adds node \textbf{child} to the list of child nodes in
\textbf{parent}.\\ Returns 0 on success, else error.
\subsubsection*{AddAttribute}
\noindent\begin{tabular}{l l l @{}}
int&\textbf{spConfigNodeAddAttribute}&(spConfigNode *parent,spConfigAttribute *att);\\
\end{tabular}
\noindent This function adds attribute \textbf{att} to the list of attributes in
\textbf{parent}.\\ Returns 0 on success, else error.
\subsubsection*{AddAttribute*}
\noindent\begin{tabular}{l l l @{}}
int&\textbf{spConfigNodeAddAttributeInt}&(spConfigNode*parent,char*name,int i);\\
int&\textbf{spConfigNodeAddAttributeDouble}&(spConfigNode*parent,char*name,double d);\\
int&\textbf{spConfigNodeAddAttributeString}&(spConfigNode*parent,char*name,char*text);\\
\end{tabular}
\noindent These functions add an attribute named \textbf{name}, with a value of
the item passed, to the list of attributes in \textbf{parent}.\\ They return 0
on success, else error.
\subsubsection*{NodeName}
\noindent\begin{tabular}{l l l @{}}
int&\textbf{spConfigNodeName}&(spConfigNode *node,char *name);\\
\end{tabular}
\noindent Sets the name of \textbf{node} to \textbf{name}, freeing the old name
if there was one.\\ Returns 0 if this is the first name assigned; otherwise an old name was freed.
\subsubsection*{AttributeName}
\noindent\begin{tabular}{l l l @{}}
int&\textbf{spConfigAttributeName}&(spConfigAttribute *att,char *name);\\
\end{tabular}
\noindent Sets the name of \textbf{attribute} to \textbf{name}, freeing the old
name if there was one.\\ Returns 0 if this is the first name assigned; otherwise an old name was freed.
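\subsubsection*{Example: building a tree in memory}
\noindent The following sketch combines the node-management routines above to
build a small configuration tree and write it out with spConfigSave (documented
below).  The header name \texttt{spconfig.h}, the string-ownership convention
(we hand the node a heap-allocated copy of its name via \texttt{strdup}) and the
omitted return-value checks are assumptions, not part of the library's
documented behaviour.
\begin{verbatim}
#include <string.h>      /* strdup */
#include <spconfig.h>    /* assumed header name */

int main(void)
{
    spConfigNode *root  = spConfigNodeNew();
    spConfigNode *video = spConfigNodeNew();

    spConfigNodeName(video, strdup("video"));  /* node frees old names itself */
    spConfigNodeAddAttributeInt(video, "width", 640);
    spConfigNodeAddAttributeInt(video, "height", 480);
    spConfigNodeAddAttributeString(video, "driver", "svga");

    spConfigNodeAddChild(root, video);
    spConfigSave(root, "demo.conf");

    spConfigNodeFree(root);   /* children are freed as well */
    return 0;
}
\end{verbatim}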
\subsection{Loading}
\subsubsection*{Load}
\noindent\begin{tabular}{l l l @{}}
int&\textbf{spConfigLoad}&(spConfigNode *root,char *filename);\\
\end{tabular}
\noindent This function loads a config file from \textbf{filename}, parses it,
and stores whatever it successfully parses in \textbf{root}.\\ Returns 0 on
success, else error.
\subsubsection*{LoadStr}
\noindent\begin{tabular}{l l l @{}}
int&\textbf{spConfigLoadStr}&(spConfigNode *root,char *name,char *string);\\
\end{tabular}
\noindent This function is identical to spConfigLoad above, but instead of a
file it takes a string.  This allows the program to store a compressed and/or
encrypted configuration file, as long as this function gets plain text for
parsing.  \textbf{name} is the name that will show up in messages as the file name.\\
Returns 0 on success, else error.
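\subsubsection*{Example: loading and querying a file}
\noindent A minimal sketch of the typical read path: load a file, report any
parser messages, then look up a value by path.  The header name
\texttt{spconfig.h} and the exact path syntax passed to
spConfigAttributeFindPath (node names separated by \texttt{/}, with the
attribute name as the last component) are assumptions.
\begin{verbatim}
#include <stdio.h>
#include <spconfig.h>    /* assumed header name */

int main(void)
{
    spConfigNode *root = spConfigNodeNew();

    if (spConfigLoad(root, "demo.conf") != 0)
        fprintf(stderr, "%s", spConfigMessagesList(root));

    spConfigAttribute *att = spConfigAttributeFindPath(root, "video/width");
    if (att != NULL && att->type == SP_CONFIG_VALUE_INT)
        printf("width = %d\n", att->value.i);

    spConfigMessagesClear(root);
    spConfigNodeFree(root);
    return 0;
}
\end{verbatim}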
\subsection{Saving}
\subsubsection*{Save}
\noindent\begin{tabular}{l l l @{}}
int&\textbf{spConfigSave}&(spConfigNode *root,char *filename);\\
\end{tabular}
\noindent This function saves a configtree based on \textbf{root} in a file
named \textbf{filename}. This file can then be opened and reparsed with
spConfigLoad() to return an identical tree at a later time.\\ Returns 0 on
success, else error.
\subsubsection*{SaveStr (NOT YET IMPLEMENTED)}
\noindent\begin{tabular}{l l l @{}}
char&\textbf{*spConfigSaveStr}&(spConfigNode *root);\\
\end{tabular}
\noindent This function returns a string representing the config file in
parsable form.  Instead of saving the config tree in a file, this allows the
program to compress and/or encrypt the config string before saving, effectively
protecting the configuration from casual hacking.
\subsection{Messages}
\subsubsection*{MessagesList}
\noindent\begin{tabular}{l l l @{}}
char&\textbf{*spConfigMessagesList}&(spConfigNode *rootNode);\\
\end{tabular}
\noindent This function returns a string containing all the messages in
\textbf{rootNode}. This is suitable for printing on the screen or saving to a
file.
\subsubsection*{MessagesClear}
\noindent\begin{tabular}{l l l @{}}
void&\textbf{spConfigMessagesClear}&(spConfigNode*rootNode);\\
\end{tabular}
\noindent This function removes all the messages from \textbf{rootNode} and
reclaims the memory used by those messages.
\subsection{Finding Information}
\subsubsection*{Find}
\noindent\begin{tabular}{l l l @{}}
spConfigNode&\textbf{*spConfigNodeFind}\\
&(spConfigNode *root, char *name, char deep, spConfigNode *previous);\\
spConfigAttribute&\textbf{*spConfigAttributeFind}\\
&(spConfigNode *root, char *name, char deep, spConfigAttribute *previous);\\
\end{tabular}
\noindent These functions search through the entire config tree specified by
\textbf{root}, looking for nodes or attributes with the name \textbf{name}. If
\textbf{deep} is non-zero, child nodes of \textbf{root} are recursively
included in the search. If \textbf{previous} is non-null, it is assumed to be
a pointer to the previous node/attribute returned, and the function will return
the next node/attribute after that one. On the first call, \textbf{previous}
should be NULL.
\subsubsection*{FindPath}
\noindent\begin{tabular}{l l l @{}}
spConfigNode&\textbf{*spConfigNodeFindPath}&(spConfigNode *root,char *path);\\
spConfigAttribute&\textbf{*spConfigAttributeFindPath}&(spConfigNode *root,char *path);\\
\end{tabular}
\noindent These functions return a node or attribute pointing to the node or
attribute indicated in \textbf{path}. \textbf{path} is a string similar to a
unix path. NULL is returned if nothing is found. If some of the
\textbf{path} is correct, it returns the closest match (deepest matching
directory).
\subsubsection*{GetPath}
\noindent\begin{tabular}{l l l @{}}
char&\textbf{*spConfigNodeGetPath}&(spConfigNode *node);\\
\end{tabular}
\noindent This function returns the path string of \textbf{node}.\\
\subsection{Debugging}
For detailed information regarding debugging, please see {\tt doc/DEBUGGING}
\subsubsection*{File Output}
\noindent\begin{tabular}{l l l @{}}
void&\textbf{DEBUGFD\_spconfig}&(FILE *debuglog);\\
\end{tabular}
\noindent This is a debugging function used to specify where the debug messages
should go. \textbf{debuglog} should be a file opened for writing. The default
log is stderr. \textbf{This function has no effect if spconfig is compiled
without debugging.}
\subsubsection*{Debug Level}
\noindent\begin{tabular}{l l l @{}}
void&\textbf{DEBUGLEVEL\_spconfig}&(signed char level);\\
\end{tabular}
\noindent This is a debugging function used to specify the verbosity of the
debug messages. \textbf{level} is a value between -128 and 127. Lower levels
result in more messages. The default level is 0. \textbf{This function has no
effect if spconfig is compiled without debugging.}
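\subsubsection*{Example}
\noindent A short sketch of enabling the debug hooks; it has an effect only when
spconfig is built with debugging, and the header name \texttt{spconfig.h} is an
assumption.
\begin{verbatim}
#include <stdio.h>
#include <spconfig.h>    /* assumed header name */

int main(void)
{
    DEBUGFD_spconfig(fopen("spconfig-debug.log", "w"));
    DEBUGLEVEL_spconfig(-10);   /* lower level => more verbose output */
    /* ... normal spConfig usage follows ... */
    return 0;
}
\end{verbatim}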
\end{document}
|
State Before: k : Type u_2
M : Type u_1
N : Type ?u.22212
inst✝³ : OrderedRing k
inst✝² : OrderedAddCommGroup M
inst✝¹ : Module k M
inst✝ : OrderedSMul k M
a b : M
c : k
h : c • a < c • b
hc : c ≤ 0
⊢ b < a State After: k : Type u_2
M : Type u_1
N : Type ?u.22212
inst✝³ : OrderedRing k
inst✝² : OrderedAddCommGroup M
inst✝¹ : Module k M
inst✝ : OrderedSMul k M
a b : M
c : k
h : -c • b < -c • a
hc : c ≤ 0
⊢ b < a Tactic: rw [← neg_neg c, neg_smul, neg_smul (-c), neg_lt_neg_iff] at h State Before: k : Type u_2
M : Type u_1
N : Type ?u.22212
inst✝³ : OrderedRing k
inst✝² : OrderedAddCommGroup M
inst✝¹ : Module k M
inst✝ : OrderedSMul k M
a b : M
c : k
h : -c • b < -c • a
hc : c ≤ 0
⊢ b < a
State After: no goals
Tactic: exact lt_of_smul_lt_smul_of_nonneg h (neg_nonneg_of_nonpos hc)
|
function [near,in,on,B,L] = near_mesh(V,F,Q,epsilon)
% NEAR_MESH test whether a list of points are in or near a given mesh
%
% [near] = near_mesh(V,F,Q)
% [near,in,on,B,L] = near_mesh(V,F,Q)
%
% Inputs:
% V #V by dim list of vertex positions
% F #F by 3 list of face indices
% Q #Q by dim list of query points
% epsilon distance threshold: query points within epsilon of the mesh boundary
% are reported as near; default is half the minimum edge length
% Outputs:
% near #Q list of flags revealing whether queries are near (V,F)
% in #Q list of flags revealing whether queries are in (V,F)
% on #Q list of flags revealing whether queries are on boundary of (V,F)
% B #B by 1 list of mesh outline edges
% L #loops+1 by 1 list of boundary loop start indices into B, the last
% entry is (by tradition) always numel(B) + 1
%
% See in_mesh, inpolygon
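% Example (a sketch; assumes V,F describe a 2D triangle mesh and Q is a
% #Q by 2 list of query points):
%   near = near_mesh(V,F,Q);
%   Q_near = Q(near,:);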
%
if ~exist('epsilon','var') || isempty(epsilon)
EE = edges(F);
% maximum edge length
%maxD = max(sqrt(sum((V(EE(:,1),:) - V(EE(:,2),:)).^2,2)));
minD = min(sqrt(sum((V(EE(:,1),:) - V(EE(:,2),:)).^2,2)));
epsilon = minD/2;
end
dim = size(V,2);
% only works in 2D
assert(dim == 2);
% first determine points strictly in or on mesh
[in,on,B,L] = in_mesh(V,F,Q);
% avoid sqrts
sqr_eps = epsilon.^2;
% boundary edges
BE = [B; B(2:end) B(1)]';
% compute projection of each point to each boundary line segment
[T,sqrD] = project_to_lines(Q,V(BE(:,1),:),V(BE(:,2),:));
% each vertex seen by each edge
QBE = repmat(Q,[1 1 size(BE,1)]);
% edge start positions
S = V(BE(:,1),:);
% edge destination positions
D = V(BE(:,2),:);
% distance of each point to each edge start
sqrDS = ...
squeeze(sum((QBE - permute(repmat(S,[1 1 size(Q,1)]),[3 2 1])).^2,2));
% distance of each point to each edge dest
sqrDD = ...
squeeze(sum((QBE - permute(repmat(D,[1 1 size(Q,1)]),[3 2 1])).^2,2));
% replace distances to edges when point is closest to start or dest endpoints
% respectively
sqrD(T<0) = sqrDS(T<0);
sqrD(T>1) = sqrDD(T>1);
% compute minimum distance to boundary
[minD] = min(sqrD,[],2);
% mask telling whether closest point for each edge is close enough
near = minD<sqr_eps;
% all strictly in points are also close
near = in | near;
end
|
module Prelude where
open import Level public hiding (zero) renaming (suc to sucℓ)
open import Size public
open import Function public
open import Data.List using (List; _∷_; []; [_]) public
open import Data.Unit using (⊤; tt) public
open import Data.Nat using (ℕ; suc; zero; _+_) public
open import Data.Sum using (inj₁; inj₂) renaming (_⊎_ to _⊕_) public
open import Data.Product public hiding (map; zip)
open import Codata.Thunk public
open import Relation.Unary hiding (_∈_; Empty) public
open import Relation.Binary.PropositionalEquality hiding ([_]) public
open import Relation.Ternary.Separation public
open import Relation.Ternary.Separation.Allstar public
|
Neoism was a public affairs show on KDVS hosted by Jessica Rockwell from fall of 2007 through spring of 2008. Shows consisted of interviews with people on various topics.
Burning Man: prerecorded interviews of burners, and a live in-studio interview with Mark Chang. (This was 30 minutes on France Kassing's show, It's About You.)
Croatia and the Breakup of the Former Yugoslavia: guest Danijel, a visiting student from Croatia, mayor of an island in Dalmatia, and an ex-soldier during the war of the early 90s.
Astronomy: Rachel of the UC Davis Astronomy Club discusses the club, the recent lunar eclipse, the Perseid meteor shower, and the upcoming Aurigid meteor shower.
Labor Day Show: union organizer Charles discusses Labor Day in the US, the history of the labor movement, and the AFSCME/SOC campaign to get Sodexho employees to become UC employees, as well as the role of unions in national politics. (This was on France Kassing's show, It's About You.)
Breakdancing: three breakdancers who practice at UCD are interviewed on why they breakdance, its history, and their injuries. The music played was tunes they break to.
The People's Vanguard of Davis: David Greenwald of the PVD discusses his online blog and the topics he's covered.
The Domes: Danielle Fodor discusses her thesis on the domes.
Critical Mass & Anti-Worker Legislation Supported by Democrats: people active in Critical Mass discuss the upcoming mass, why they're necessary, and your rights as a uni/bi/tri/quadricyclist. In the second half of the show I discuss the anti-worker legislation that has been and continues to be supported by the Democratic Party.
US Cotton Subsidies: Mamadou, a visiting student at UCD, discusses how his country is harmed by US cotton subsidies. (This show may be in French.)
Mumia Abu-Jamal: Jeff Mackler, Director of Mobilization to Free Mumia Abu-Jamal and National Coordinator of Mumia's Defense, discusses Mumia's case.
Homelessness in Davis and Yolo County: Steve, of Homeless, American Indians, Gays & Lesbians, and Students, joins me in the studio to discuss homelessness in Davis and Yolo County.
ArtScience Fusion Program: Diane Ullman and Donna Billick discuss their undergraduate Entomology course. Murals from past class projects can be found by the Arboretum and other places on campus as well as in the city.
2007-08-29 20:14:37: Rock on, Jessica! Glad to see you've got a radio show... it's infectious, isn't it? KarlMogel
|
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import algebra.module.pi
/-!
# Bundled hom instances for module and multiplicative actions
This file defines instances for module, mul_action and related structures on bundled `_hom` types.
These are analogous to the instances in `algebra.module.pi`, but for bundled instead of unbundled
functions.
-/
variables {R S A B : Type*}
namespace add_monoid_hom
section
variables [monoid R] [monoid S] [add_monoid A] [add_comm_monoid B]
variables [distrib_mul_action R B] [distrib_mul_action S B]
instance : distrib_mul_action R (A →+ B) :=
{ smul := λ r f,
{ to_fun := r • f,
map_zero' := by simp,
map_add' := λ x y, by simp [smul_add] },
one_smul := λ f, by simp,
mul_smul := λ r s f, by simp [mul_smul],
smul_add := λ r f g, ext $ λ x, by simp [smul_add],
smul_zero := λ r, ext $ λ x, by simp [smul_zero] }
@[simp] lemma coe_smul (r : R) (f : A →+ B) : ⇑(r • f) = r • f := rfl
lemma smul_apply (r : R) (f : A →+ B) (x : A) : (r • f) x = r • f x := rfl
instance [smul_comm_class R S B] : smul_comm_class R S (A →+ B) :=
⟨λ a b f, ext $ λ x, smul_comm _ _ _⟩
instance [has_smul R S] [is_scalar_tower R S B] : is_scalar_tower R S (A →+ B) :=
⟨λ a b f, ext $ λ x, smul_assoc _ _ _⟩
instance [distrib_mul_action Rᵐᵒᵖ B] [is_central_scalar R B] : is_central_scalar R (A →+ B) :=
⟨λ a b, ext $ λ x, op_smul_eq_smul _ _⟩
end
instance [semiring R] [add_monoid A] [add_comm_monoid B] [module R B] :
module R (A →+ B) :=
{ add_smul := λ r s x, ext $ λ y, by simp [add_smul],
zero_smul := λ x, ext $ λ y, by simp [zero_smul],
..add_monoid_hom.distrib_mul_action }
end add_monoid_hom
|
% ********************** Zip file 2: AF feature calculation
% AF feature calculation code package, which you can use to calculate the
% 14 AF features as the input feature matrix of the SVM model. The 14 AF
% features include the NFEn feature I sent you before.
%
% ***********************
clear all
close all
clc
% load the example RR-interval data (one recording per row)
data = load('RR_example.txt');
% compute the 14 AF features from the first 53 values of row 7; the second
% argument (250) is presumably the sampling rate expected by AF_features
features = AF_features(data(7,1:53),250);
|