(* Title: HOL/Auth/n_germanSimp_lemma_on_inv__26.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSimp Protocol Case Study*}
theory n_germanSimp_lemma_on_inv__26 imports n_germanSimp_base
begin
section{*All lemmas on causal relation between inv__26 and some rule r*}
lemma n_SendInv__part__0Vsinv__26:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__0 i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv4) ''State'')) (Const E)) (eqn (IVar (Para (Ident ''InvSet'') p__Inv3)) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__1Vsinv__26:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__1 i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv4) ''State'')) (Const E)) (eqn (IVar (Para (Ident ''InvSet'') p__Inv3)) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__26:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__26:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__26:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__26:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__26:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv3) ''Cmd'')) (Const Inv)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const GntE))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__26:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvInvAckVsinv__26:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvInvAck i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqE__part__0Vsinv__26:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqE__part__1Vsinv__26:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqSVsinv__26:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__26 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
(* Title: HOL/Auth/fifo_on_inis.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The fifo Protocol Case Study*}
theory fifo_on_inis imports fifo_on_ini
begin
lemma on_inis:
assumes b1: "f \<in> (invariants N)" and b2: "ini \<in> {andList (allInitSpecs N)}" and b3: "formEval ini s"
shows "formEval f s"
proof -
have c1: "(f=inv__1 )"
apply (cut_tac b1, simp) done
moreover {
assume d1: "(f=inv__1 )"
have "formEval f s"
apply (rule iniImply_inv__1)
apply (cut_tac d1, assumption)
apply (cut_tac b2 b3, blast) done
}
ultimately show "formEval f s"
by satx
qed
end
include("test_common.jl")
@testset "import" begin
# Not supported:
# node"import path from 'path'"
path = require("path")
@test path == node"require('path')"
@test path.resolve(".") == abspath(".")[1:end-1]
node"const fs = require('fs')"
cd(@__DIR__)
isfile("package.json") || NPM.init("-y")
# Test with `canvas`, an npm package with native dependencies.
NPM.is_installed("canvas") || NPM.install("canvas")
canvas_module = require("canvas")
canvas = canvas_module.createCanvas(10, 10)
ctx = canvas.getContext("2d")
ctx.strokeStyle = "rgba(0,0,0,0.5)"
ctx.beginPath()
ctx.lineTo(1, 7)
ctx.lineTo(10, 3)
ctx.stroke()
@test canvas.toDataURL() == "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+" *
"9AAAABmJLR0QA/wD/AP+gvaeTAAAAV0lEQVQYla3PsQ1AAABE0aehYQAliQEkJHaxroROYgUWEJ1Ko1AgCr/+" *
"l7vjb4KPTvYmhijRYLsTY1SosaDDfBXTM11gQo/12h+hRYIBI/anPblvx37kANwUCq3OvPHeAAAAAElFTkSuQmCC"
end
(***************************************************************************
* Preservation and Progress for System F<: with Closed Functions *
* - allow capture of type variables in closed functions *
* *
* (based on the F<: implementation in locally-nameless project) *
***************************************************************************)
Set Implicit Arguments.
Require Import LibLN.
Implicit Types x : var.
Implicit Types X : var.
(* ********************************************************************** *)
(** * Description of the Language *)
(** Representation of pre-types *)
Inductive typ : Set :=
| typ_top : typ
| typ_bvar : nat -> typ
| typ_fvar : var -> typ
| typ_arrow : typ -> typ -> typ
| typ_all : typ -> typ -> typ.
(** Representation of pre-terms *)
Inductive trm : Set :=
| trm_bvar : nat -> trm
| trm_fvar : var -> trm
| trm_abs : typ -> trm -> trm
| trm_cap : typ -> trm -> trm (* capsule lambda - closed *)
| trm_app : trm -> trm -> trm
| trm_tabs : typ -> trm -> trm
| trm_tapp : trm -> typ -> trm.
(** Opening up a type binder occurring in a type *)
Fixpoint open_tt_rec (K : nat) (U : typ) (T : typ) {struct T} : typ :=
match T with
| typ_top => typ_top
| typ_bvar J => If K = J then U else (typ_bvar J)
| typ_fvar X => typ_fvar X
| typ_arrow T1 T2 => typ_arrow (open_tt_rec K U T1) (open_tt_rec K U T2)
| typ_all T1 T2 => typ_all (open_tt_rec K U T1) (open_tt_rec (S K) U T2)
end.
Definition open_tt T U := open_tt_rec 0 U T.
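(* Illustrative check, not part of the original development: a minimal example
   of [open_tt] on a concrete type, under the hypothetical name [open_tt_demo].
   Opening replaces the bound index 0 by the argument and leaves the remaining
   constructors untouched. *)
Example open_tt_demo : forall (U : typ),
  open_tt (typ_arrow (typ_bvar 0) typ_top) U = typ_arrow U typ_top.
Proof. intros. unfold open_tt. simpl. rewrite* If_l. Qed.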
(** Opening up a type binder occurring in a term *)
Fixpoint open_te_rec (K : nat) (U : typ) (e : trm) {struct e} : trm :=
match e with
| trm_bvar i => trm_bvar i
| trm_fvar x => trm_fvar x
| trm_abs V e1 => trm_abs (open_tt_rec K U V) (open_te_rec K U e1)
| trm_cap V e1 => trm_cap (open_tt_rec K U V) (open_te_rec K U e1)
| trm_app e1 e2 => trm_app (open_te_rec K U e1) (open_te_rec K U e2)
| trm_tabs V e1 => trm_tabs (open_tt_rec K U V) (open_te_rec (S K) U e1)
| trm_tapp e1 V => trm_tapp (open_te_rec K U e1) (open_tt_rec K U V)
end.
Definition open_te t U := open_te_rec 0 U t.
(** Opening up a term binder occurring in a term *)
Fixpoint open_ee_rec (k : nat) (f : trm) (e : trm) {struct e} : trm :=
match e with
| trm_bvar i => If k = i then f else (trm_bvar i)
| trm_fvar x => trm_fvar x
| trm_abs V e1 => trm_abs V (open_ee_rec (S k) f e1)
| trm_cap V e1 => trm_cap V (open_ee_rec (S k) f e1)
| trm_app e1 e2 => trm_app (open_ee_rec k f e1) (open_ee_rec k f e2)
| trm_tabs V e1 => trm_tabs V (open_ee_rec k f e1)
| trm_tapp e1 V => trm_tapp (open_ee_rec k f e1) V
end.
Definition open_ee t u := open_ee_rec 0 u t.
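(* Similarly for terms (illustrative only; [open_ee_demo] is not in the original
   file): the bound index 0 becomes the argument [u], while a free variable [x]
   is left alone. *)
Example open_ee_demo : forall (u : trm) (x : var),
  open_ee (trm_app (trm_bvar 0) (trm_fvar x)) u = trm_app u (trm_fvar x).
Proof. intros. unfold open_ee. simpl. rewrite* If_l. Qed.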
(** Notation for opening up binders with type or term variables *)
Notation "T 'open_tt_var' X" := (open_tt T (typ_fvar X)) (at level 67).
Notation "t 'open_te_var' X" := (open_te t (typ_fvar X)) (at level 67).
Notation "t 'open_ee_var' x" := (open_ee t (trm_fvar x)) (at level 67).
(** Types as locally closed pre-types *)
Inductive type : typ -> Prop :=
| type_top :
type typ_top
| type_var : forall X,
type (typ_fvar X)
| type_arrow : forall T1 T2,
type T1 ->
type T2 ->
type (typ_arrow T1 T2)
| type_all : forall L T1 T2,
type T1 ->
(forall X, X \notin L -> type (T2 open_tt_var X)) ->
type (typ_all T1 T2).
(** Terms as locally closed pre-terms *)
Inductive term : trm -> Prop :=
| term_var : forall x,
term (trm_fvar x)
| term_abs : forall L V e1,
type V ->
(forall x, x \notin L -> term (e1 open_ee_var x)) ->
term (trm_abs V e1)
| term_cap : forall L V e1,
type V ->
(forall x, x \notin L -> term (e1 open_ee_var x)) ->
term (trm_cap V e1)
| term_app : forall e1 e2,
term e1 ->
term e2 ->
term (trm_app e1 e2)
| term_tabs : forall L V e1,
type V ->
(forall X, X \notin L -> term (e1 open_te_var X)) ->
term (trm_tabs V e1)
| term_tapp : forall e1 V,
term e1 ->
type V ->
term (trm_tapp e1 V).
(** Bindings map either type or term variables:
[X ~<: T] is a subtyping assumption and [x ~: T] is
a typing assumption. *)
Inductive bind : Set :=
| bind_sub : typ -> bind
| bind_typ : typ -> bind.
Notation "X ~<: T" := (X ~ bind_sub T)
(at level 23, left associativity) : env_scope.
Notation "x ~: T" := (x ~ bind_typ T)
(at level 23, left associativity) : env_scope.
(** An environment is an associative list of bindings. *)
Definition env := LibEnv.env bind.
(* [typ_env E] restricts E to its type (subtyping) bindings; term bindings are dropped *)
Fixpoint typ_env (E: env) := match E with
| nil => nil
| cons (X, bind_sub U) E' => cons (X, bind_sub U) (typ_env E')
| cons (_, bind_typ _) E' => typ_env E'
end.
(** Well-formedness of a pre-type T in an environment E:
all the type variables of T must be bound via a
subtyping relation in E. This predicate implies
that T is a type *)
Inductive wft : env -> typ -> Prop :=
| wft_top : forall E,
wft E typ_top
| wft_var : forall U E X,
binds X (bind_sub U) E ->
wft E (typ_fvar X)
| wft_arrow : forall E T1 T2,
wft E T1 ->
wft E T2 ->
wft E (typ_arrow T1 T2)
| wft_all : forall L E T1 T2,
wft E T1 ->
(forall X, X \notin L ->
wft (E & X ~<: T1) (T2 open_tt_var X)) ->
wft E (typ_all T1 T2).
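(* Small example, not part of the original development: under a context that
   binds X <: Top, the arrow type X -> Top is well-formed, because X occurs via
   a subtyping binding. *)
Example wft_demo : forall (X : var),
  wft (empty & X ~<: typ_top) (typ_arrow (typ_fvar X) typ_top).
Proof.
  intros. apply wft_arrow.
  apply (@wft_var typ_top). apply binds_push_eq.
  apply wft_top.
Qed.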
(** An environment E is well-formed if it contains no duplicate bindings
and if each type in it is well-formed with respect to the environment
it is pushed on to. *)
Inductive okt : env -> Prop :=
| okt_empty :
okt empty
| okt_sub : forall E X T,
okt E -> wft E T -> X # E -> okt (E & X ~<: T)
| okt_typ : forall E x T,
okt E -> wft E T -> x # E -> okt (E & x ~: T).
(** Subtyping relation *)
Inductive sub : env -> typ -> typ -> Prop :=
| sub_top : forall E S,
okt E ->
wft E S ->
sub E S typ_top
| sub_refl_tvar : forall E X,
okt E ->
wft E (typ_fvar X) ->
sub E (typ_fvar X) (typ_fvar X)
| sub_trans_tvar : forall U E T X,
binds X (bind_sub U) E ->
sub E U T ->
sub E (typ_fvar X) T
| sub_arrow : forall E S1 S2 T1 T2,
sub E T1 S1 ->
sub E S2 T2 ->
sub E (typ_arrow S1 S2) (typ_arrow T1 T2)
| sub_all : forall L E S1 S2 T1 T2,
sub E T1 S1 ->
(forall X, X \notin L ->
sub (E & X ~<: T1) (S2 open_tt_var X) (T2 open_tt_var X)) ->
sub E (typ_all S1 S2) (typ_all T1 T2).
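(* Small example, not part of the original development: in a well-formed
   environment every well-formed type is a subtype of Top, here the type
   Top -> Top in the empty environment. *)
Example sub_demo : sub empty (typ_arrow typ_top typ_top) typ_top.
Proof.
  apply sub_top. apply okt_empty.
  apply wft_arrow; apply wft_top.
Qed.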
(** Typing relation *)
Inductive typing : env -> trm -> typ -> Prop :=
| typing_var : forall E x T,
okt E ->
binds x (bind_typ T) E ->
typing E (trm_fvar x) T
| typing_abs : forall L E V e1 T1,
(forall x, x \notin L ->
typing (E & x ~: V) (e1 open_ee_var x) T1) ->
typing E (trm_abs V e1) (typ_arrow V T1)
| typing_cap: forall L E V e1 T1, (* v2: allow capture of type variables *)
okt E ->
(forall x, x \notin L ->
typing ((typ_env E) & x ~: V) (e1 open_ee_var x) T1) ->
typing E (trm_cap V e1) (typ_arrow V T1)
| typing_app : forall T1 E e1 e2 T2,
typing E e1 (typ_arrow T1 T2) ->
typing E e2 T1 ->
typing E (trm_app e1 e2) T2
| typing_tabs : forall L E V e1 T1,
(forall X, X \notin L ->
typing (E & X ~<: V) (e1 open_te_var X) (T1 open_tt_var X)) ->
typing E (trm_tabs V e1) (typ_all V T1)
| typing_tapp : forall T1 E e1 T T2,
typing E e1 (typ_all T1 T2) ->
sub E T T1 ->
typing E (trm_tapp e1 T) (open_tt T2 T)
| typing_sub : forall S E e T,
typing E e S ->
sub E S T ->
typing E e T.
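(* Note on the capsule rule (an informal reading of the definitions above):
   [typing_abs] checks the body under the full environment [E], whereas
   [typing_cap] checks it under [typ_env E], i.e. with every term binding
   [x ~: T] stripped away. A capsule lambda may therefore mention the type
   variables of the enclosing context but cannot capture its term variables. *)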
(** Values *)
Inductive value : trm -> Prop :=
| value_abs : forall V e1, term (trm_abs V e1) ->
value (trm_abs V e1)
| value_cap : forall V e1, term (trm_cap V e1) ->
value (trm_cap V e1)
| value_tabs : forall V e1, term (trm_tabs V e1) ->
value (trm_tabs V e1).
(** One-step reduction *)
Inductive red : trm -> trm -> Prop :=
| red_app_1 : forall e1 e1' e2,
term e2 ->
red e1 e1' ->
red (trm_app e1 e2) (trm_app e1' e2)
| red_app_2 : forall e1 e2 e2',
value e1 ->
red e2 e2' ->
red (trm_app e1 e2) (trm_app e1 e2')
| red_tapp : forall e1 e1' V,
type V ->
red e1 e1' ->
red (trm_tapp e1 V) (trm_tapp e1' V)
| red_abs : forall V e1 v2,
term (trm_abs V e1) ->
value v2 ->
red (trm_app (trm_abs V e1) v2) (open_ee e1 v2)
| red_cap : forall V e1 v2,
term (trm_cap V e1) ->
value v2 ->
red (trm_app (trm_cap V e1) v2) (open_ee e1 v2)
| red_tabs : forall V1 e1 V2,
term (trm_tabs V1 e1) ->
type V2 ->
red (trm_tapp (trm_tabs V1 e1) V2) (open_te e1 V2).
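(* For orientation (informal): [red_abs] is ordinary beta reduction, e.g.
   [trm_app (trm_abs typ_top (trm_bvar 0)) v] steps to
   [open_ee (trm_bvar 0) v], which is just [v]; [red_cap] is the same rule for
   capsule lambdas, and [red_tabs] is the analogous rule for type application. *)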
(** Our goal is to prove preservation and progress *)
Definition preservation := forall E e e' T,
typing E e T ->
red e e' ->
typing E e' T.
Definition progress := forall e T,
typing empty e T ->
value e
\/ exists e', red e e'.
(* ********************************************************************** *)
(** * Additional Definitions Used in the Proofs *)
(** Computing free type variables in a type *)
Fixpoint fv_tt (T : typ) {struct T} : vars :=
match T with
| typ_top => \{}
| typ_bvar J => \{}
| typ_fvar X => \{X}
| typ_arrow T1 T2 => (fv_tt T1) \u (fv_tt T2)
| typ_all T1 T2 => (fv_tt T1) \u (fv_tt T2)
end.
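(* Quick check, illustrative only ([fv_tt_demo] is not in the original file):
   only free variables are collected; bound indices contribute nothing. *)
Example fv_tt_demo : forall (X : var),
  fv_tt (typ_all (typ_fvar X) (typ_bvar 0)) = \{X} \u \{}.
Proof. intros. simpl. reflexivity. Qed.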
(** Computing free type variables in a term *)
Fixpoint fv_te (e : trm) {struct e} : vars :=
match e with
| trm_bvar i => \{}
| trm_fvar x => \{}
| trm_abs V e1 => (fv_tt V) \u (fv_te e1)
| trm_cap V e1 => (fv_tt V) \u (fv_te e1)
| trm_app e1 e2 => (fv_te e1) \u (fv_te e2)
| trm_tabs V e1 => (fv_tt V) \u (fv_te e1)
| trm_tapp e1 V => (fv_tt V) \u (fv_te e1)
end.
(** Computing free term variables in a term *)
Fixpoint fv_ee (e : trm) {struct e} : vars :=
match e with
| trm_bvar i => \{}
| trm_fvar x => \{x}
| trm_abs V e1 => (fv_ee e1)
| trm_cap V e1 => (fv_ee e1)
| trm_app e1 e2 => (fv_ee e1) \u (fv_ee e2)
| trm_tabs V e1 => (fv_ee e1)
| trm_tapp e1 V => (fv_ee e1)
end.
(** Substitution for free type variables in types. *)
Fixpoint subst_tt (Z : var) (U : typ) (T : typ) {struct T} : typ :=
match T with
| typ_top => typ_top
| typ_bvar J => typ_bvar J
| typ_fvar X => If X = Z then U else (typ_fvar X)
| typ_arrow T1 T2 => typ_arrow (subst_tt Z U T1) (subst_tt Z U T2)
| typ_all T1 T2 => typ_all (subst_tt Z U T1) (subst_tt Z U T2)
end.
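(* Illustrative check, not part of the original development: substitution
   replaces exactly the designated free type variable. *)
Example subst_tt_demo : forall (Z : var) (P : typ),
  subst_tt Z P (typ_arrow (typ_fvar Z) typ_top) = typ_arrow P typ_top.
Proof. intros. simpl. rewrite* If_l. Qed.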
(** Substitution for free type variables in terms. *)
Fixpoint subst_te (Z : var) (U : typ) (e : trm) {struct e} : trm :=
match e with
| trm_bvar i => trm_bvar i
| trm_fvar x => trm_fvar x
| trm_abs V e1 => trm_abs (subst_tt Z U V) (subst_te Z U e1)
| trm_cap V e1 => trm_cap (subst_tt Z U V) (subst_te Z U e1)
| trm_app e1 e2 => trm_app (subst_te Z U e1) (subst_te Z U e2)
| trm_tabs V e1 => trm_tabs (subst_tt Z U V) (subst_te Z U e1)
| trm_tapp e1 V => trm_tapp (subst_te Z U e1) (subst_tt Z U V)
end.
(** Substitution for free term variables in terms. *)
Fixpoint subst_ee (z : var) (u : trm) (e : trm) {struct e} : trm :=
match e with
| trm_bvar i => trm_bvar i
| trm_fvar x => If x = z then u else (trm_fvar x)
| trm_abs V e1 => trm_abs V (subst_ee z u e1)
| trm_cap V e1 => trm_cap V (subst_ee z u e1)
| trm_app e1 e2 => trm_app (subst_ee z u e1) (subst_ee z u e2)
| trm_tabs V e1 => trm_tabs V (subst_ee z u e1)
| trm_tapp e1 V => trm_tapp (subst_ee z u e1) V
end.
(** Substitution for free type variables in environment. *)
Definition subst_tb (Z : var) (P : typ) (b : bind) : bind :=
match b with
| bind_sub T => bind_sub (subst_tt Z P T)
| bind_typ T => bind_typ (subst_tt Z P T)
end.
(* ********************************************************************** *)
(** * Tactics *)
(** Constructors as hints. *)
Hint Constructors type term wft ok okt value red.
Hint Resolve
sub_top sub_refl_tvar sub_arrow
typing_var typing_app typing_tapp typing_sub.
(** Gathering free names already used in the proofs *)
Ltac gather_vars :=
let A := gather_vars_with (fun x : vars => x) in
let B := gather_vars_with (fun x : var => \{x}) in
let C := gather_vars_with (fun x : trm => fv_te x) in
let D := gather_vars_with (fun x : trm => fv_ee x) in
let E := gather_vars_with (fun x : typ => fv_tt x) in
let F := gather_vars_with (fun x : env => dom x) in
constr:(A \u B \u C \u D \u E \u F).
(** "pick_fresh x" tactic create a fresh variable with name x *)
Ltac pick_fresh x :=
let L := gather_vars in (pick_fresh_gen L x).
(** "apply_fresh T as x" is used to apply inductive rule which
use an universal quantification over a cofinite set *)
Tactic Notation "apply_fresh" constr(T) "as" ident(x) :=
apply_fresh_base T gather_vars x.
Tactic Notation "apply_fresh" "*" constr(T) "as" ident(x) :=
apply_fresh T as x; autos*.
(** These tactics help applying a lemma whose conclusion mentions
an environment (E & F) in the particular case when F is empty *)
Ltac get_env :=
match goal with
| |- wft ?E _ => E
| |- sub ?E _ _ => E
| |- typing ?E _ _ => E
end.
Tactic Notation "apply_empty_bis" tactic(get_env) constr(lemma) :=
let E := get_env in rewrite <- (concat_empty_r E);
eapply lemma; try rewrite concat_empty_r.
Tactic Notation "apply_empty" constr(F) :=
apply_empty_bis (get_env) F.
Tactic Notation "apply_empty" "*" constr(F) :=
apply_empty F; autos*.
(** Tactic to undo when Coq does too much simplification *)
Ltac unsimpl_map_bind :=
match goal with |- context [ ?B (subst_tt ?Z ?P ?U) ] =>
unsimpl ((subst_tb Z P) (B U)) end.
Tactic Notation "unsimpl_map_bind" "*" :=
unsimpl_map_bind; autos*.
(* ********************************************************************** *)
(** * Properties of Substitutions *)
(* ********************************************************************** *)
(** ** Properties of type substitution in type *)
(** Substitution on indices is identity on well-formed terms. *)
Lemma open_tt_rec_type_core : forall T j V U i, i <> j ->
(open_tt_rec j V T) = open_tt_rec i U (open_tt_rec j V T) ->
T = open_tt_rec i U T.
Proof.
induction T; introv Neq H; simpl in *; inversion H; f_equal*.
case_nat*. case_nat*.
Qed.
Lemma open_tt_rec_type : forall T U,
type T -> forall k, T = open_tt_rec k U T.
Proof.
induction 1; intros; simpl; f_equal*. unfolds open_tt.
pick_fresh X. apply* (@open_tt_rec_type_core T2 0 (typ_fvar X)).
Qed.
(** Substitution for a fresh name is identity. *)
Lemma subst_tt_fresh : forall Z U T,
Z \notin fv_tt T -> subst_tt Z U T = T.
Proof.
induction T; simpl; intros; f_equal*.
case_var*.
Qed.
(** Substitution distributes on the open operation. *)
Lemma subst_tt_open_tt_rec : forall T1 T2 X P n, type P ->
subst_tt X P (open_tt_rec n T2 T1) =
open_tt_rec n (subst_tt X P T2) (subst_tt X P T1).
Proof.
introv WP. generalize n.
induction T1; intros k; simpls; f_equal*.
case_nat*.
case_var*. rewrite* <- open_tt_rec_type.
Qed.
Lemma subst_tt_open_tt : forall T1 T2 X P, type P ->
subst_tt X P (open_tt T1 T2) =
open_tt (subst_tt X P T1) (subst_tt X P T2).
Proof.
unfold open_tt. autos* subst_tt_open_tt_rec.
Qed.
(** Substitution and open_var for distinct names commute. *)
Lemma subst_tt_open_tt_var : forall X Y U T, Y <> X -> type U ->
(subst_tt X U T) open_tt_var Y = subst_tt X U (T open_tt_var Y).
Proof.
introv Neq Wu. rewrite* subst_tt_open_tt.
simpl. case_var*.
Qed.
(** Opening up a body t with a type u is the same as opening
up the abstraction with a fresh name x and then substituting u for x. *)
Lemma subst_tt_intro : forall X T2 U,
X \notin fv_tt T2 -> type U ->
open_tt T2 U = subst_tt X U (T2 open_tt_var X).
Proof.
introv Fr Wu. rewrite* subst_tt_open_tt.
rewrite* subst_tt_fresh. simpl. case_var*.
Qed.
(* ********************************************************************** *)
(** ** Properties of type substitution in terms *)
Lemma open_te_rec_term_core : forall e j u i P ,
open_ee_rec j u e = open_te_rec i P (open_ee_rec j u e) ->
e = open_te_rec i P e.
Proof.
induction e; intros; simpl in *; inversion H; f_equal*; f_equal*.
Qed.
Lemma open_te_rec_type_core : forall e j Q i P, i <> j ->
open_te_rec j Q e = open_te_rec i P (open_te_rec j Q e) ->
e = open_te_rec i P e.
Proof.
induction e; intros; simpl in *; inversion H0; f_equal*;
match goal with H: ?i <> ?j |- ?t = open_tt_rec ?i _ ?t =>
apply* (@open_tt_rec_type_core t j) end.
Qed.
Lemma open_te_rec_term : forall e U,
term e -> forall k, e = open_te_rec k U e.
Proof.
intros e U WF. induction WF; intros; simpl;
f_equal*; try solve [ apply* open_tt_rec_type ].
unfolds open_ee. pick_fresh x.
apply* (@open_te_rec_term_core e1 0 (trm_fvar x)).
unfolds open_ee. pick_fresh x.
apply* (@open_te_rec_term_core e1 0 (trm_fvar x)).
unfolds open_te. pick_fresh X.
apply* (@open_te_rec_type_core e1 0 (typ_fvar X)).
Qed.
(** Substitution for a fresh name is identity. *)
Lemma subst_te_fresh : forall X U e,
X \notin fv_te e -> subst_te X U e = e.
Proof.
induction e; simpl; intros; f_equal*; autos* subst_tt_fresh.
Qed.
(** Substitution distributes on the open operation. *)
Lemma subst_te_open_te : forall e T X U, type U ->
subst_te X U (open_te e T) =
open_te (subst_te X U e) (subst_tt X U T).
Proof.
intros. unfold open_te. generalize 0.
induction e; intros; simpls; f_equal*;
autos* subst_tt_open_tt_rec.
Qed.
(** Substitution and open_var for distinct names commute. *)
Lemma subst_te_open_te_var : forall X Y U e, Y <> X -> type U ->
(subst_te X U e) open_te_var Y = subst_te X U (e open_te_var Y).
Proof.
introv Neq Wu. rewrite* subst_te_open_te.
simpl. case_var*.
Qed.
(** Opening up a body t with a type u is the same as opening
up the abstraction with a fresh name x and then substituting u for x. *)
Lemma subst_te_intro : forall X U e,
X \notin fv_te e -> type U ->
open_te e U = subst_te X U (e open_te_var X).
Proof.
introv Fr Wu. rewrite* subst_te_open_te.
rewrite* subst_te_fresh. simpl. case_var*.
Qed.
(* ********************************************************************** *)
(** ** Properties of term substitution in terms *)
Lemma open_ee_rec_term_core : forall e j v u i, i <> j ->
open_ee_rec j v e = open_ee_rec i u (open_ee_rec j v e) ->
e = open_ee_rec i u e.
Proof.
induction e; introv Neq H; simpl in *; inversion H; f_equal*.
case_nat*. case_nat*.
Qed.
Lemma open_ee_rec_type_core : forall e j V u i,
open_te_rec j V e = open_ee_rec i u (open_te_rec j V e) ->
e = open_ee_rec i u e.
Proof.
induction e; introv H; simpls; inversion H; f_equal*.
Qed.
Lemma open_ee_rec_term : forall u e,
term e -> forall k, e = open_ee_rec k u e.
Proof.
induction 1; intros; simpl; f_equal*.
unfolds open_ee. pick_fresh x.
apply* (@open_ee_rec_term_core e1 0 (trm_fvar x)).
unfolds open_ee. pick_fresh x.
apply* (@open_ee_rec_term_core e1 0 (trm_fvar x)).
unfolds open_te. pick_fresh X.
apply* (@open_ee_rec_type_core e1 0 (typ_fvar X)).
Qed.
(** Substitution for a fresh name is identity. *)
Lemma subst_ee_fresh : forall x u e,
x \notin fv_ee e -> subst_ee x u e = e.
Proof.
induction e; simpl; intros; f_equal*.
case_var*.
Qed.
(** Substitution distributes on the open operation. *)
Lemma subst_ee_open_ee : forall t1 t2 u x, term u ->
subst_ee x u (open_ee t1 t2) =
open_ee (subst_ee x u t1) (subst_ee x u t2).
Proof.
intros. unfold open_ee. generalize 0.
induction t1; intros; simpls; f_equal*.
case_nat*.
case_var*. rewrite* <- open_ee_rec_term.
Qed.
(** Substitution and open_var for distinct names commute. *)
Lemma subst_ee_open_ee_var : forall x y u e, y <> x -> term u ->
(subst_ee x u e) open_ee_var y = subst_ee x u (e open_ee_var y).
Proof.
introv Neq Wu. rewrite* subst_ee_open_ee.
simpl. case_var*.
Qed.
(** Opening up a body t with a term u is the same as opening
up the abstraction with a fresh name x and then substituting u for x. *)
Lemma subst_ee_intro : forall x u e,
x \notin fv_ee e -> term u ->
open_ee e u = subst_ee x u (e open_ee_var x).
Proof.
introv Fr Wu. rewrite* subst_ee_open_ee.
rewrite* subst_ee_fresh. simpl. case_var*.
Qed.
(** Interactions between type substitutions in terms and opening
with term variables in terms. *)
Lemma subst_te_open_ee_var : forall Z P x e,
(subst_te Z P e) open_ee_var x = subst_te Z P (e open_ee_var x).
Proof.
introv. unfold open_ee. generalize 0.
induction e; intros; simpl; f_equal*. case_nat*.
Qed.
(** Interactions between term substitutions in terms and opening
with type variables in terms. *)
Lemma subst_ee_open_te_var : forall z u e X, term u ->
(subst_ee z u e) open_te_var X = subst_ee z u (e open_te_var X).
Proof.
introv. unfold open_te. generalize 0.
induction e; intros; simpl; f_equal*.
case_var*. symmetry. autos* open_te_rec_term.
Qed.
(** Substitutions preserve local closure. *)
Lemma subst_tt_type : forall T Z P,
type T -> type P -> type (subst_tt Z P T).
Proof.
induction 1; intros; simpl; auto.
case_var*.
apply_fresh* type_all as X. rewrite* subst_tt_open_tt_var.
Qed.
Lemma subst_te_term : forall e Z P,
term e -> type P -> term (subst_te Z P e).
Proof.
lets: subst_tt_type. induction 1; intros; simpl; auto.
apply_fresh* term_abs as x. rewrite* subst_te_open_ee_var.
apply_fresh* term_cap as x. rewrite* subst_te_open_ee_var.
apply_fresh* term_tabs as x. rewrite* subst_te_open_te_var.
Qed.
Lemma subst_ee_term : forall e1 Z e2,
term e1 -> term e2 -> term (subst_ee Z e2 e1).
Proof.
induction 1; intros; simpl; auto.
case_var*.
apply_fresh* term_abs as y. rewrite* subst_ee_open_ee_var.
apply_fresh* term_cap as y. rewrite* subst_ee_open_ee_var.
apply_fresh* term_tabs as Y. rewrite* subst_ee_open_te_var.
Qed.
Hint Resolve subst_tt_type subst_te_term subst_ee_term.
(* ********************************************************************** *)
(** * Properties of well-formedness of a type in an environment *)
(** If a type is well-formed in an environment then it is locally closed. *)
Lemma wft_type : forall E T,
wft E T -> type T.
Proof.
induction 1; eauto.
Qed.
(** Through weakening *)
Lemma wft_weaken : forall G T E F,
wft (E & G) T ->
ok (E & F & G) ->
wft (E & F & G) T.
Proof.
intros. gen_eq K: (E & G). gen E F G.
induction H; intros; subst; eauto.
(* case: var *)
apply (@wft_var U). apply* binds_weaken.
(* case: all *)
apply_fresh* wft_all as Y. apply_ih_bind* H1.
Qed.
(** Through narrowing *)
Lemma wft_narrow : forall V F U T E X,
wft (E & X ~<: V & F) T ->
ok (E & X ~<: U & F) ->
wft (E & X ~<: U & F) T.
Proof.
intros. gen_eq K: (E & X ~<: V & F). gen E F.
induction H; intros; subst; eauto.
destruct (binds_middle_inv H) as [K|[K|K]]; try destructs K.
applys wft_var. apply* binds_concat_right.
subst. applys wft_var. apply~ binds_middle_eq.
applys wft_var. apply~ binds_concat_left.
apply* binds_concat_left.
apply_fresh* wft_all as Y. apply_ih_bind* H1.
Qed.
(** Through strengthening *)
Lemma wft_strengthen : forall E F x U T,
wft (E & x ~: U & F) T -> wft (E & F) T.
Proof.
intros. gen_eq G: (E & x ~: U & F). gen F.
induction H; intros F EQ; subst; auto.
apply* (@wft_var U0).
destruct (binds_concat_inv H) as [?|[? ?]].
apply~ binds_concat_right.
destruct (binds_push_inv H1) as [[? ?]|[? ?]].
subst. false.
apply~ binds_concat_left.
(* todo: binds_cases tactic *)
apply_fresh* wft_all as Y. apply_ih_bind* H1.
Qed.
(** Through type substitution *)
Lemma wft_subst_tb : forall F Q E Z P T,
wft (E & Z ~<: Q & F) T ->
wft E P ->
ok (E & map (subst_tb Z P) F) ->
wft (E & map (subst_tb Z P) F) (subst_tt Z P T).
Proof.
introv WT WP. gen_eq G: (E & Z ~<: Q & F). gen F.
induction WT; intros F EQ Ok; subst; simpl subst_tt; auto.
case_var*.
apply_empty* wft_weaken.
destruct (binds_concat_inv H) as [?|[? ?]].
apply (@wft_var (subst_tt Z P U)).
apply~ binds_concat_right.
unsimpl_map_bind. apply~ binds_map.
destruct (binds_push_inv H1) as [[? ?]|[? ?]].
subst. false~.
applys wft_var. apply* binds_concat_left.
apply_fresh* wft_all as Y.
unsimpl ((subst_tb Z P) (bind_sub T1)).
lets: wft_type.
rewrite* subst_tt_open_tt_var.
apply_ih_map_bind* H0.
Qed.
(** Through type reduction *)
Lemma wft_open : forall E U T1 T2,
ok E ->
wft E (typ_all T1 T2) ->
wft E U ->
wft E (open_tt T2 U).
Proof.
introv Ok WA WU. inversions WA. pick_fresh X.
autos* wft_type. rewrite* (@subst_tt_intro X).
lets K: (@wft_subst_tb empty).
specializes_vars K. clean_empty K. apply* K.
(* todo: apply empty ? *)
Qed.
(* ********************************************************************** *)
(** * Relations between well-formed environment and types well-formed
in environments *)
(** If an environment is well-formed, then it does not contain duplicated keys. *)
Lemma ok_from_okt : forall E,
okt E -> ok E.
Proof.
induction 1; auto.
Qed.
Hint Extern 1 (ok _) => apply ok_from_okt.
(** Extraction from a subtyping assumption in a well-formed environment *)
Lemma wft_from_env_has_sub : forall x U E,
okt E -> binds x (bind_sub U) E -> wft E U.
Proof.
induction E using env_ind; intros Ok B.
false* binds_empty_inv.
inversions Ok.
false (empty_push_inv H0).
destruct (eq_push_inv H) as [? [? ?]]. subst. clear H.
destruct (binds_push_inv B) as [[? ?]|[? ?]]. subst.
inversions H3. apply_empty* wft_weaken.
apply_empty* wft_weaken.
destruct (eq_push_inv H) as [? [? ?]]. subst. clear H.
destruct (binds_push_inv B) as [[? ?]|[? ?]]. subst.
inversions H3.
apply_empty* wft_weaken.
Qed.
(** Extraction from a typing assumption in a well-formed environment *)
Lemma wft_from_env_has_typ : forall x U E,
okt E -> binds x (bind_typ U) E -> wft E U.
Proof.
induction E using env_ind; intros Ok B.
false* binds_empty_inv.
inversions Ok.
false (empty_push_inv H0).
destruct (eq_push_inv H) as [? [? ?]]. subst. clear H.
destruct (binds_push_inv B) as [[? ?]|[? ?]]. subst.
inversions H3.
apply_empty* wft_weaken.
destruct (eq_push_inv H) as [? [? ?]]. subst. clear H.
destruct (binds_push_inv B) as [[? ?]|[? ?]]. subst.
inversions H3. apply_empty* wft_weaken.
apply_empty* wft_weaken.
Qed.
(** Extraction from a well-formed environment *)
Lemma wft_from_okt_typ : forall x T E,
okt (E & x ~: T) -> wft E T.
Proof.
intros. inversions* H.
false (empty_push_inv H1).
destruct (eq_push_inv H0) as [? [? ?]]. false.
destruct (eq_push_inv H0) as [? [? ?]]. inversions~ H4.
Qed.
Lemma wft_from_okt_sub : forall x T E,
okt (E & x ~<: T) -> wft E T.
Proof.
intros. inversions* H.
false (empty_push_inv H1).
destruct (eq_push_inv H0) as [? [? ?]]. inversions~ H4.
destruct (eq_push_inv H0) as [? [? ?]]. false.
Qed.
(** Automation *)
Lemma wft_weaken_right : forall T E F,
wft E T ->
ok (E & F) ->
wft (E & F) T.
Proof.
intros. apply_empty* wft_weaken.
Qed.
Hint Resolve wft_weaken_right.
Hint Resolve wft_from_okt_typ wft_from_okt_sub.
Hint Immediate wft_from_env_has_sub wft_from_env_has_typ.
Hint Resolve wft_subst_tb.
(* ********************************************************************** *)
(** ** Properties of well-formedness of an environment *)
(** Inversion lemma *)
Lemma okt_push_inv : forall E X B,
okt (E & X ~ B) -> exists T, B = bind_sub T \/ B = bind_typ T.
Proof.
introv O. inverts O.
false* empty_push_inv.
lets (?&?&?): (eq_push_inv H). subst*.
lets (?&?&?): (eq_push_inv H). subst*.
Qed.
Lemma okt_push_sub_inv : forall E X T,
okt (E & X ~<: T) -> okt E /\ wft E T /\ X # E.
Proof.
introv O. inverts O.
false* empty_push_inv.
lets (?&M&?): (eq_push_inv H). subst. inverts~ M.
lets (?&?&?): (eq_push_inv H). false.
Qed.
Lemma okt_push_sub_type : forall E X T,
okt (E & X ~<: T) -> type T.
Proof. intros. applys wft_type. forwards*: okt_push_sub_inv. Qed.
Lemma okt_push_typ_inv : forall E x T,
okt (E & x ~: T) -> okt E /\ wft E T /\ x # E.
Proof.
introv O. inverts O.
false* empty_push_inv.
lets (?&?&?): (eq_push_inv H). false.
lets (?&M&?): (eq_push_inv H). subst. inverts~ M.
Qed.
Lemma okt_push_typ_type : forall E X T,
okt (E & X ~: T) -> type T.
Proof. intros. applys wft_type. forwards*: okt_push_typ_inv. Qed.
Hint Immediate okt_push_sub_type okt_push_typ_type.
(** Through narrowing *)
Lemma okt_narrow : forall V (E F:env) U X,
okt (E & X ~<: V & F) ->
wft E U ->
okt (E & X ~<: U & F).
Proof.
introv O W. induction F using env_ind.
rewrite concat_empty_r in *. lets*: (okt_push_sub_inv O).
rewrite concat_assoc in *.
lets (T&[?|?]): okt_push_inv O; subst.
lets (?&?&?): (okt_push_sub_inv O).
applys~ okt_sub. applys* wft_narrow.
lets (?&?&?): (okt_push_typ_inv O).
applys~ okt_typ. applys* wft_narrow.
Qed.
(** Through strengthening *)
Lemma okt_strengthen : forall x T (E F:env),
okt (E & x ~: T & F) ->
okt (E & F).
Proof.
introv O. induction F using env_ind.
rewrite concat_empty_r in *. lets*: (okt_push_typ_inv O).
rewrite concat_assoc in *.
lets (U&[?|?]): okt_push_inv O; subst.
lets (?&?&?): (okt_push_sub_inv O).
applys~ okt_sub. applys* wft_strengthen.
lets (?&?&?): (okt_push_typ_inv O).
applys~ okt_typ. applys* wft_strengthen.
Qed.
(** Through type substitution *)
Lemma okt_subst_tb : forall Q Z P (E F:env),
okt (E & Z ~<: Q & F) ->
wft E P ->
okt (E & map (subst_tb Z P) F).
Proof.
introv O W. induction F using env_ind.
rewrite map_empty. rewrite concat_empty_r in *.
lets*: (okt_push_sub_inv O).
rewrite map_push. rewrite concat_assoc in *.
lets (U&[?|?]): okt_push_inv O; subst.
lets (?&?&?): (okt_push_sub_inv O).
applys~ okt_sub. applys* wft_subst_tb.
lets (?&?&?): (okt_push_typ_inv O).
applys~ okt_typ. applys* wft_subst_tb.
Qed.
(** Automation *)
Hint Resolve okt_narrow okt_subst_tb wft_weaken.
Hint Immediate okt_strengthen.
(* ********************************************************************** *)
(** ** Environment is unchanged by substitution from a fresh name *)
Lemma notin_fv_tt_open : forall Y X T,
X \notin fv_tt (T open_tt_var Y) ->
X \notin fv_tt T.
Proof.
introv. unfold open_tt. generalize 0.
induction T; simpl; intros k Fr; auto.
specializes IHT1 k. specializes IHT2 k. auto.
specializes IHT1 k. specializes IHT2 (S k). auto.
Qed.
Lemma notin_fv_wf : forall E X T,
wft E T -> X # E -> X \notin fv_tt T.
Proof.
induction 1; intros Fr; simpl.
eauto.
rewrite notin_singleton. intro. subst. applys binds_fresh_inv H Fr.
notin_simpl; auto.
notin_simpl; auto. pick_fresh Y. apply* (@notin_fv_tt_open Y).
Qed.
Lemma map_subst_tb_id : forall G Z P,
okt G -> Z # G -> G = map (subst_tb Z P) G.
Proof.
induction 1; intros Fr; autorewrite with rew_env_map; simpl.
auto.
rewrite* <- IHokt. rewrite* subst_tt_fresh. apply* notin_fv_wf.
rewrite* <- IHokt. rewrite* subst_tt_fresh. apply* notin_fv_wf.
Qed.
(* ********************************************************************** *)
(** ** Properties of set *)
Lemma subset_trans: forall (T: Type) (a b c: fset T),
a \c b -> b \c c -> a \c c.
Proof. unfolds subset. autos. Qed.
Lemma subset_strengthen: forall (T: Type) (a b: fset T) (x: T),
a \c (b \u \{x}) -> x \notin a -> a \c b.
Proof. unfolds subset. intros. forwards K: (H x0 H1).
rewrite in_union in K. destruct* K.
rewrite in_singleton in H2. subst.
tryfalse.
Qed.
(* ********************************************************************** *)
(** * Properties of environment *)
Lemma typ_env_dist: forall E F, typ_env (E & F) = typ_env E & typ_env F.
Proof. rewrite concat_def. intros. gen E. induction F; intros E; autos.
rewrite LibList.app_cons. destruct a. destruct* b.
simpl. rewrite LibList.app_cons. rewrite* <- IHF.
Qed.
Lemma typ_env_dom_subset : forall E, dom (typ_env E) \c dom E.
Proof. intros. induction E.
simpls. apply subset_refl.
destruct a. destruct b.
simpls. repeat(rewrite cons_to_push). repeat(rewrite dom_push).
eapply subset_union_2. eapply subset_refl. eauto.
simpls. rewrite cons_to_push. rewrite dom_push.
eapply subset_trans. eauto. apply subset_union_weak_r.
Qed.
Lemma typ_env_binds: forall E U x, ok E ->
binds x (bind_sub U) (typ_env E) ->
binds x (bind_sub U) E.
Proof. intros. induction E.
simpls. autos.
destruct a. destruct b.
simpls. rewrite cons_to_push in *. destruct (binds_push_inv H0).
destructs H1. inversions H2. apply binds_push_eq.
destructs H1. apply* binds_push_neq.
simpls. rewrite cons_to_push in *. apply binds_push_neq.
autos* ok_concat_inv_l.
intros Ha. subst. lets: IHE (ok_concat_inv_l H) H0.
rewrite <- concat_empty_r in H. lets: ok_middle_inv_l H.
apply (binds_fresh_inv H1 H2).
Qed.
Lemma typ_env_binds_reverse: forall E U x,
binds x (bind_sub U) E -> binds x (bind_sub U) (typ_env E).
Proof. intros. induction E.
simpl in *. autos.
destruct a. destruct b.
simpls. rewrite cons_to_push in *. destruct (binds_push_inv H).
destructs H0. inversions H1. apply binds_push_eq.
destructs H0. apply* binds_push_neq.
simpls. rewrite cons_to_push in *. apply IHE.
eapply binds_push_neq_inv. exact H.
intros HI. subst. lets: binds_push_eq_inv H. inversion H0.
Qed.
Lemma typ_env_no_var : forall x T E, ~binds x (bind_typ T) (typ_env E).
Proof. intros. intros H. induction E.
simpl in H. rewrite <- empty_def in H. destruct (binds_empty_inv H).
destruct a. destruct b; autos.
simpl in H. apply IHE. rewrite cons_to_push in H. destruct (binds_push_inv H).
destruct H0. subst. inversion H1.
destruct* H0.
Qed.
Lemma typ_env_wft: forall E V, ok E -> wft (typ_env E) V -> wft E V.
Proof. intros. remember (typ_env E) as G. gen E. inductions H0; intros; subst; autos.
eapply wft_var. apply* typ_env_binds.
apply_fresh* wft_all as Y. apply* H1. repeat(rewrite <- cons_to_push). autos.
Qed.
Lemma typ_env_wft_weaken: forall E F G V,
ok (E & F & G) -> wft (E & (typ_env F) & G) V -> wft (E & F & G) V.
Proof. intros. inductions H0; intros; subst; autos.
eapply wft_var. binds_cases H0.
apply binds_concat_left; autos. apply* binds_concat_left_ok.
apply binds_concat_left; autos. apply* binds_concat_right. apply* typ_env_binds.
lets*: ok_concat_inv_r (ok_concat_inv_l H).
apply binds_concat_right. auto.
apply_fresh* wft_all as Y.
assert (HI: ok (E & F & (G & Y ~<: T1 ))).
rewrite concat_assoc. apply* ok_push.
forwards~ HII: (H1 Y). apply HI. rewrite* concat_assoc.
rewrite* <- concat_assoc.
Qed.
Lemma typ_env_wft_reverse: forall E V, wft E V -> wft (typ_env E) V.
Proof. intros. inductions H; autos.
eapply wft_var. apply* typ_env_binds_reverse.
apply_fresh* wft_all as Y. forwards~ HI: (H1 Y).
rewrite typ_env_dist in HI. rewrite single_def in *. autos.
Qed.
Lemma typ_env_okt : forall E,
okt E -> okt (typ_env E).
Proof. intros. induction* E.
destruct a. destruct b; simpl; rewrite cons_to_push in *.
apply okt_sub. apply IHE. lets*: okt_push_sub_inv H.
lets(_ & HI & HII): okt_push_sub_inv H.
autos* typ_env_wft_reverse.
lets(_ & _ & HI): okt_push_sub_inv H.
lets: typ_env_dom_subset E. unfolds subset.
intros Ha. apply HI. autos.
apply IHE. lets*: okt_push_typ_inv H.
Qed.
Lemma typ_env_map : forall E Z P, typ_env (map (subst_tb Z P) E) = map (subst_tb Z P) (typ_env E).
Proof. intros. induction E.
simpl. rewrite <- empty_def. rewrite map_empty. rewrite empty_def. reflexivity.
destruct a. destruct b; simpl.
repeat(rewrite cons_to_push). repeat(rewrite map_push). simpl.
rewrite <- cons_to_push. simpl. rewrite cons_to_push. rewrite* IHE.
repeat(rewrite cons_to_push). repeat(rewrite map_push). simpl.
rewrite <- cons_to_push. simpl. rewrite* IHE.
Qed.
Lemma typ_env_eq : forall E, typ_env (typ_env E) = typ_env E.
Proof. intros. induction E; autos.
destruct a. destruct b; autos.
simpl. rewrite* IHE.
Qed.
(* ********************************************************************** *)
(** ** Regularity of relations *)
(** The subtyping relation is restricted to well-formed objects. *)
Lemma sub_regular : forall E S T,
sub E S T -> okt E /\ wft E S /\ wft E T.
Proof.
induction 1. autos*. autos*. autos*. jauto_set; auto. (* autos* too slow *)
split. autos*. split;
apply_fresh* wft_all as Y;
forwards~: (H1 Y); apply_empty* (@wft_narrow T1).
Qed.
(** The typing relation is restricted to well-formed objects. *)
Lemma typing_regular : forall E e T,
typing E e T -> okt E /\ term e /\ wft E T.
Proof.
induction 1.
splits*.
splits.
pick_fresh y. specializes H0 y. destructs~ H0.
forwards*: okt_push_typ_inv.
apply_fresh* term_abs as y.
pick_fresh y. specializes H0 y. destructs~ H0.
forwards*: okt_push_typ_inv.
specializes H0 y. destructs~ H0.
pick_fresh y. specializes H0 y. destructs~ H0.
apply* wft_arrow.
forwards*: okt_push_typ_inv.
apply_empty* wft_strengthen.
splits*.
apply_fresh* term_cap as y.
pick_fresh y. specializes H1 y. destructs~ H1.
forwards*: (okt_push_typ_inv H1).
specializes H1 y. destructs~ H1.
pick_fresh y. specializes H1 y. destructs~ H1.
apply* wft_arrow.
apply* typ_env_wft.
apply* typ_env_wft.
rewrite <- (@concat_empty_r bind (typ_env E)).
eapply wft_strengthen. rewrite* concat_empty_r.
splits*. destructs IHtyping1. inversion* H3.
splits.
pick_fresh y. specializes H0 y. destructs~ H0.
forwards*: okt_push_sub_inv.
apply_fresh* term_tabs as y.
pick_fresh y. forwards~ K: (H0 y). destructs K.
forwards*: okt_push_sub_inv.
forwards~ K: (H0 y). destructs K. auto.
apply_fresh* wft_all as Y.
pick_fresh y. forwards~ K: (H0 y). destructs K.
forwards*: okt_push_sub_inv.
forwards~ K: (H0 Y). destructs K.
forwards*: okt_push_sub_inv.
splits*; destructs (sub_regular H0).
apply* term_tapp. applys* wft_type.
applys* wft_open T1.
splits*. destructs~ (sub_regular H0).
Qed.
(** The value relation is restricted to well-formed objects. *)
Lemma value_regular : forall t,
value t -> term t.
Proof.
induction 1; autos*.
Qed.
(** The reduction relation is restricted to well-formed objects. *)
Lemma red_regular : forall t t',
red t t' -> term t /\ term t'.
Proof.
induction 1; split; autos* value_regular.
inversions H. pick_fresh y. rewrite* (@subst_ee_intro y).
inversions H. pick_fresh y. rewrite* (@subst_ee_intro y).
inversions H. pick_fresh Y. rewrite* (@subst_te_intro Y).
Qed.
(** Automation *)
Hint Extern 1 (okt ?E) =>
match goal with
| H: sub _ _ _ |- _ => apply (proj31 (sub_regular H))
| H: typing _ _ _ |- _ => apply (proj31 (typing_regular H))
end.
Hint Extern 1 (wft ?E ?T) =>
match goal with
| H: typing E _ T |- _ => apply (proj33 (typing_regular H))
| H: sub E T _ |- _ => apply (proj32 (sub_regular H))
| H: sub E _ T |- _ => apply (proj33 (sub_regular H))
end.
Hint Extern 1 (type ?T) =>
let go E := apply (@wft_type E); auto in
match goal with
| H: typing ?E _ T |- _ => go E
| H: sub ?E T _ |- _ => go E
| H: sub ?E _ T |- _ => go E
end.
Hint Extern 1 (term ?e) =>
match goal with
| H: typing _ ?e _ |- _ => apply (proj32 (typing_regular H))
| H: red ?e _ |- _ => apply (proj1 (red_regular H))
| H: red _ ?e |- _ => apply (proj2 (red_regular H))
end.
(** In parentheses we give the labels of the corresponding
lemmas in the description of the POPLmark Challenge. *)
(* ********************************************************************** *)
(** * Properties of Subtyping *)
(* ********************************************************************** *)
(** Reflexivity (1) *)
Lemma sub_reflexivity : forall E T,
okt E ->
wft E T ->
sub E T T .
Proof.
introv Ok WI. lets W: (wft_type WI). gen E.
induction W; intros; inversions WI; eauto.
apply_fresh* sub_all as Y.
Qed.
(* ********************************************************************** *)
(** Weakening (2) *)
Lemma sub_weakening : forall E F G S T,
sub (E & G) S T ->
okt (E & F & G) ->
sub (E & F & G) S T.
Proof.
introv Typ. gen F. inductions Typ; introv Ok; auto.
(* case: fvar trans *)
apply* sub_trans_tvar. apply* binds_weaken.
(* case: all *)
apply_fresh* sub_all as Y. apply_ih_bind* H0.
Qed.
Lemma sub_weakening_env : forall E F G S T,
sub (E & typ_env F & G) S T ->
okt (E & F & G) ->
sub (E & F & G) S T.
Proof. intros. inductions H; eauto.
apply* sub_top. apply* typ_env_wft_weaken.
apply* sub_refl_tvar. apply* typ_env_wft_weaken.
apply* sub_trans_tvar. binds_cases H.
apply binds_concat_left; autos. apply* binds_concat_left_ok. eapply ok_concat_inv_l.
eapply ok_from_okt. eauto.
apply binds_concat_left; autos. apply* binds_concat_right. apply* typ_env_binds.
lets*: ok_concat_inv_r (ok_concat_inv_l (ok_from_okt H1)).
apply binds_concat_right. auto.
apply_fresh* sub_all as X. lets: (H1 X).
rewrite <- concat_assoc in H3. rewrite <- concat_assoc.
apply* H3. rewrite concat_assoc. apply* okt_sub.
destructs (sub_regular H). apply* typ_env_wft_weaken.
Qed.
(* ********************************************************************** *)
(** Narrowing and transitivity (3) *)
Section NarrowTrans.
Definition transitivity_on Q := forall E S T,
sub E S Q -> sub E Q T -> sub E S T.
Hint Unfold transitivity_on.
Hint Resolve wft_narrow.
Lemma sub_narrowing_aux : forall Q F E Z P S T,
transitivity_on Q ->
sub (E & Z ~<: Q & F) S T ->
sub E P Q ->
sub (E & Z ~<: P & F) S T.
Proof.
introv TransQ SsubT PsubQ.
inductions SsubT; introv.
apply* sub_top.
apply* sub_refl_tvar.
tests EQ: (X = Z).
lets M: (@okt_narrow Q).
apply (@sub_trans_tvar P).
asserts~ N: (ok (E & Z ~<: P & F)).
lets: ok_middle_inv_r N.
apply~ binds_middle_eq.
apply TransQ.
do_rew* concat_assoc (apply_empty* sub_weakening).
binds_get H. autos*.
apply* (@sub_trans_tvar U). binds_cases H; auto.
apply* sub_arrow.
apply_fresh* sub_all as Y. apply_ih_bind* H0.
Qed.
Lemma sub_transitivity : forall Q,
transitivity_on Q.
Proof.
intro Q. introv SsubQ QsubT. asserts* W: (type Q).
gen E S T. set_eq Q' EQ: Q. gen Q' EQ.
induction W; intros Q' EQ E S SsubQ;
induction SsubQ; try discriminate; inversions EQ;
intros T QsubT; inversions keep QsubT;
eauto 4 using sub_trans_tvar.
(* case: all / top -> only needed to fix well-formedness,
by building back what has been deconstructed too much *)
assert (sub E (typ_all S1 S2) (typ_all T1 T2)).
apply_fresh* sub_all as y.
autos*.
(* case: all / all *)
apply_fresh sub_all as Y. autos*.
applys~ (H0 Y). lets: (IHW T1).
apply_empty* (@sub_narrowing_aux T1).
Qed.
Lemma sub_narrowing : forall Q E F Z P S T,
sub E P Q ->
sub (E & Z ~<: Q & F) S T ->
sub (E & Z ~<: P & F) S T.
Proof.
intros.
apply* sub_narrowing_aux.
apply* sub_transitivity.
Qed.
End NarrowTrans.
(* ********************************************************************** *)
(** Type substitution preserves subtyping (10) *)
Lemma sub_through_subst_tt : forall Q E F Z S T P,
sub (E & Z ~<: Q & F) S T ->
sub E P Q ->
sub (E & map (subst_tb Z P) F) (subst_tt Z P S) (subst_tt Z P T).
Proof.
introv SsubT PsubQ.
inductions SsubT; introv; simpl subst_tt.
apply* sub_top.
case_var.
apply* sub_reflexivity.
apply* sub_reflexivity.
inversions H0. binds_cases H3.
apply* (@wft_var U).
apply* (@wft_var (subst_tt Z P U)). unsimpl_map_bind*.
case_var.
apply (@sub_transitivity Q).
apply_empty* sub_weakening.
rewrite* <- (@subst_tt_fresh Z P Q).
binds_get H. autos*.
apply* (@notin_fv_wf E).
apply* (@sub_trans_tvar (subst_tt Z P U)).
rewrite* (@map_subst_tb_id E Z P).
binds_cases H; unsimpl_map_bind*.
apply* sub_arrow.
apply_fresh* sub_all as X.
unsimpl (subst_tb Z P (bind_sub T1)).
do 2 rewrite* subst_tt_open_tt_var.
apply_ih_map_bind* H0.
Qed.
(* ********************************************************************** *)
(** * Properties of Typing *)
(* ********************************************************************** *)
(** Weakening (5) *)
Lemma typing_weakening : forall E F G e T,
typing (E & G) e T ->
okt (E & F & G) ->
typing (E & F & G) e T.
Proof.
introv Typ. gen F. inductions Typ; introv Ok.
apply* typing_var. apply* binds_weaken.
apply_fresh* typing_abs as x. forwards~ K: (H x).
apply_ih_bind (H0 x); eauto.
apply_fresh* typing_cap as x. repeat(rewrite typ_env_dist in *).
assert (HI: okt(typ_env E & typ_env F & typ_env G)).
apply typ_env_okt in Ok. repeat(rewrite typ_env_dist in Ok). autos.
lets: H1 x. rewrite <- concat_assoc in H2. rewrite <- concat_assoc.
apply* H2. rewrite concat_assoc. apply okt_typ. auto.
apply* wft_weaken. forwards~: (H0 x). destructs (typing_regular H3).
rewrite <- (@concat_empty_r bind (typ_env E & typ_env G)).
eapply wft_strengthen. rewrite concat_empty_r. eauto.
unfolds. intros HII. repeat(rewrite dom_concat in HII).
assert (Ha: x \notin dom E \u dom F \u dom G) by autos. apply Ha.
repeat(rewrite in_union in *). rewrite or_assoc in HII. branches HII.
branch 1. lets*: typ_env_dom_subset E.
branch 2. lets*: typ_env_dom_subset F.
branch 3. lets*: typ_env_dom_subset G.
apply* typing_app.
apply_fresh* typing_tabs as X. forwards~ K: (H X).
apply_ih_bind (H0 X); eauto.
apply* typing_tapp. apply* sub_weakening.
apply* typing_sub. apply* sub_weakening.
Qed.
Lemma typing_weakening_env : forall E F G e T,
typing (E & (typ_env F) & G) e T ->
okt (E & F & G) ->
typing (E & F & G) e T.
Proof. intros. inductions H.
apply* typing_var. destruct (binds_concat_inv H0).
apply* binds_concat_right.
destruct H2. apply* binds_concat_left. destruct (binds_concat_inv H3).
destruct (typ_env_no_var _ H4). destruct H4.
apply* binds_concat_left_ok. lets*: ok_concat_inv_l (ok_from_okt H1).
apply_fresh* typing_abs as x. forwards~ K: (H x).
apply_ih_bind (H0 x); eauto.
destruct (typing_regular K). apply okt_typ; autos.
lets(_ & HI & _): okt_push_typ_inv H2.
apply* typ_env_wft_weaken.
apply_fresh* typing_cap as x. repeat(rewrite typ_env_dist in *).
rewrite typ_env_eq in H0. apply* H0.
apply* typing_app.
apply_fresh* typing_tabs as X. forwards~ K: (H X).
apply_ih_bind (H0 X); eauto. apply* okt_sub.
eapply typ_env_wft_weaken; autos. destructs (typing_regular K).
eapply wft_from_okt_sub; eauto.
apply* typing_tapp. apply* sub_weakening_env.
eapply typing_sub. apply* IHtyping. apply* sub_weakening_env.
Qed.
(* ********************************************************************** *)
(** Strengthening (6) *)
Lemma sub_strengthening : forall x U E F S T,
sub (E & x ~: U & F) S T ->
sub (E & F) S T.
Proof.
intros x U E F S T SsubT.
inductions SsubT; introv; autos* wft_strengthen.
(* case: fvar trans *)
apply* (@sub_trans_tvar U0). binds_cases H; autos*.
(* case: all *)
apply_fresh* sub_all as X. apply_ih_bind* H0.
Qed.
Lemma sub_strengthening_env : forall E S T,
sub E S T -> sub (typ_env E) S T.
Proof. intros. inductions H; autos.
apply sub_top. apply* typ_env_okt. apply* typ_env_wft_reverse.
apply sub_refl_tvar. apply* typ_env_okt. apply* typ_env_wft_reverse.
eapply sub_trans_tvar. eapply typ_env_binds_reverse. eauto. auto.
apply sub_all with L. auto. intros. forwards~ : H1 X.
rewrite typ_env_dist in H3.
replace (typ_env (X ~<: T1)) with (X ~<: T1) in H3 by (rewrite* single_def).
auto.
Qed.
(************************************************************************ *)
(** Preservation by Type Narrowing (7) *)
Lemma typing_narrowing : forall Q E F X P e T,
sub E P Q ->
typing (E & X ~<: Q & F) e T ->
typing (E & X ~<: P & F) e T.
Proof.
introv PsubQ Typ. gen_eq E': (E & X ~<: Q & F). gen E F.
inductions Typ; introv PsubQ EQ; subst; simpl.
binds_cases H0; apply* typing_var.
apply_fresh* typing_abs as y. apply_ih_bind* H0.
apply_fresh* typing_cap as y. repeat(rewrite typ_env_dist in *).
rewrite <- concat_assoc. lets: H1 y. rewrite <- concat_assoc in H2.
replace (typ_env (X ~<: P)) with (X ~<: P) by (rewrite* single_def).
apply* H2. apply* sub_strengthening_env.
replace (typ_env (X ~<: Q)) with (X ~<: Q) by (rewrite* single_def). auto.
apply* typing_app.
apply_fresh* typing_tabs as Y. apply_ih_bind* H0.
apply* typing_tapp. apply* (@sub_narrowing Q).
apply* typing_sub. apply* (@sub_narrowing Q).
Qed.
(************************************************************************ *)
(** Preservation by Term Substitution (8) *)
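(** The next lemmas relate free variables to the opening operations; they are
    auxiliary facts used for the substitution results below. *)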
Lemma open_tt_fv_subset: forall k U T,
fv_tt T \c fv_tt (open_tt_rec k U T).
Proof. intros. gen k. induction T; intros; simpls.
apply subset_empty_l.
apply subset_empty_l.
apply subset_refl.
apply* subset_union_2.
apply* subset_union_2.
Qed.
Lemma open_te_fv_subset: forall k U e,
fv_te e \c fv_te (open_te_rec k U e).
Proof. intros. gen k. induction e; intros; simpls.
apply subset_empty_l.
apply subset_empty_l.
apply* subset_union_2. apply open_tt_fv_subset.
apply* subset_union_2. apply open_tt_fv_subset.
apply* subset_union_2.
autos.
apply* subset_union_2. apply open_tt_fv_subset.
apply* subset_union_2. apply open_tt_fv_subset.
Qed.
Lemma open_ee_fv_subset: forall k u e,
fv_ee e \c fv_ee (open_ee_rec k u e).
Proof. intros. gen k. induction e; intros; simpls.
apply subset_empty_l.
apply subset_refl.
autos.
autos.
apply* subset_union_2.
autos.
autos.
Qed.
Lemma open_ee_te_fv_eq: forall k U e,
fv_ee e = fv_ee (open_te_rec k U e).
Proof. intros. gen k. induction e; intros; simpls; autos.
rewrites (IHe1 k).
rewrites (IHe2 k).
reflexivity.
Qed.
Lemma open_te_ee_fv_subset: forall k u e,
fv_te e \c fv_te (open_ee_rec k u e).
Proof. intros. gen k. induction e; intros; simpls; autos.
apply subset_empty_l.
apply subset_empty_l.
apply* subset_union_2. apply subset_refl.
apply* subset_union_2. apply subset_refl.
apply* subset_union_2.
apply* subset_union_2. apply subset_refl.
apply* subset_union_2. apply subset_refl.
Qed.
Lemma open_tt_tt_fv_subset: forall k T1 T2,
fv_tt (open_tt_rec k T1 T2) \c fv_tt T1 \u fv_tt T2.
Proof. intros. gen k. induction T2; intros; simpls; autos.
apply subset_empty_l.
destruct (prop_degeneracy (k = n)).
(* k = n*)
apply is_True_inv in H. rewrite* If_l. apply subset_union_weak_l.
(* k != n*)
apply is_False_inv in H. rewrite* If_r. simpl. apply subset_empty_l.
apply subset_union_weak_r.
lets*: (subset_union_2 (IHT2_1 k) (IHT2_2 k)).
rewrite union_assoc in H.
rewrite union_comm in H.
replace ((fv_tt T1 \u fv_tt T2_1) \u fv_tt T1) with (fv_tt T1 \u fv_tt T2_1) in H.
rewrite union_assoc. rewrite union_comm. autos.
rewrite union_comm. rewrite <- union_assoc.
rewrite union_same. reflexivity.
lets*: (subset_union_2 (IHT2_1 k) (IHT2_2 (S k))).
rewrite union_assoc in H.
rewrite union_comm in H.
replace ((fv_tt T1 \u fv_tt T2_1) \u fv_tt T1) with (fv_tt T1 \u fv_tt T2_1) in H.
rewrite union_assoc. rewrite union_comm. autos.
rewrite union_comm. rewrite <- union_assoc.
rewrite union_same. reflexivity.
Qed.
Lemma wft_fv_tt: forall E T,
wft E T -> fv_tt T \c dom E.
Proof.
intros. inductions H; simpls.
apply subset_empty_l.
lets: get_some_inv (binds_get H). unfolds subset. intros.
rewrite in_singleton in H1. rewrite* H1.
rewrite <- union_same. apply* subset_union_2.
rewrite <- union_same. apply* subset_union_2.
pick_fresh X. forwards~ HI: (H1 X). simpls. rewrite dom_concat in HI.
rewrite dom_single in HI. eapply subset_strengthen. eapply subset_trans.
apply open_tt_fv_subset. exact HI. autos.
Qed.
Lemma subtyping_env_fv : forall E S T, sub E S T ->
fv_tt S \c dom E /\ fv_tt T \c dom E.
Proof. intros. inductions H; simpls.
splits. apply* wft_fv_tt. apply subset_empty_l.
lets*: wft_fv_tt H0.
destructs IHsub. splits*. lets: get_some_inv (binds_get H).
unfolds. intros. rewrite in_singleton in H4. subst*.
destructs IHsub1. destructs IHsub2.
splits; rewrite <- union_same; apply* subset_union_2.
destruct IHsub. pick_fresh X. forwards~ HI: H1 X.
rewrite dom_concat in HI. rewrite dom_single in HI. destruct HI.
splits.
rewrite <- union_same. apply* subset_union_2. apply subset_strengthen with X; autos.
eapply subset_trans. apply open_tt_fv_subset. eauto.
rewrite <- union_same. apply* subset_union_2. apply subset_strengthen with X; autos.
eapply subset_trans. apply open_tt_fv_subset. eauto.
Qed.
Lemma typing_env_fv : forall E e T, typing E e T ->
fv_ee e \c dom E /\ fv_te e \c dom E /\ fv_tt T \c dom E.
Proof. intros. inductions H; substs.
(* var *)
forwards~ HI: get_some_inv (binds_get H0).
simpls. splits.
unfolds subset. intros. rewrite in_singleton in *. subst*.
apply subset_empty_l.
apply* wft_fv_tt.
(* abs *)
pick_fresh x. forwards~ (HI & HII & HIII): H0 x. simpls.
rewrite dom_concat in *. rewrite dom_single in *.
unfolds open_ee. unfolds open_te.
assert (Ha: fv_tt V \c dom E).
forwards~ Htyp: (H x).
destruct (typing_regular Htyp) as [He _].
apply subset_strengthen with x; autos.
rewrite <- dom_single with (v:= bind_typ V). rewrite <- dom_concat.
apply* wft_fv_tt; autos.
splits.
apply* subset_strengthen. eapply subset_trans. apply open_ee_fv_subset. exact HI.
rewrite <- union_same. apply* subset_union_2. apply* subset_strengthen.
eapply subset_trans. apply open_te_ee_fv_subset. exact HII.
rewrite <- union_same. apply* subset_union_2. apply* subset_strengthen.
(* cap *)
pick_fresh x. forwards~ (HI & HII & HIII): H1 x. simpls.
rewrite dom_concat in *. rewrite dom_single in *. unfolds open_ee.
assert (Ha: fv_tt V \c dom (typ_env E)).
forwards~ Htemp: (H0 x).
destruct (typing_regular Htemp) as [He _].
apply* wft_fv_tt.
splits.
apply* subset_strengthen. eapply subset_trans. apply open_ee_fv_subset.
eapply subset_trans. eauto. apply subset_union_2.
apply typ_env_dom_subset. apply subset_refl.
rewrite <- union_same. apply* subset_union_2.
eapply subset_trans. exact Ha. apply typ_env_dom_subset.
apply* subset_strengthen. eapply subset_trans. apply open_te_ee_fv_subset.
eapply subset_trans. eauto. apply subset_union_2.
apply typ_env_dom_subset. apply subset_refl.
rewrite <- union_same. apply* subset_union_2.
eapply subset_trans. eauto. apply typ_env_dom_subset.
apply* subset_strengthen. eapply subset_trans. eauto.
apply subset_union_2. apply typ_env_dom_subset. apply subset_refl.
(* app *)
forwards(Ma & Mb & Mc): IHtyping1. forwards(Na & Nb & Nc): IHtyping2.
simpls. splits.
rewrite <- union_same. apply* subset_union_2.
rewrite <- union_same. apply* subset_union_2.
eapply subset_trans. apply subset_union_weak_r. exact Mc.
(* tabs *)
simpls. pick_fresh X. forwards~ (HI & HII & HIII) : (H0 X).
rewrite dom_concat in *. rewrite dom_single in *.
unfolds open_ee. unfolds open_te. unfolds open_tt.
rewrite <- open_ee_te_fv_eq in HI.
assert (Ha: fv_tt V \c dom E).
forwards~ Htyp: (H X).
destruct (typing_regular Htyp) as [He _].
apply subset_strengthen with X; autos.
rewrite <- dom_single with (v:= bind_sub V). rewrite <- dom_concat.
apply* wft_fv_tt; autos.
splits.
eapply subset_strengthen. exact HI. autos.
rewrite <- union_same. apply* subset_union_2. apply subset_strengthen with X; autos.
eapply subset_trans. apply open_te_fv_subset. exact HII.
rewrite <- union_same. apply* subset_union_2. apply subset_strengthen with X; autos.
eapply subset_trans. apply open_tt_fv_subset. exact HIII.
(* tapp *)
lets(HI & HII & HIII): IHtyping. simpls.
assert (Ha: fv_tt T \c dom E) by (lets*: subtyping_env_fv H0).
splits; autos.
rewrite <- union_same. apply* subset_union_2.
eapply subset_trans. eapply open_tt_tt_fv_subset.
rewrite <- union_same. apply* subset_union_2.
eapply subset_trans. apply subset_union_weak_r. apply HIII.
(* sub *)
destructs IHtyping. splits; autos. lets*: subtyping_env_fv H0.
Qed.
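(** The body of a well-typed capability abstraction does not mention the term
    variable bound in the middle of the environment. *)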
Lemma typing_cap_closed_trm : forall e T E F x U V,
typing (E & x ~: U & F) (trm_cap V e) T -> x \notin fv_ee e.
Proof. intros. inductions H; eauto.
repeat(rewrite typ_env_dist in *). lets: (ok_middle_inv (ok_from_okt H)).
replace (typ_env (x ~: U)) with (@empty bind) in *
by (rewrite single_def; simpl; rewrite* empty_def).
rewrite concat_empty_r in *.
assert (HI: typing (E & F) (trm_cap V e) (typ_arrow V T1)).
apply* typing_cap. rewrite* typ_env_dist.
destructs (typing_env_fv HI). simpls. rewrite <- notin_union in H2.
unfolds subset. intros Hc. apply H2. rewrite <- dom_concat. autos.
Qed.
Lemma typing_cap_closed_typ : forall e V T,
typing empty (trm_cap V e) (typ_arrow V T) ->
forall X, X \notin fv_te e /\ X \notin fv_tt V /\ X \notin fv_tt T.
Proof. intros. lets(_ & HI & HII): typing_env_fv H.
simpls. rewrite dom_empty in *. unfolds subset.
specialize HI with X. specialize HII with X. rewrite in_union in *.
splits; intros Hin; rewrite* <- (@in_empty var X).
Qed.
Lemma typing_through_subst_ee : forall U E F x T e u,
typing (E & x ~: U & F) e T ->
typing E u U ->
typing (E & F) (subst_ee x u e) T.
Proof.
introv TypT TypU. inductions TypT; introv; simpl.
case_var.
binds_get H0. apply_empty* typing_weakening.
binds_cases H0; apply* typing_var.
apply_fresh* typing_abs as y.
rewrite* subst_ee_open_ee_var.
apply_ih_bind* H0.
apply typing_cap with L; eauto.
rewrite* subst_ee_fresh. repeat(rewrite typ_env_dist in *).
replace (typ_env (x ~: U)) with (@empty bind) in *
by (rewrite single_def; simpl; rewrite* empty_def).
rewrite concat_empty_r in *. autos.
eapply typing_cap_closed_trm. eapply typing_cap; eauto.
apply* typing_app.
apply_fresh* typing_tabs as Y.
rewrite* subst_ee_open_te_var.
apply_ih_bind* H0.
apply* typing_tapp. apply* sub_strengthening.
apply* typing_sub. apply* sub_strengthening.
Qed.
(************************************************************************ *)
(** Preservation by Type Substitution (11) *)
Lemma typing_through_subst_te : forall Q E F Z e T P,
typing (E & Z ~<: Q & F) e T ->
sub E P Q ->
typing (E & map (subst_tb Z P) F) (subst_te Z P e) (subst_tt Z P T).
Proof.
introv Typ PsubQ.
inductions Typ; introv; simpls subst_tt; simpls subst_te.
apply* typing_var. rewrite* (@map_subst_tb_id E Z P).
binds_cases H0; unsimpl_map_bind*.
apply_fresh* typing_abs as y.
unsimpl (subst_tb Z P (bind_typ V)).
rewrite* subst_te_open_ee_var.
apply_ih_map_bind* H0.
apply_fresh* typing_cap as y.
unsimpl (subst_tb Z P (bind_typ V)).
rewrite* subst_te_open_ee_var. rewrite typ_env_dist.
rewrite typ_env_map. apply_ih_map_bind* H1.
repeat(rewrite typ_env_dist).
replace (typ_env (Z ~<: Q)) with (Z ~<:Q) by (rewrite* single_def).
rewrite concat_assoc. reflexivity.
apply* sub_strengthening_env.
apply* typing_app.
apply_fresh* typing_tabs as Y.
unsimpl (subst_tb Z P (bind_sub V)).
rewrite* subst_te_open_te_var.
rewrite* subst_tt_open_tt_var.
apply_ih_map_bind* H0.
rewrite* subst_tt_open_tt. apply* typing_tapp.
apply* sub_through_subst_tt.
apply* typing_sub. apply* sub_through_subst_tt.
Qed.
(* ********************************************************************** *)
(** * Preservation *)
(* ********************************************************************** *)
(** Inversions for Typing (13) *)
Lemma typing_inv_abs : forall E S1 e1 T,
typing E (trm_abs S1 e1) T ->
forall U1 U2, sub E T (typ_arrow U1 U2) ->
sub E U1 S1
/\ exists S2, exists L, forall x, x \notin L ->
typing (E & x ~: S1) (e1 open_ee_var x) S2 /\ sub E S2 U2.
Proof.
introv Typ. gen_eq e: (trm_abs S1 e1). gen S1 e1.
induction Typ; intros S1 b1 EQ U1 U2 Sub; inversions EQ.
inversions* Sub. autos* (@sub_transitivity T).
Qed.
Lemma typing_inv_cap : forall E S1 e1 T,
typing E (trm_cap S1 e1) T ->
forall U1 U2, sub E T (typ_arrow U1 U2) ->
sub E U1 S1
/\ exists S2, exists L, forall x, x \notin L ->
typing (E & x ~: S1) (e1 open_ee_var x) S2 /\ sub E S2 U2.
Proof.
introv Typ. gen_eq e: (trm_cap S1 e1). gen S1 e1.
induction Typ; intros S1 b1 EQ U1 U2 Sub; inversions EQ.
inversions* Sub. splits*. exists T1.
let L1 := gather_vars in exists L1. split; auto.
rewrite <- (@concat_empty_l bind E).
apply typing_weakening_env. rewrite* concat_empty_l.
rewrite concat_empty_l. apply* okt_typ.
autos* (@sub_transitivity T).
Qed.
Lemma typing_inv_tabs : forall E S1 e1 T,
typing E (trm_tabs S1 e1) T ->
forall U1 U2, sub E T (typ_all U1 U2) ->
sub E U1 S1
/\ exists S2, exists L, forall X, X \notin L ->
typing (E & X ~<: U1) (e1 open_te_var X) (S2 open_tt_var X)
/\ sub (E & X ~<: U1) (S2 open_tt_var X) (U2 open_tt_var X).
Proof.
intros E S1 e1 T H. gen_eq e: (trm_tabs S1 e1). gen S1 e1.
induction H; intros S1 b EQ U1 U2 Sub; inversion EQ.
inversions Sub. splits. auto.
exists T1. let L1 := gather_vars in exists L1.
intros Y Fr. splits.
apply_empty* (@typing_narrowing S1). auto.
autos* (@sub_transitivity T).
Qed.
(* ********************************************************************** *)
(** Preservation Result (20) *)
Lemma preservation_result : preservation.
Proof.
introv Typ. gen e'. induction Typ; introv Red;
try solve [ inversion Red ].
(* case: app *)
inversions Red; try solve [ apply* typing_app ].
destruct~ (typing_inv_abs Typ1 (U1:=T1) (U2:=T2)) as [P1 [S2 [L P2]]].
apply* sub_reflexivity.
pick_fresh X. forwards~ K: (P2 X). destruct K.
rewrite* (@subst_ee_intro X).
apply_empty (@typing_through_subst_ee V).
apply* (@typing_sub S2). apply_empty* sub_weakening.
autos*.
destruct~ (typing_inv_cap Typ1 (U1:=T1) (U2:=T2)) as [P1 [S2 [L P2]]].
apply* sub_reflexivity.
pick_fresh X. forwards~ K: (P2 X). destruct K.
rewrite* (@subst_ee_intro X).
apply_empty (@typing_through_subst_ee V).
apply* (@typing_sub S2). apply_empty* sub_weakening.
autos*.
(* case: tapp *)
inversions Red; try solve [ apply* typing_tapp ].
destruct~ (typing_inv_tabs Typ (U1:=T1) (U2:=T2)) as [P1 [S2 [L P2]]].
apply* sub_reflexivity.
pick_fresh X. forwards~ K: (P2 X). destruct K.
rewrite* (@subst_te_intro X).
rewrite* (@subst_tt_intro X).
(* todo: apply empty here *)
asserts_rewrite (E = E & map (subst_tb X T) empty).
rewrite map_empty. rewrite~ concat_empty_r.
apply* (@typing_through_subst_te T1).
rewrite* concat_empty_r.
(* case sub *)
apply* typing_sub.
Qed.
(* ********************************************************************** *)
(** * Progress *)
(* ********************************************************************** *)
(** Canonical Forms (14) *)
Lemma canonical_form_abs : forall t U1 U2,
value t -> typing empty t (typ_arrow U1 U2) ->
exists V, exists e1, t = trm_abs V e1 \/ t = trm_cap V e1.
Proof.
introv Val Typ. gen_eq E: (@empty bind).
gen_eq T: (typ_arrow U1 U2). gen U1 U2.
induction Typ; introv EQT EQE;
try solve [ inversion Val | inversion EQT | eauto ].
subst. inversion H.
false (binds_empty_inv H0).
inversions H0. forwards*: IHTyp.
Qed.
Lemma canonical_form_tabs : forall t U1 U2,
value t -> typing empty t (typ_all U1 U2) ->
exists V, exists e1, t = trm_tabs V e1.
Proof.
introv Val Typ. gen_eq E: (@empty bind).
gen_eq T: (typ_all U1 U2). gen U1 U2.
induction Typ; introv EQT EQE;
try solve [ inversion Val | inversion EQT | eauto ].
subst. inversion H.
false* binds_empty_inv.
inversions H0. forwards*: IHTyp.
Qed.
(* ********************************************************************** *)
(** Progress Result (16) *)
Lemma progress_result : progress.
Proof.
introv Typ. gen_eq E: (@empty bind). lets Typ': Typ.
induction Typ; intros EQ; subst.
(* case: var *)
false* binds_empty_inv.
(* case: abs *)
left*.
(* case: cap *)
left*.
(* case: app *)
right. destruct* IHTyp1 as [Val1 | [e1' Rede1']].
destruct* IHTyp2 as [Val2 | [e2' Rede2']].
destruct (canonical_form_abs Val1 Typ1) as [S [e3 EQ]].
destruct EQ; subst; exists* (open_ee e3 e2).
(* case: tabs *)
left*.
(* case: tapp *)
right. destruct~ IHTyp as [Val1 | [e1' Rede1']].
destruct (canonical_form_tabs Val1 Typ) as [S [e3 EQ]].
subst. exists* (open_te e3 T).
exists* (trm_tapp e1' T).
(* case: sub *)
autos*.
Qed.
|
import algebra.ring
import commutative_algebra.bool_field
import tactic.pi_instances tactic.squeeze
universes u v
variables (U : Type u) (V : Type v)
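/-- Functions `U → bool`, viewed as a commutative ring with pointwise `bxor`
    as addition and `band` as multiplication (see `val_add` and `val_mul`). -/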
def ring_of_bool_subsets := (U → bool)
instance : comm_ring (ring_of_bool_subsets U) :=
by { unfold ring_of_bool_subsets, pi_instance, }
lemma val_zero (x : U) : (0 : ring_of_bool_subsets U) x = ff := rfl
lemma val_one (x : U) : (1 : ring_of_bool_subsets U) x = tt := rfl
lemma val_neg (f : ring_of_bool_subsets U) (x : U) : (- f) x = f x := rfl
lemma val_add (f g : ring_of_bool_subsets U) (x : U) : (f + g) x = bxor (f x) (g x) := rfl
lemma val_mul (f g : ring_of_bool_subsets U) (x : U) : (f * g) x = band (f x) (g x) := rfl
open classical
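/-- Subsets of `V` (i.e. `set V`), to be equipped with a commutative ring
    structure: symmetric difference as addition, intersection as multiplication. -/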
def ring_of_subsets := set V
namespace ring_of_subsets
local attribute [instance] classical.dec
instance : has_zero (ring_of_subsets V) := ⟨(∅ : set V)⟩
instance : has_one (ring_of_subsets V) := ⟨(set.univ : set V)⟩
instance : has_neg (ring_of_subsets V) := ⟨(λ s, s)⟩
instance : has_add (ring_of_subsets V) :=
⟨(λ s t : set V, (s \ t ∪ t \ s : set V))⟩
instance : has_mul (ring_of_subsets V) :=
⟨(λ s t : set V, (s ∩ t : set V))⟩
instance : has_mem V (ring_of_subsets V) :=
⟨λ (x : V) (s : set V), x ∈ s⟩
variable (x : V)
lemma mem_zero_iff : x ∈ (0 : ring_of_subsets V) ↔ false :=
(iff_false _).mpr (set.not_mem_empty x)
lemma mem_one_iff : x ∈ (1 : ring_of_subsets V) ↔ true :=
(iff_true _).mpr (set.mem_univ x)
lemma mem_neg_iff (s : ring_of_subsets V) : x ∈ (- s) ↔ x ∈ s :=
by { change x ∈ s ↔ x ∈ s, refl }
lemma mem_add_iff (s t : ring_of_subsets V) :
x ∈ s + t ↔ ((x ∈ s ∧ x ∉ t) ∨ (x∈ t ∧ x ∉ s)) :=
by { unfold has_add.add, rw[set.mem_union,set.mem_diff,set.mem_diff],}
lemma mem_mul_iff (s t : ring_of_subsets V) :
x ∈ s * t ↔ (x ∈ s ∧ x ∈ t) :=
by { unfold has_mul.mul, rw[set.mem_inter_iff], }
instance : comm_ring (ring_of_subsets V) := {
zero := (0 : ring_of_subsets V), one := 1,
neg := has_neg.neg, add := (+), mul := (*),
add_zero := λ a, by { ext x, rw[mem_add_iff, mem_zero_iff], simp },
zero_add := λ a, by { ext x, rw[mem_add_iff, mem_zero_iff], simp },
add_left_neg := λ a, begin
ext x, rw[mem_add_iff, mem_neg_iff, mem_zero_iff],
by_cases ha : x ∈ a;
simp[ha]
end,
add_comm := λ a b, begin
ext x, rw[mem_add_iff, mem_add_iff],
by_cases ha : x ∈ a; by_cases hb : x ∈ b;
simp[ha,hb]
end,
add_assoc := λ a b c, begin
ext x, repeat { rw[mem_add_iff] },
by_cases ha : x ∈ a; by_cases hb : x ∈ b; by_cases hc : x ∈ c;
simp[ha,hb,hc],
end,
mul_one := λ a, by { ext x, rw[mem_mul_iff, mem_one_iff], simp },
one_mul := λ a, by { ext x, rw[mem_mul_iff, mem_one_iff], simp },
mul_comm := λ a b, begin
ext x, rw[mem_mul_iff, mem_mul_iff, and_comm],
end,
mul_assoc := λ a b c, begin
ext x, repeat { rw[mem_mul_iff] }, rw[and_assoc],
end,
left_distrib := λ a b c, begin
ext x, rw[mem_add_iff, mem_mul_iff, mem_add_iff, mem_mul_iff, mem_mul_iff],
by_cases ha : x ∈ a; by_cases hb : x ∈ b;
simp[ha,hb]
end,
right_distrib := λ a b c, begin
ext x, rw[mem_add_iff, mem_mul_iff, mem_add_iff, mem_mul_iff, mem_mul_iff],
by_cases ha : x ∈ a; by_cases hb : x ∈ b;
simp[ha,hb]
end
}
variable {V}
noncomputable def indicator : ring_of_subsets V →+* ring_of_bool_subsets V := {
to_fun := λ s x, (if x ∈ s then tt else ff),
map_zero' := by { ext x, rw[mem_zero_iff V x,if_false], refl },
map_add' := λ s t, by {
ext x,
rw[mem_add_iff, val_add],
by_cases hs : x ∈ s; by_cases ht : x ∈ t; simp[hs,ht],
},
map_one' := by { ext x, rw[mem_one_iff V x,if_true], refl },
map_mul' := λ s t, by {
ext x,
rw[mem_mul_iff, val_mul],
by_cases hs : x ∈ s; by_cases ht : x ∈ t;
simp[hs, ht],
}
}
end ring_of_subsets
namespace ring_of_bool_subsets
variable {U}
def support : (ring_of_bool_subsets U) →+* (ring_of_subsets U) := {
to_fun := λ (f : U → bool), (λ x, f x = tt),
map_zero' := begin
ext x, rw[set.mem_def, val_zero, ring_of_subsets.mem_zero_iff], simp only [],
end,
map_add' := λ s t, begin
ext x, rw[ring_of_subsets.mem_add_iff],
repeat { rw[set.mem_def] },
rw[val_add],
cases (s x); cases (t x); rw[bxor]; exact dec_trivial,
end,
map_one' := begin
ext x, rw[set.mem_def, val_one, ring_of_subsets.mem_one_iff, eq_self_iff_true]
end,
map_mul' := λ s t, begin
ext x, rw[ring_of_subsets.mem_mul_iff],
repeat { rw[set.mem_def] },
rw[val_mul],
cases (s x); cases (t x); rw[band]; exact dec_trivial,
end,
}
end ring_of_bool_subsets
namespace ring_of_subsets
noncomputable def bool_equiv : (ring_of_subsets U) ≃ (ring_of_bool_subsets U) := {
to_fun := @indicator U,
inv_fun := @ring_of_bool_subsets.support U,
left_inv := λ s, begin
haveI := classical.dec,
rw[ring_of_bool_subsets.support,indicator],
ext x,dsimp[has_mem.mem,set.mem],
split_ifs;simp only [h,iff_self,eq_self_iff_true],
end,
right_inv := λ f, begin
ext x,
dsimp[indicator,ring_of_bool_subsets.support,has_mem.mem,set.mem],
cases f x; simp,
end
}
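-- A small sanity check, added here as a sketch (it is not part of the original
-- development and relies only on the lemmas above): multiplication is
-- intersection, so every element is idempotent, i.e. this is a Boolean ring.
example (s : ring_of_subsets V) : s * s = s :=
by { ext x, rw[mem_mul_iff, and_self] }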
end ring_of_subsets
|
{-# OPTIONS --cubical #-}
module cubical where
open import Cubical.Core.Primitives
--- Sharp of a type: you can raise any term of type A to the sharp to get a term of type sharp-A
data ♯_ {ℓ : Level} (A : Type ℓ) : Type ℓ where
_↑♯ : A → ♯ A
-- do we need a duplicate of sharp-on-Types for crisp types?
-- data ♯c_ {@♭ ℓ : Level} (@♭ A : Type ℓ) : Type ℓ where
-- _↑♯c : A → ♯c A
-- having something crisply in sharp-A gets you something in A
-- the constructor is also the computation rule
_↓♯ : {@♭ ℓ : Level} {@♭ A : Type ℓ} (@♭ x : ♯ A) → A
(x ↑♯) ↓♯ = x
lower-then-upper : {@♭ ℓ : Level} {@♭ A : Type ℓ} (@♭ x : ♯ A) → (x ↓♯) ↑♯ ≡ x
lower-then-upper x = λ i → x
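-- a companion sketch (not in the original file; it assumes the crisp
-- application of _↓♯ to (a ↑♯) is accepted): the computation rule, as a path.
upper-then-lower : {@♭ ℓ : Level} {@♭ A : Type ℓ} (@♭ a : A) → ((a ↑♯) ↓♯) ≡ a
upper-then-lower a = λ i → a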
--- I is the interval pre-type
--- i0 : I
--- i1 : I
|
module Main
import System
-- Copied from: https://github.com/edwinb/Blodwen/blob/master/tests/Main.idr
%default covering
tests : List String
tests =
["test001", "test002"]
chdir : String -> IO Bool
chdir dir = do
ok <- foreign FFI_C "chdir" (String -> IO Int) dir
pure (ok == 0)
fail : String -> IO ()
fail err = do
putStrLn err
exitWith (ExitFailure 1)
runTest : String -> String -> String -> IO Bool
runTest dir prog test = do
chdir (dir ++ "/" ++ test)
putStr $ dir ++ "/" ++ test ++ ": "
system $ "sh ./run " ++ prog ++ " > output"
Right out <- readFile "output"
| Left err => do
print err
pure False
Right exp <- readFile "expected"
| Left err => do
print err
pure False
if (out == exp)
then putStrLn "success"
else putStrLn "FAILURE"
chdir "../.."
pure (out == exp)
main : IO ()
main = do
[_, deps] <- getArgs
| _ => do putStrLn "Usage: runtests [depsPath]"
results <- traverse (runTest "deps" deps) tests
if any not results
then exitWith (ExitFailure 1)
else exitWith ExitSuccess
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
Declare(SSEUnparser);
_toReal := v -> Cond(IsValue(v), Value(TReal, v.v), tcast(TReal, v));
@Value := @.cond(IsValue);
@TInt := @.cond(x->IsIntT(x.t));
@TReal := @.cond(x->IsRealT(x.t));
@TRealInt := @.cond(x->IsIntT(x.t) or IsRealT(x.t));
@_scalar := @.cond(x->IsIntT(x.t) or IsRealT(x.t) or IsPtrT(x.t));
@TVect := @.cond(x->IsVecT(x.t));
@TVectUChar := @.cond(x->IsVecT(x.t) and ObjId(x.t.t)=TUChar);
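# Helpers: _isa extracts the active vector ISA from the unparser options;
# _epi_or_px / _epu_or_px / _epi build the intrinsic name suffix for a value's
# vector type; _epu_to_epi maps an unsigned suffix ("epu...") to the signed one.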
_isa := self -> self.opts.vector.isa;
_epi_or_px := (self, o) -> When(_isa(self).isFixedPoint or IsOrdT(o.t.t),
"epi" :: self.ctype_suffixval(o.t, _isa(self)),
self.ctype_suffix(o.t, _isa(self)));
_epu_or_px := (self, o) -> When(_isa(self).isFixedPoint,
"epu" :: self.ctype_suffixval(o.t, _isa(self)),
self.ctype_suffix(o.t, _isa(self)));
_epi := (self, o) -> Concat("epi", self.ctype_suffixval(o.t, _isa(self)));
_epu_to_epi := (s) -> Cond( s{[1..3]} = "epu", "epi" :: s{[4..Length(s)]}, s );
Class(SSEUnparser, CMacroUnparserProg, rec(
# -----------------------------
# ISA independent constructs
# -----------------------------
nth := (self, o, i, is) >> self.printf("$1[$2]", [o.loc, o.idx]),
fdiv := (self, o, i, is) >> self.printf("(((double)$1) / ($2))", o.args),
div := (self, o, i, is) >> self.printf("(($1) / ($2))", o.args),
idiv := (self, o, i, is) >> self.printf("(($1) / ($2))", o.args),
# --------------------------------
# ISA constructs, general
# -------------------------------
# This is a general suffix for intrinsics that is determined from the data type
ctype_suffix := (self, t, isa) >> Cond(
t = TVect(T_Int(128), 1), "epi128",
t = TVect(T_UInt(128), 1), "epu128",
t = TVect(T_Real(32), 4) or
t = TVect(T_Real(32), 2) and isa=SSE_2x32f or
t = TVect(TReal, 4) and isa=SSE_4x32f or
t = TVect(TReal, 2) and isa=SSE_2x32f,
"ps",
# no way to create __m64 type directly from floats, have to go thru integers
t = TVect(T_Real(32), 2) and isa=SSE_4x32f or
t = TVect(TReal, 2) and isa=SSE_4x32f,
"ps_half",
t = TVect(TInt, 4) or
t = TVect(T_Int(32), 4) or
t = TVect(TReal, 4) and isa.isFixedPoint,
"epi32",
t = TVect(T_UInt(32), 4), "epu32",
t = TVect(T_Real(64), 2) or
t = TVect(TReal, 2) and isa=SSE_2x64f,
"pd",
t = TVect(TInt, 2) or
t = TVect(T_Int(64), 2) or
t = TVect(TReal, 2) and isa.isFixedPoint,
"epi64",
t = TVect(T_UInt(64), 2), "epu64",
t = TVect(T_Int(16), 8), "epi16",
t = TVect(T_UInt(16), 8), "epu16",
t = TVect(TInt, 8), "epi16",
t = TVect(TReal, 8), "epi16",
t = TVect(T_Int(8), 16), "epi8",
t = TVect(T_UInt(8), 16), "epu8",
t = TVect(TReal, 16), When(isa.isSigned, "epi8", "epu8"),
t = TVect(TInt, 16), "epi8",
t = TVect(TUChar, 16), "epu8",
""
),
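# For example, per the table above, TVect(T_Real(32), 4) yields "ps" and
# TVect(T_Int(16), 8) yields "epi16"; these suffixes are spliced into intrinsic
# names such as _mm_add_ps or _mm_add_epi16 further below.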
mul_suffix := (t,isa) -> Cond(
t = TVect(T_Real(32), 2), "_ps",
t = TVect(T_Real(32), 4), "_ps",
t = TVect(T_Real(64), 2), "_pd",
t = TVect(TReal, 2) and isa = SSE_2x32f, "_ps",
t = TVect(TReal, 2), "_pd",
t = TVect(TInt, 2), "_epi64",
t = TVect(TReal, 4), "_ps",
t = TVect(TInt, 4), "_epi32",
t = TVect(T_Int(32), 4), "lo_epi32",
t = TVect(TReal, 8), "lo_epi16",
t = TVect(TReal, 16), Error("16-way multiplication is not supported"),
t = TVect(TUChar, 16), Error("16-way multiplication is not supported"),
""
),
# This is the numeric part of the suffix (element width in bits), determined from the data type
ctype_suffixval := (t, isa) -> Cond(
t = TVect(TReal, 2), "64",
t = TVect(TInt, 4), "32",
t = TVect(TReal, 4), "32",
t = TVect(T_Int(32), 4), "32",
t = TVect(T_UInt(32), 4), "32",
t = TVect(TReal, 8), "16",
t = TVect(TInt, 8), "16",
t = TVect(T_Int(16), 8), "16",
t = TVect(T_UInt(16), 8), "16",
t = TVect(TReal, 16), "8",
t = TVect(TInt, 16), "8",
t = TVect(T_Int(8), 16), "8",
t = TVect(T_UInt(8), 16), "8",
t = TVect(TUChar, 16), "8",
t = TVect(T_Real(32), 4), "32",
t = TVect(T_Real(64), 2), "64",
""
),
# This is the type used for declarations of vector variables
ctype := (self, t, isa) >> Cond(
# NOTE: used for unaligned vector pointers; for single precision it should be "float"
t in [TReal, TVect(TReal, 1)],
Cond(isa = SSE_2x64f, "double",
isa = SSE_2x64i, "__int64",
isa = SSE_4x32f, "float",
isa = SSE_2x32f, "float",
isa = SSE_4x32i, "__int32",
isa = SSE_8x16i, "short",
isa = SSE_16x8i, Cond(isa.isSigned, "char", "unsigned char"),
isa.ctype),
t = TVect(TReal, 2),
Cond(isa = SSE_2x64f, "__m128d",
isa = SSE_2x64i, "__m128i",
isa = SSE_4x32f, "__m64",
isa = SSE_8x16i, "__int32",
isa = SSE_16x8i, "__int16",
isa = SSE_2x32f, "__m64"),
t = TVect(TReal, 4),
Cond(isa = SSE_4x32f, "__m128",
isa = SSE_2x32f, "__m128",
isa = SSE_4x32i, "__m128i"),
t = TVect(TInt, 2), "__m128i",
t = TVect(TInt, 4), "__m128i",
t = TVect(TInt, 8), "__m128i",
t = TVect(TReal, 8), "__m128i",
t = TVect(TInt, 16), "__m128i",
t = TVect(TUChar, 16), "__m128i",
t = TVect(TReal, 16), "__m128i",
t = TInt,
Cond(isa = SSE_2x64i, "__int64",
isa = SSE_4x32i, "__int32",
isa = SSE_8x16i, "short",
isa = SSE_16x8i, "char",
"int"),
t = TVect(T_Int(128), 1), "__m128i",
t = TVect(T_Int(64), 2), "__m128i",
t = TVect(T_Int(32), 4), "__m128i",
t = TVect(T_Int(16), 8), "__m128i",
t = TVect(T_Int(8), 16), "__m128i",
t = TVect(T_UInt(128), 1), "__m128i",
t = TVect(T_UInt(64), 2), "__m128i",
t = TVect(T_UInt(32), 4), "__m128i",
t = TVect(T_UInt(16), 8), "__m128i",
t = TVect(T_UInt(8), 16), "__m128i",
t = TVect(T_Real(32), 2), "__m64",
t = TVect(T_Real(32), 4), "__m128",
t = TVect(T_Real(64), 2), "__m128d",
Error(self,".ctype doesn't know type ",t)
),
cvalue_suffix := (self, t) >> let( isa := _isa(self), Cond(
(t = TReal and isa in [SSE_2x32f, SSE_4x32f]) or t = T_Real(32), "f",
(t = TReal) or t = T_Real(64), "",
Error(self,".cvalue_suffix doesn't know type ",t)
)),
vhex := (self, o, i, is) >> Print("_mm_set_", _epi(self, o), "(", self.infix(Reversed(o.p), ", "), ")"),
Value := (self, o, i, is) >> let(zero := "0" :: self.cvalue_suffix(TReal), Cond(
o.t = TString, Print(o.v),
o.t = TReal or ObjId(o.t)=T_Real, let(v := When(IsCyc(o.v), ReComplex(Complex(o.v)), Double(o.v)),
When(v<0, Print("(", v, self.cvalue_suffix(o.t), ")"), Print(v, self.cvalue_suffix(o.t)))),
#IsComplexT(o.t),
# Print("COMPLEX(", ReComplex(Complex(o.v)), self.cvalue_suffix(o.t.realType()), ", ",
# ImComplex(Complex(o.v)), self.cvalue_suffix(o.t.realType()), ")"),
IsIntT(o.t) or IsUIntT(o.t),
When(o.v < 0, Print("(", o.v, ")"), Print(o.v)),
ObjId(o.t) = TVect and _isa(self) = SSE_2x32f,
Cond(self.cx.isInside(Value) and Length(self.cx.Value) >= 2, # nested in an array
Print( "{", zero, ", ", zero, ", ", self.infix((o.v), ", "), "}"),
Print("_mm_set_ps(", zero, ", ", zero, ", ", self.infix(Reversed(o.v), ", "), ")")),
ObjId(o.t) = TVect and Length(Set(o.v)) = 1,
Cond(self.cx.isInside(Value) and Length(self.cx.Value) >= 2, # nested in an array
Print("{", self.infix(Replicate(o.t.size, o.v[1]), ", "), "}"),
Print("_mm_set1_", _epi_or_px(self, o), "(", self(o.v[1], i, is), ")")),
ObjId(o.t) = TVect,
Cond(self.cx.isInside(Value) and Length(self.cx.Value) >= 2, # nested in an array
Print( "{", self.infix((o.v), ", "), "}"),
Print("_mm_set_", _epi_or_px(self, o), "(", self.infix(Reversed(o.v), ", "), ")")),
IsArray(o.t),
Print("{", self.infix(o.v, ", "), "}"),
ObjId(o.t) = TSym,
Print("(", self.declare(o.t, [], 0, 0), ") ", o.v),
o.t = TBool, Print(When(o.v = true, "1", "0")),
Inherited(o, i, is)
)),
vpack := (self, o, i, is) >> let(
sfx := _epi_or_px(self, o),
Print("_mm_set_", sfx, "(", self.infix(Reversed(o.args), ", "), ")")),
vdup := (self, o, i, is) >> let(
sfx := _epi_or_px(self, o),
CondPat(o,
[vdup, nth, @.cond(x->x.t=TInt and x.v=2)], self.printf("_mm_loaddup_$1(&($2))", [sfx, o.args[1]]),
[vdup, @, @TInt], self.printf("_mm_set1_$1($2)", [sfx, o.args[1]]))),
# --------------------------------
# Declarations
_declTVect := (self, t, vars, i, is) >> let(ctype := self.ctype(t, _isa(self)), Print(ctype, " ", self.infix(vars, ", ", i+is))),
_unparseTVect := (self, t, i, is) >> let(ctype := self.ctype(t, _isa(self)), Print(ctype)),
TVect := arg >> When(Length(arg)=5, arg[1]._declTVect(arg[2], arg[3], arg[4], arg[5]),
arg[1]._unparseTVect(arg[2], arg[3], arg[4])),
TReal := ~.TVect,
TInt := (self, t, vars, i, is) >> Print("int ", self.infix(vars, ", ", i+is)),
TBool := (self, t, vars, i, is) >> Print("int ", self.infix(vars, ", ", i+is)),
# --------------------------------
# Arithmetic
#
mul := (self, o, i, is) >> let(n := Length(o.args), Cond(
not IsVecT(o.t),
Print("(",self.pinfix(o.args, ")*("),")"),
n > 2 and n mod 2 <> 0,
self(mul(o.args[1], ApplyFunc(mul, Drop(o.args, 1))), i, is),
n > 2,
self(mul(ApplyFunc(mul, o.args{[1..n/2]}), ApplyFunc(mul, o.args{[n/2+1..n]})), i, is),
CondPat(o,
[mul, @TReal, @TVect], Cond(_isa(self) = SSE_2x32f,
self(mul(vdup(o.args[1], 4), o.args[2]), i, is), # NOTE: HACK for SSE_2x32f
self(mul(vdup(o.args[1], o.t.size), o.args[2]), i, is)),
[mul, @TVect, @TReal], self(mul(o.args[1], vdup(o.args[2],o.t.size)), i, is),
# NOTE: This hack is probably no longer necessary (was used for PRDFTs)
[mul, @(1, cond, e -> e.t=TInt), @TVect],
self(mul(cond(o.args[1].args[1],
vdup(o.t.t.value(o.args[1].args[2]), o.t.size),
vdup(o.t.t.value(o.args[1].args[3]), o.t.size)), o.args[2]), i, is),
[mul, @TInt, @TVect], self(mul(vdup(_toReal(o.args[1]),o.t.size), o.args[2]), i, is),
[mul, @TVect, @TInt], self(mul(o.args[1], vdup(_toReal(o.args[2]),o.t.size)), i, is),
[mul, @TVect, @TVect], self.printf("_mm_mul$1($2, $3)", [self.mul_suffix(o.t, _isa(self)), o.args[1], o.args[2]]),
Error("Don't know how to unparse <o>. Unrecognized type combination")
))),
fpmul := (self, o, i, is) >> let(isa := _isa(self), CondPat(o,
# preparing for SSSE3 _mm_mulhrs_epi16 (__m128i a, __m128i b)
# self.printf("_mm_mulhrs_epi16($1, $2)", [o.args[2], o.args[3].t.value(List(o.args[3].v, i->bin_shl(i,1)))]),
[fpmul, @, @TVect, @], self.printf("$1($2($3, $4), $5)",
[isa.vlshift, isa.vmul, o.args[2], o.args[3], isa.bits-o.args[1]]),
[fpmul, @, @, @], self.printf("$1($2(_mm_set1_$3($4), $5), $6)",
[isa.vlshift, isa.vmul, self.ctype_suffix(o.t, _isa(self)), o.args[2], o.args[3], isa.bits-o.args[1]]))),
add := (self, o, i, is) >> let(n := Length(o.args), Cond(
not IsVecT(o.t),
self.pinfix(o.args, " + "),
n > 2 and n mod 2 <> 0,
self(add(o.args[1], ApplyFunc(add, Drop(o.args, 1))), i, is),
n > 2,
self(add(ApplyFunc(add, o.args{[1..n/2]}), ApplyFunc(add, o.args{[n/2+1..n]})), i, is),
let(isa := _isa(self), # ugly, backward compatibility, use <adds> instead
saturated := When(IsBound(isa.isFloat) and IsBound(isa.saturatedArithmetic) and not isa.isFloat and isa.saturatedArithmetic, "s", ""),
_sfx := self.ctype_suffix(o.t, isa),
sfx := Cond( saturated="", _epu_to_epi(_sfx), _sfx),
CondPat(o,
[add, @TVect, @TVect], self.printf("_mm_add$1_$2($3, $4)", [saturated, sfx, o.args[1], o.args[2]]),
Error("Don't know how to unparse <o>. Unrecognized type combination"))))),
adds := (self, o, i, is) >> CondPat(o,
[adds, @TVect, @TVect, ...],
Cond( Length(o.args)>2,
self(adds(o.args[1], brackets(ApplyFunc(adds, Drop(o.args, 1)))), i, is),
self.printf("_mm_adds_$1($2, $3)", [self.ctype_suffix(o.t, rec()), o.args[1], o.args[2]])),
Inherited(o, i, is)),
_sub := (self, t, a, i, is) >> let(
isa := _isa(self),
sfx := _epu_to_epi(self.ctype_suffix(t, isa)),
saturated := When(IsBound(isa.isFloat) and IsBound(isa.saturatedArithmetic) and not isa.isFloat and isa.saturatedArithmetic, "s", ""),
CondPat(a,
[ListClass, @TVect, @TVect], self.printf("_mm_sub$1_$2($3, $4)", [saturated, sfx, a[1], a[2]]),
[ListClass, @, @], self.printf("($1 - ($2))", a),
Error("Don't know how to unparse subtraction of a[1] and a[2]. Unrecognized type combination"))),
sub := (self, o, i, is) >> self._sub(o.t, o.args, i, is),
neg := (self, o, i, is) >> CondPat(o,
[@, @TVect], self._sub(o.t, [o.t.zero(), o.args[1]], i, is),
self.printf("(-$1)", o.args)),
stickyNeg := ~.neg,
sqrt := (self, o, i, is) >> Cond( IsVecT(o.t),
Checked( IsRealT(o.t.t), self.printf("_mm_sqrt_$1($2)", [self.ctype_suffix(o.t, _isa(self)), o.args[1]])),
Inherited(o, i, is)),
rsqrt := (self, o, i, is) >> Cond( IsVecT(o.t), let( sfx := self.ctype_suffix(o.t, _isa(self)),
Checked( sfx="ps", self.printf("_mm_rsqrt_ps($1)", [o.args[1]]))),
Inherited(o, i, is)),
# assuming we have ICC <ia32intrin.h> here
log := (self, o, i, is) >> Cond( IsVecT(o.t), let( sfx := self.ctype_suffix(o.t, _isa(self)),
Checked( sfx in ["ps", "pd"], Cond(
Length(o.args)>1 and (o.args[2]=2 or o.args[2]=o.t.value(2)),
self.printf("_mm_log2_$1($2)", [sfx, o.args[1]]),
Length(o.args)>1 and (o.args[2]=10 or o.args[2]=o.t.value(10)),
self.printf("_mm_log10_$1($2)", [sfx, o.args[1]]),
Length(o.args)=1 or o.args[2]=d_exp(1) or o.args[2]=o.t.value(d_exp(1)),
self.printf("_mm_log_$1($2)", [sfx, o.args[1]]),
self.printf("_mm_div_$1(_mm_log_$1($2), _mm_log_$1($3))", [sfx, o.args[1]])))),
Inherited(o, i, is)),
# assuming we have ICC <ia32intrin.h> here
exp := (self, o, i, is) >> Cond( IsVecT(o.t), let( sfx := self.ctype_suffix(o.t, _isa(self)),
Checked( sfx in ["ps", "pd"], self.printf("_mm_exp_$1($2)", [sfx, o.args[1]]))),
Inherited(o, i, is)),
# assuming we have ICC <ia32intrin.h> here
pow := (self, o, i, is) >> Cond( IsVecT(o.t), let( sfx := self.ctype_suffix(o.t, _isa(self)),
Checked( sfx in ["ps", "pd"], Cond(
o.args[1]=2 or o.args[1]=o.t.value(2),
self.printf("_mm_exp2_$1($2)", [sfx, o.args[2]]),
o.args[1]=d_exp(1) or o.args[1]=o.t.value(d_exp(1)),
self.printf("_mm_exp_$1($2)", [sfx, o.args[2]]),
self.printf("_mm_pow_$1($2, $3)", [sfx, o.args[1], o.args[2]])))),
Inherited(o, i, is)),
imod := (self, o, i, is) >> Cond( IsIntT(o.t.base_t()) and Is2Power(o.args[2]),
# in two's complement arithmetic this will work for both positive and negative o.args[1]
self(bin_and(o.args[1], o.args[2]-1), i, is),
self.printf("(($1) % ($2))", o.args)),
# --------------------------------
# logic
#
arith_shl := (self, o, i, is) >> self.prefix(_isa(self).vlshift, o.args),
arith_shr := (self, o, i, is) >> CondPat( o,
[arith_shr, @.cond(x->x.t=TVect(T_Int(32), 4)), @],
self.prefix("_mm_srai_epi32", o.args),
[arith_shr, @.cond(x->x.t=TVect(T_Int(16), 8)), @],
self.prefix("_mm_srai_epi16", o.args),
[arith_shr, @TVect, @],
self.prefix(_isa(self).vrshift, o.args),
Inherited(o, i, is)),
bin_xor := (self, o, i, is) >> CondPat(o,
[bin_xor, @TVect, @TVect], self.prefix("_mm_xor_si128", o.args),
Inherited(o, i, is)),
bin_and := (self, o, i, is) >> CondPat(o,
[bin_and, @TVect, @TVect], self.prefix("_mm_and_si128", o.args),
Inherited(o, i, is)),
bin_andnot := (self, o, i, is) >> self.prefix("_mm_andnot_si128", o.args),
bin_or := (self, o, i, is) >> CondPat(o,
[bin_or, @TVect, @TVect], let(sfx := self.ctype_suffix(o.t, _isa(self)),
Cond( not (sfx in ["ps", "pd", "ps_half"]), #was: _isa(self).isFixedPoint,
self.printf("_mm_or_si128($1, $2)", o.args),
self.printf("_mm_castsi128_$3(_mm_or_si128(_mm_cast$3_si128($1), _mm_cast$3_si128($2)))",
o.args :: [sfx]))),
[bin_or, @TReal, @TReal], self.printf("(($1) | ($2))", o.args),
Inherited(o, i, is)),
min := (self, o, i, is) >> CondPat(o,
[min, @TVect, @TVect], self.prefix("_mm_min_" :: self.ctype_suffix(o.t, _isa(self)), o.args),
Inherited(o, i, is)),
max := (self, o, i, is) >> let(n := Length(o.args), When(
IsVecT(o.t) and n > 2, self.printf("_mm_max_$1($2, $3)", [self.ctype_suffix(o.t, _isa(self)), o.args[1], ApplyFunc(max, Drop(o.args, 1))]),
CondPat(o,
[max, @TVect, @TVect], self.prefix("_mm_max_" :: self.ctype_suffix(o.t, _isa(self)), o.args),
Inherited(o, i, is)))),
abs := (self, o, i, is) >> CondPat(o,
[abs, @TVect], let( sfx := self.ctype_suffix(o.t, _isa(self)),
Cond( sfx = "ps", self.printf("_mm_castsi128_ps(_mm_and_si128(_mm_castps_si128($1), _mm_set_epi32(0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF)))", o.args),
sfx = "pd", self.printf("_mm_castsi128_pd(_mm_and_si128(_mm_castpd_si128($1), _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF)))", o.args),
Error("not implemented"))),
Inherited(o, i, is)),
bin_shl := (self, o, i, is) >> CondPat(o,
[bin_shl, @TVect, @TInt], let(
sfx := self.ctype_suffix(o.t, _isa(self)),
Cond( _isa(self).isFixedPoint, # legacy
self.printf("_mm_slli_si128($1, $2)", o.args), # legacy
sfx in ["epi16", "epi32", "epi64", "epu16", "epu32", "epu64"],
self.printf("_mm_slli_$3($1, $2)", o.args :: [_epu_to_epi(sfx)]),
sfx in ["epi8", "epu8"],
Error("bin_shl is undefined for epi8 and epu8"),
sfx in ["epi128", "epu128"], # shift with byte granularity
self.printf("_mm_slli_si128($1, $2)", [o.args[1], idiv(o.args[2], 8)] ),
# else, shift whole register with shift argument specified in bytes (legacy, fix using epi128 in ISAs first)
self.printf("_mm_castsi128_$3(_mm_slli_si128(_mm_cast$3_si128($1), $2))", o.args :: [sfx]))),
[bin_shl, @TReal, @TInt], self.printf("(($1) << ($2))", o.args),
[bin_shl, @TInt, @TInt], self.printf("(($1) << ($2))", o.args),
[bin_shl, @, @], self.prefix("_mm_slli_" :: self.ctype_suffix(o.t, _isa(self)), o.args)),
bin_shr := (self, o, i, is) >> CondPat(o,
[bin_shr, @TVect, @TInt], let(
sfx := self.ctype_suffix(o.t, _isa(self)),
Cond( _isa(self).isFixedPoint, # legacy
self.printf("_mm_srli_si128($1, $2)", o.args), # legacy
sfx in ["epi16", "epi32", "epi64", "epu16", "epu32", "epu64"],
self.printf("_mm_srli_$3($1, $2)", o.args :: [_epu_to_epi(sfx)]),
sfx in ["epi8", "epu8"],
Error("bin_shr is undefined for epi8 and epu8"),
sfx in ["epi128", "epu128"], # shift with byte granularity
self.printf("_mm_srli_si128($1, $2)", [o.args[1], idiv(o.args[2], 8)] ),
# else, shift whole register with shift argument specified in bytes (legacy, fix using epi128 in ISAs first)
self.printf("_mm_castsi128_$3(_mm_srli_si128(_mm_cast$3_si128($1), $2))", o.args :: [sfx]))),
# default
[bin_shr, @TReal, @TInt], self.printf("(($1) >> ($2))", o.args),
[bin_shr, @TInt, @TInt], self.printf("(($1) >> ($2))", o.args),
# what's this?
[bin_shr, @, @], self.prefix("_mm_srli_" :: self.ctype_suffix(o.t, _isa(self)), o.args)),
# vector shifts
vec_shr := (self, o, i, is) >> let(
isa := _isa(self),
sfx := self.ctype_suffix(o.t, isa),
# making sure this is an SSE data type
t := Checked(IsVecT(o.t) and sfx<>"", o.t),
a := o.args[1],
s := o.args[2] * 16 / t.size,
# may need typecasts to please compiler
Cond( self.ctype(t, isa)="__m128i",
self.printf("_mm_srli_si128($1, $2)", [a, s] ),
self.printf("_mm_castsi128_$3(_mm_srli_si128(_mm_cast$3_si128($1), $2))", [a, s, sfx])
)),
vec_shl := (self, o, i, is) >> let(
isa := _isa(self),
sfx := self.ctype_suffix(o.t, isa),
# making sure this is an SSE data type
t := Checked(IsVecT(o.t) and sfx<>"", o.t),
a := o.args[1],
s := o.args[2] * 16 / t.size,
# may need typecasts to please compiler
Cond( self.ctype(t, isa)="__m128i",
self.printf("_mm_slli_si128($1, $2)", [a, s] ),
self.printf("_mm_castsi128_$3(_mm_slli_si128(_mm_cast$3_si128($1), $2))", [a, s, sfx])
)),
# --------------------------------
# comparison
#
eq := (self, o, i, is) >> let( ctype := self.ctype_suffix(o.args[1].t, _isa(self)),
sfx := _epu_to_epi(ctype),
Cond(IsVecT(o.t), self.prefix("_mm_cmpeq_" :: sfx, o.args),
Inherited(o, i, is))),
lt := (self, o, i, is) >> Cond(IsVecT(o.t),
self.prefix("_mm_cmplt_" :: self.ctype_suffix(o.args[1].t, _isa(self)), o.args),
Inherited(o, i, is)),
gt := (self, o, i, is) >> Cond(ObjId(o.t)=TVect,
self.prefix("_mm_cmpgt_" :: self.ctype_suffix(o.args[1].t, _isa(self)), o.args),
Inherited(o, i, is)),
mask_eq := ~.eq,
mask_lt := ~.lt,
mask_gt := ~.gt,
vparam := (self, o, i, is) >> iclshuffle(o.p),
# --------------------------------
# ISA specific : SSE_2x64f
#
vunpacklo_2x64f := (self, o, i, is) >> self.prefix("_mm_unpacklo_pd", o.args),
vunpackhi_2x64f := (self, o, i, is) >> self.prefix("_mm_unpackhi_pd", o.args),
vshuffle_2x64f := (self, o, i, is) >> self.prefix("_mm_shuffle_pd", o.args),
vushuffle_2x64f := (self, o, i, is) >> self(o.binop(o.args[1], o.args[1], o.args[2]), i, is),
vload1sd_2x64f := (self, o, i, is) >> self.prefix("_mm_load_sd", o.args),
vload_1l_2x64f := (self, o, i, is) >> self.prefix("_mm_loadl_pd", o.args),
vload_1h_2x64f := (self, o, i, is) >> self.prefix("_mm_loadh_pd", o.args),
vloadu_2x64f := (self, o, i, is) >> self.prefix("_mm_loadu_pd", o.args),
vstore_1l_2x64f := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storel_pd", o.args), ";\n"),
vstore_1h_2x64f := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storeh_pd", o.args), ";\n"),
vstoreu_2x64f := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storeu_pd", o.args), ";\n"),
addsub_2x64f := (self, o, i, is) >> Checked(Length(o.args) = 2,
CondPat(o,
[addsub_2x64f, @TReal, @TVect], self(addsub_2x64f(vdup(o.args[1],o.t.size), o.args[2]), i, is),
[addsub_2x64f, @TVect, @TReal], self(addsub_2x64f(o.args[1], vdup(o.args[2], o.t.size)), i, is),
[addsub_2x64f, @TInt, @TVect], self(addsub_2x64f(vdup(_toReal(o.args[1]), o.t.size), o.args[2]), i, is),
[addsub_2x64f, @TVect, @TInt], self(addsub_2x64f(o.args[1], vdup(_toReal(o.args[2]), o.t.size)), i, is),
[addsub_2x64f, @TVect, @TVect], self.printf("_mm_addsub_pd($1, $2)", o.args),
Error("Don't know how to unparse <o>. Unrecognized type combination")
)),
hadd_2x64f := (self, o, i, is) >> self.printf("_mm_hadd_pd($1, $2)", [o.args[1], o.args[2]]),
chslo_2x64f := (self, o, i, is) >> self.printf(
"_mm_castsi128_pd(_mm_xor_si128(_mm_castpd_si128($1), _mm_set_epi32(0, 0, 0x80000000, 0)))", o.args),
chshi_2x64f := (self, o, i, is) >> self.printf(
"_mm_castsi128_pd(_mm_xor_si128(_mm_castpd_si128($1), _mm_set_epi32(0x80000000, 0, 0, 0)))", o.args),
chshi_4x32f := (self, o, i, is) >> self.printf(
"_mm_castsi128_ps(_mm_xor_si128(_mm_castps_si128($1), _mm_set_epi32(0x80000000, 0, 0x80000000, 0)))", o.args),
chslo_4x32f := (self, o, i, is) >> self.printf(
"_mm_castsi128_ps(_mm_xor_si128(_mm_castps_si128($1), _mm_set_epi32(0, 0x80000000, 0, 0x80000000)))", o.args),
vcvt_64f32f := (self, o, i, is) >> self.prefix("_mm_cvtps_pd", o.args),
cmpge_2x64f := (self, o, i, is) >> self.prefix("_mm_cmpge_pd", o.args),
cmple_2x64f := (self, o, i, is) >> self.prefix("_mm_cmple_pd", o.args),
cmpeq_2x64f := (self, o, i, is) >> self.prefix("_mm_cmpeq_pd", o.args),
# --------------------------------
# ISA specific : SSE_2x32f
#
vunpacklo_2x32f := (self, o, i, is) >> self.prefix("_mm_unpacklo_ps", o.args),
vunpackhi_2x32f := (self, o, i, is) >> self.prefix("_mm_unpackhi_ps", o.args),
vshuffle_2x32f := (self, o, i, is) >> self.prefix("_mm_shuffle_ps", o.args),
vushuffle_2x32f := (self, o, i, is) >> self(o.binop(o.args[1], o.args[1], o.args[2]), i, is),
vload_2x32f := (self, o, i, is) >> self.prefix("_mm_loadl_pi", o.args),
vstore_2x32f := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storel_pi", o.args), ";\n"),
vstoreu_2x32f := (self, o, i, is) >> Print(Blanks(i), self.printf("_mm_storel_epi64($1, _mm_castps_si128($2));\n", o.args)),
vloadu_2x32f := (self, o, i, is) >> self.printf("_mm_castsi128_ps(_mm_loadl_epi64($1))", o.args),
# --------------------------------
# ISA specific : SSE_4x32f
#
prefix_cast := (self, prefix, t, o) >> Cond( self.ctype(t, _isa(self)) = self.ctype(o.t, _isa(self)), self.prefix(prefix, o.args),
self(tcast(o.t, ApplyFunc(ObjId(o), List(o.args, a -> Cond(IsExp(a), tcast(t, a), a)))), 0, 1)),
vunpacklo_4x32f := (self, o, i, is) >> self.prefix_cast("_mm_unpacklo_ps", TVect(T_Real(32), 4), o),
vunpackhi_4x32f := (self, o, i, is) >> self.prefix_cast("_mm_unpackhi_ps", TVect(T_Real(32), 4), o),
vshuffle_4x32f := (self, o, i, is) >> self.prefix_cast("_mm_shuffle_ps", TVect(T_Real(32), 4), o),
vushuffle_4x32f := (self, o, i, is) >> self(o.binop(o.args[1], o.args[1], o.args[2]), i, is),
hadd_4x32f := (self, o, i, is) >> self.printf("_mm_hadd_ps($1, $2)", [o.args[1], o.args[2]]),
vldup_4x32f := (self, o, i, is) >> self.prefix("_mm_moveldup_ps", o.args),
vhdup_4x32f := (self, o, i, is) >> self.prefix("_mm_movehdup_ps", o.args),
vinsert_4x32f := (self, o, i, is) >> self.printf(
"_mm_castsi128_ps(_mm_insert_epi32(_mm_castps_si128($1), $2, $3))", [o.args[1], o.args[2], o.args[3].p-1]),
vextract_4x32f := (self, o, i, is) >> Print(Blanks(i),
self.printf("$1 = _mm_extract_ps($2, $3)", [deref(o.args[1]), o.args[2], o.args[3]-1]), ";\n"),
vload1_4x32f := (self, o, i, is) >> self.prefix("_mm_load_ss", o.args),
vload_2l_4x32f := (self, o, i, is) >> self.prefix("_mm_loadl_pi", o.args),
vload_2h_4x32f := (self, o, i, is) >> self.prefix("_mm_loadh_pi", o.args),
vloadu_4x32f := (self, o, i, is) >> self.printf("_mm_loadu_ps($1)", o.args),
vloadu2_4x32f := (self, o, i, is) >> self.printf("_mm_castsi128_ps(_mm_loadl_epi64($1))", o.args),
vstore1_4x32f := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_store_ss", o.args), ";\n"),
vstore_2l_4x32f := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storel_pi", o.args), ";\n"),
vstore_2h_4x32f := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storeh_pi", o.args), ";\n"),
vstoreu_4x32f := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storeu_ps", o.args), ";\n"),
vstoreu2_4x32f := (self, o, i, is) >> Print(Blanks(i), self.printf("_mm_storel_epi64($1, _mm_castps_si128($2));\n",
o.args)),
vstoremsk_4x32f := (self, o, i, is) >> Print(Blanks(i),
self.printf("_mm_maskmoveu_si128(_mm_castps_si128($2), _mm_set_epi32($3, $4, $5, $6), $1);\n",
[o.args[1], o.args[2]] :: List(Reversed(o.args[3].v), e->e.v))),
alignr_4x32f := (self, o, i, is) >> self.printf(
"_mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128($1), _mm_castps_si128($2), $3))", [o.args[1], o.args[2], o.args[3].p]),
# --------------------------------
# ISA specific : SSE_8x16i
#
vzero_8x16i := (self, o, i, is) >> Print("_mm_setzero_si128()"),
vunpacklo_8x16i := (self, o, i, is) >> self.prefix("_mm_unpacklo_epi16", o.args),
vunpackhi_8x16i := (self, o, i, is) >> self.prefix("_mm_unpackhi_epi16", o.args),
vunpacklo2_8x16i := (self, o, i, is) >> self.prefix("_mm_unpacklo_epi32", o.args),
vunpackhi2_8x16i := (self, o, i, is) >> self.prefix("_mm_unpackhi_epi32", o.args),
vunpacklo4_8x16i := (self, o, i, is) >> self.prefix("_mm_unpacklo_epi64", o.args),
vunpackhi4_8x16i := (self, o, i, is) >> self.prefix("_mm_unpackhi_epi64", o.args),
vpacks_8x16i := (self, o, i, is) >> self.prefix("_mm_packs_epi16", o.args),
vpackus_8x16i := (self, o, i, is) >> self.prefix("_mm_packus_epi16", o.args),
vshuffle2_8x16i := (self, o, i, is) >> self.printf(
"_mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps($1), _mm_castsi128_ps($2), $3))", o.args),
vshuffle4_8x16i := (self, o, i, is) >> self.printf(
"_mm_castpd_si128(_mm_shuffle_pd(_mm_castsi128_pd($1), _mm_castsi128_pd($2), $3))", o.args),
vload1_8x16i := (self, o, i, is) >> self.printf("_mm_insert_epi16($1, $2, $3)", o.args),
vload2_8x16i := (self, o, i, is) >> self.prefix("_mm_cvtsi32_si128", o.args),
vload4_8x16i := (self, o, i, is) >> self.prefix("_mm_loadl_epi64", o.args),
vloadu_8x16i := (self, o, i, is) >> self.prefix("_mm_loadu_si128", o.args),
vextract1_8x16i := (self, o, i, is) >> self.prefix("_mm_extract_epi16", o.args),
vextract2_8x16i := (self, o, i, is) >> self.prefix("_mm_cvtsi128_si32", o.args),
vstoreu_8x16i := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storeu_si128", o.args), ";\n"),
vstore4_8x16i := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storel_epi64", o.args), ";\n"),
vstoremsk_8x16i := (self, o, i, is) >> Print(Blanks(i),
self.printf("_mm_maskmoveu_si128($2, _mm_set_epi16($3), $1);\n",
[o.args[1], o.args[2], () -> PrintCS(Reversed(o.args[3]))])),
vushuffle2_8x16i := (self, o, i, is) >> self(o.binop(o.args[1], o.args[1], o.args[2]), i, is),
vushufflelo_8x16i := (self, o, i, is) >> self.prefix("_mm_shufflelo_epi16", o.args),
vushufflehi_8x16i := (self, o, i, is) >> self.prefix("_mm_shufflehi_epi16", o.args),
interleavedmask_8x16i := (self, o, i, is) >> self.printf(
"_mm_movemask_epi8(_mm_unpacklo_epi8(_mm_packs_epi16($1, _mm_setzero_si128()), _mm_packs_epi16($2, _mm_setzero_si128())))",
o.args),
alignr_8x16i := (self, o, i, is) >> self.printf("_mm_alignr_epi8($1, $2, $3)", [o.args[1], o.args[2], o.args[3].p]),
# FF: NOTE: couldn't figure out how to use the general case with type propagation etc...
cmplt_8x16i := (self, o, i, is) >> self.printf("_mm_cmplt_epi16($1, $2)", o.args),
# SSSE3 8x16i instructions
chs_8x16i := (self, o, i, is) >> self.prefix("_mm_sign_epi16", o.args),
vushuffle_8x16i := (self, o, i, is) >> self.prefix("_mm_shuffle_epi8", o.args),
# --------------------------------
# ISA specific : SSE_16x8i
#
vloadu_16x8i := (self, o, i, is) >> self.prefix("_mm_loadu_si128", o.args),
vstoreu_16x8i := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storeu_si128", o.args), ";\n"),
vunpacklo_16x8i := (self, o, i, is) >> self.prefix("_mm_unpacklo_epi8", o.args),
vunpackhi_16x8i := (self, o, i, is) >> self.prefix("_mm_unpackhi_epi8", o.args),
vunpacklo2_16x8i := (self, o, i, is) >> self.prefix("_mm_unpacklo_epi16", o.args),
vunpackhi2_16x8i := (self, o, i, is) >> self.prefix("_mm_unpackhi_epi16", o.args),
vunpacklo4_16x8i := (self, o, i, is) >> self.prefix("_mm_unpacklo_epi32", o.args),
vunpackhi4_16x8i := (self, o, i, is) >> self.prefix("_mm_unpackhi_epi32", o.args),
vunpacklo8_16x8i := (self, o, i, is) >> self.prefix("_mm_unpacklo_epi64", o.args),
vunpackhi8_16x8i := (self, o, i, is) >> self.prefix("_mm_unpackhi_epi64", o.args),
vushufflelo2_16x8i := (self, o, i, is) >> self.prefix("_mm_shufflelo_epi16", o.args),
vushufflehi2_16x8i := (self, o, i, is) >> self.prefix("_mm_shufflehi_epi16", o.args),
vushuffle4_16x8i := (self, o, i, is) >> self.prefix("_mm_shuffle_epi32", o.args),
interleavedmasklo_16x8i := (self, o, i, is) >> Print("_mm_movemask_epi8(_mm_unpacklo_epi8(",o.args[1],",",o.args[2],"))"),
interleavedmaskhi_16x8i := (self, o, i, is) >> Print("_mm_movemask_epi8(_mm_unpackhi_epi8(",o.args[1],",",o.args[2],"))"),
average_16x8i := (self, o, i, is) >> Print("_mm_avg_epu8(",o.args[1],",",o.args[2],")"),
vmovemask_16x8i := (self, o, i, is) >> self.prefix("_mm_movemask_epi8", o.args),
# XXX NOTE XXX
# Also fix the other vstoremsk's. The problem here is that after the latest changes to Spiral
# the last argument (a list of strings) of vstoremsk gets wrapped into V, and so do the strings;
# this is super stupid+ugly
vstoremsk_16x8i := (self, o, i, is) >> Print(Blanks(i), self.printf("_mm_maskmoveu_si128($2, _mm_set_epi8($3), $1);\n",
[o.args[1], o.args[2], () -> PrintCS(Reversed(List(_unwrapV(o.args[3]), _unwrapV)))])),
addsub_4x32f := (self, o, i, is) >> Checked(Length(o.args) = 2,
CondPat(o,
[addsub_4x32f, @TReal, @TVect], self(addsub_4x32f(vdup(o.args[1],o.t.size), o.args[2]), i, is),
[addsub_4x32f, @TVect, @TReal], self(addsub_4x32f(o.args[1], vdup(o.args[2],o.t.size)), i, is),
[addsub_4x32f, @TInt, @TVect], self(addsub_4x32f(vdup(_toReal(o.args[1]),o.t.size), o.args[2]), i, is),
[addsub_4x32f, @TVect, @TInt], self(addsub_4x32f(o.args[1], vdup(_toReal(o.args[2]),o.t.size)), i, is),
[addsub_4x32f, @TVect, @TVect], self.printf("_mm_addsub_ps($1, $2)", [o.args[1], o.args[2]]),
Error("Don't know how to unparse <o>. Unrecognized type combination")
)),
# --------------------------------
# ISA specific : SSE_4x32i
#
vunpacklo_4x32i := (self, o, i, is) >> self.prefix("_mm_unpacklo_epi32", o.args),
vunpackhi_4x32i := (self, o, i, is) >> self.prefix("_mm_unpackhi_epi32", o.args),
vpacks_4x32i := (self, o, i, is) >> self.prefix("_mm_packs_epi32", o.args),
# 32 bit integer shuffles are *not* the same as 32 bit float, but similar to 16 bit integer shuffles
vushuffle_4x32i := (self, o, i, is) >> self.prefix("_mm_shuffle_epi32", o.args),
vshuffle_4x32i := (self, o, i, is) >> self.printf(
"_mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps($1), _mm_castsi128_ps($2), $3))", o.args),
# subvector unparsing not yet done...
vload1_4x32i := (self, o, i, is) >> self.prefix("_mm_cvtsi32_si128", o.args), #svpcprint guy
vload2_4x32i := (self, o, i, is) >> self.prefix("_mm_loadl_epi64", o.args),
vloadu_4x32i := (self, o, i, is) >> self.prefix("_mm_loadu_si128", o.args),
vextract_4x32i := (self, o, i, is) >> self.prefix("_mm_cvtsi128_si32", o.args),
vstoreu_4x32i := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storeu_si128", o.args), ";\n"),
vstore2_4x32i := (self, o, i, is) >> Print(Blanks(i), self.prefix("_mm_storel_epi64", o.args), ";\n"),
vstoremsk_4x32i := (self, o, i, is) >> Print(Blanks(i),
self.printf("_mm_maskmoveu_si128($2, _mm_set_epi16($3), $1);\n", [o.args[1], o.args[2], ()->PrintCS(Reversed(o.args[3]))])),
# complicated, buggy
interleavedmask_4x32i := (self, o, i, is) >> self.printf(
"_mm_movemask_epi8(_mm_packs_epi16(_mm_unpacklo_epi16(_mm_packs_epi16($1, $3), _mm_packs_epi16($2, $3)), $3))",
o.args :: ["_mm_setzero_si128()"]),
vcvt_4x32_i2f := (self, o, i, is) >> self.prefix("_mm_cvtepi32_ps", o.args),
vcvt_4x32_f2i := (self, o, i, is) >> self.prefix("_mm_cvtps_epi32", o.args),
vcvtt_4x32_f2i := (self, o, i, is) >> self.prefix("_mm_cvttps_epi32", o.args),
testz_4x32i := (self, o, i, is) >> self.prefix("_mm_testz_si128", o.args),
testc_4x32i := (self, o, i, is) >> self.prefix("_mm_testc_si128", o.args),
testnzc_4x32i := (self, o, i, is) >> self.prefix("_mm_testnzc_si128", o.args),
# --------------------------------
# ISA specific : SSE_2x64i
#
vunpacklo_2x64i := (self, o, i, is) >> self.prefix("_mm_unpacklo_epi64", o.args),
vunpackhi_2x64i := (self, o, i, is) >> self.prefix("_mm_unpackhi_epi64", o.args),
vshuffle_2x64i := (self, o, i, is) >> self.printf(
"_mm_castpd_si128(_mm_shuffle_pd(_mm_castsi128_pd($1), _mm_castsi128_pd($2), $3))", o.args),
vushuffle_2x64i := (self, o, i, is) >> self(o.binop(o.args[1], o.args[1], o.args[2]), i, is),
# --------------------------------
# tcast __m128 <-> __m128i
tcast := (self, o, i, is) >> let(
isa := _isa(self),
i128 := @.cond(x-> let( t := When(IsType(x), x, x.t), IsVecT(t) and self.ctype(t, isa)="__m128i")),
f128 := @.cond(x-> let( t := When(IsType(x), x, x.t), IsVecT(t) and self.ctype(t, isa)="__m128" )),
d128 := @.cond(x-> let( t := When(IsType(x), x, x.t), IsVecT(t) and self.ctype(t, isa)="__m128d" )),
CondPat(o,
[tcast, i128, f128], self.prefix("_mm_castps_si128", [o.args[2]]),
[tcast, i128, d128], self.prefix("_mm_castpd_si128", [o.args[2]]),
[tcast, f128, i128], self.prefix("_mm_castsi128_ps", [o.args[2]]),
[tcast, i128, i128], self(o.args[2], i, is),
[tcast, f128, f128], self(o.args[2], i, is),
Inherited(o, i, is))),
tcvt := (self, o, i, is) >> self.printf("(($1)($2))", [o.args[1], o.args[2]]),
#NOTE: finish this, it should look at TVect.size and instruction set for figuring out exactly what to do
vcastizxlo := (self, o, i, is) >> self(vunpacklo_16x8i(o.args[1], o.t.zero()), i, is),
vcastizxhi := (self, o, i, is) >> self(vunpackhi_16x8i(o.args[1], o.t.zero()), i, is),
vcastuzxlo := ~.vcastizxlo,
vcastuzxhi := ~.vcastizxhi,
average := (self, o, i, is) >> CondPat(o,
[average, @TVect, @TVect], let(
sfx := self.ctype_suffix(o.t, _isa(self)),
Cond( sfx in ["epu8", "epu16"],
self.printf("_mm_avg_$1($2, $3)",[sfx, o.args[1],o.args[2]]),
Error("finish SSE unparser"))),
Inherited(o, i, is)),
));
|
#pragma once
#include <boost/shared_ptr.hpp>
#include "types.hpp"
#include "KeyTypeName.hpp"
namespace BitProfile{
class KeyAdapter
{
public:
template<class Key>
KeyAdapter(const Key &);
const address_t & getAddress() const;
std::pair<bool, std::string> getAuthData(const std::string &password);
std::string getTypeName() const;
template<class Key>
void reset(const Key &);
template<class Key>
void operator = (const Key &);
private:
class KeyHolder;
template<class Key> class KeyHolderImpl;
private:
boost::shared_ptr<KeyHolder> _holder;
};
}
#include "KeyAdapter.ipp"
|
lemma compact_scaling: fixes s :: "'a::real_normed_vector set" assumes "compact s" shows "compact ((\<lambda>x. c *\<^sub>R x) ` s)" |
module Flexidisc.Header
import public Flexidisc.Dec.IsYes
import public Flexidisc.Header.Label
import public Flexidisc.Header.Row
import public Flexidisc.Header.Sub
import public Flexidisc.Header.Type
import public Flexidisc.OrdList
%default total
%access public export
||| A proof that adding some keys to a given header leads to another one
||| @skipped keys of the original header that are not in the subset
||| @subset part of the original header
||| @orig the original header
data CompWithKeys : (skipped : List k) ->
(subset, orig : Header' k a) ->
Type where
||| Wrap `OrdList.CompWithKeys`
S : {xs : OrdList k o a} -> {ys : OrdList k o a} ->
CompWithKeys keys xs ys -> CompWithKeys keys (H xs) (H ys)
||| A proof that two `Header'` don't share a key.
data Disjoint : (xs, ys : Header' k a) -> Type where
D : {xs : OrdList k o a} -> {ys : OrdList k o a} ->
Disjoint xs ys -> Disjoint (H xs) (H ys)
||| A proof that a label is not already in a `Header'`
data Fresh : (l : label) -> (xs : Header' label a) -> Type where
F : {xs : OrdList k o a} -> Fresh l xs -> Fresh l (H xs)
%name Header.Fresh fresh, prf, new
||| Decision procedure for freshness
decFresh : (DecEq label) => (l : label) -> (xs : Header' label a) ->
Dec (Fresh l xs)
decFresh l (H xs) with (decFresh l xs)
| (Yes prf) = Yes (F prf)
| (No contra) = No (\(F x) => contra x)
||| Move the `decFresh` procedure to the type level
IsFresh : (DecEq label) => (l : label) -> (xs : Header' label a) -> Type
IsFresh l xs = IsYes (decFresh l xs)
||| A proof that labels that are in both lists have the same values
data HereOrNot : (xs, ys : Header' k a) -> Type where
HN : {xs, ys : OrdList k o a} ->
HereOrNot xs ys -> HereOrNot (H xs) (H ys)
toSub : {xs : Header k} -> HereOrNot xs ys -> Maybe (Sub xs ys)
toSub (HN compat) = map S (toSub compat)
||| A proof that a Header' has no duplicated key
data Nub : (Header' label a) -> Type where
N : {xs : OrdList k o a} -> Nub xs -> Nub (H xs)
IsNub : DecEq label => (xs : Header' label a) -> Type
IsNub (H xs) = IsYes (decNub xs)
namespace SubWithKeys
||| Proof that a `Header'` has some given `keys` and is a subset of another
data SubWithKeys : (List k) -> (xs, ys : Header' k a) -> Type where
S : {xs, ys : OrdList k o a} ->
SubWithKeys keys xs ys -> SubWithKeys keys (H xs) (H ys)
namespace SameOrd
||| Both `Header'`s are ordered with the same Ord typeclass
||| (thank you non unique typeclasses)
data SameOrd : (xs, ys : Header' k a) -> Type where
S : {xs, ys : OrdList k o a} -> SameOrd xs ys -> SameOrd (H xs) (H ys)
namespace Decomp
||| A Decomposition of a Header' into
||| a set of required values and a set of optional values
||| @required the set of keys and types that must be in the original header
||| @optional the set of keys and types that can be in the original header
||| @xs the original header
data Decomp : (required, optional : Header k) -> (xs : Header k) -> Type where
D : Header.Sub.Sub required xs -> HereOrNot optional xs -> Decomp required optional xs
||| Output the keys of the first Header' that are not in the second Header'
diffKeys : DecEq k => (xs, ys : Header' k a) -> Header' k a
diffKeys (H xs) (H ys) = H (diffKeys xs ys)
||| Apply a patch `xs` to an `Header'` `ys`.
||| The labels of `ys` that are in `xs` are updated,
||| and the fresh elements of `xs` are added
patch : DecEq k => (xs, ys : Header' k a) -> Header' k a
patch (H xs) (H ys) = H (patch xs ys)
|
#####################################################################
# CS671: Machine Learning
# Copyright 2015 Pejman Ghorbanzade <[email protected]>
# More info: https://github.com/ghorbanzade/beacon
#####################################################################
# Load dataset
iris <- read.csv("dat/iris.csv", header=TRUE, sep=",")
# show summary of loaded dataset on console
str(iris)
# alternatively, to view the dataset in RStudio
# View(iris)
# We first need to randomize the dataset
# To do it, we first generate 150 numbers in the interval [0,1]
rand <- runif(150)
# Then we replace them with their order,
# as if we generated 150 random integer numbers from 1 to 150
rand <- order(rand)
# Then we shuffle the dataset, using the random array
iris_rand <- iris[rand, ]
# R packages specialized in SVM are Kernlab, svmlight, libsvm, e1071
# We use kernlab
# Make sure kernlab package is installed
# pkg <- select.list(sort(.packages(all.available = TRUE)), graphics=TRUE)
# if(nchar(pkg)) library(pkg, character.only=TRUE)
# library("kernlab")
# Select first 120 samples as training dataset
iris_train <- iris_rand[1:120, ]
# Select the remaining 30 samples as test dataset
iris_test <- iris_rand[121:150, ]
# Build a classifier using a linear kernel function
iris_classifier <- ksvm(Species~., data=iris_train, kernel="vanilladot")
# show classifier properties
iris_classifier
# apply classifier on the test dataset
iris_prediction <- predict(iris_classifier, iris_test)
# show header
head(iris_prediction)
# show confusion matrix
table(iris_prediction, iris_test$Species)
# show number of correct and incorrect recognitions
agreement <- iris_prediction == iris_test$Species
table(agreement)
|
/*
* Copyright (c) 2016-2021 lymastee, All rights reserved.
* Contact: [email protected]
*
* This file is part of the gslib project.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#ifndef type_f33c5e03_70f2_4879_9504_332323832a40_h
#define type_f33c5e03_70f2_4879_9504_332323832a40_h
#include <ariel/config.h>
#include <gslib/type.h>
#include <gslib/string.h>
#include <gslib/math.h>
#include <gslib/std.h>
__ariel_begin__
struct ariel_export color
{
union
{
struct { byte red, green, blue, alpha; };
uint _data;
};
public:
color() { data() = 0; }
color(int r, int g, int b) { set_color(r, g, b); }
color(int r, int g, int b, int a) { set_color(r, g, b, a); }
uint& data() { return _data; }
uint data() const { return _data; }
void set_color(int r, int g, int b) { red = r, green = g, blue = b, alpha = 255; }
void set_color(int r, int g, int b, int a) { red = r, green = g, blue = b, alpha = a; }
bool operator !=(const color& cr) const { return blue != cr.blue || green != cr.green || red != cr.red || alpha != cr.alpha; }
bool operator ==(const color& cr) const { return blue == cr.blue && green == cr.green && red == cr.red && alpha == cr.alpha; }
color& lerp(const color& c1, const color& c2, float s)
{
assert(s >= 0.f && s <= 1.f);
float t = 1.f - s;
red = (int)(s * c1.red + t * c2.red);
green = (int)(s * c1.green + t * c2.green);
blue = (int)(s * c1.blue + t * c2.blue);
alpha = (int)(s * c1.alpha + t * c2.alpha);
return *this;
}
};
struct ariel_export font
{
enum
{
declare_mask(ftm_italic, 0),
declare_mask(ftm_underline, 1),
declare_mask(ftm_strikeout, 2),
};
public:
string name;
int size;
int escape;
int orient;
int weight; /* 0-9 */
uint mask;
private:
friend class fsys_win32;
friend class fsys_dwrite;
mutable uint sysfont;
public:
font()
{
size = 0;
escape = 0;
orient = 0;
weight = 3;
mask = 0;
sysfont = 0;
}
font(const font& that)
{
name = that.name;
size = that.size;
escape = that.escape;
orient = that.orient;
weight = that.weight;
mask = that.mask;
sysfont = that.sysfont;
}
font(const gchar* n, int sz)
{
name.assign(n);
size = sz;
escape = 0;
orient = 0;
weight = 3;
mask = 0;
sysfont = 0;
}
font& operator = (const font& that)
{
name = that.name;
size = that.size;
escape = that.escape;
orient = that.orient;
weight = that.weight;
mask = that.mask;
return *this;
}
bool operator == (const font& that) const
{
if(name != that.name)
return false;
if(size != that.size)
return false;
if(escape != that.escape)
return false;
if(orient != that.orient)
return false;
if(weight != that.weight)
return false;
if(mask != that.mask)
return false;
return true;
}
bool operator != (const font& that) const
{
if(name != that.name)
return true;
if(size != that.size)
return true;
if(escape != that.escape)
return true;
if(orient != that.orient)
return true;
if(weight != that.weight)
return true;
if(mask != that.mask)
return true;
return false;
}
size_t hash_value() const
{
hasher h;
h.add_bytes((const byte*)name.c_str(), name.length() * sizeof(gchar));
h.add_bytes((const byte*)&size, sizeof(size));
h.add_bytes((const byte*)&escape, sizeof(escape));
h.add_bytes((const byte*)&orient, sizeof(orient));
h.add_bytes((const byte*)&weight, sizeof(weight));
return h.add_bytes((const byte*)&mask, sizeof(mask));
}
};
struct ariel_export viewport
{
float left;
float top;
float width;
float height;
float min_depth;
float max_depth;
};
struct ariel_export axis_aligned_bound_box
{
float left = FLT_MAX;
float right = -FLT_MAX;
float top = FLT_MAX;
float bottom = -FLT_MAX;
float front = FLT_MAX;
float back = -FLT_MAX;
public:
void reset()
{
left = top = front = FLT_MAX;
right = bottom = back = -FLT_MAX;
}
float width() const { return right - left; }
float height() const { return bottom - top; }
float depth() const { return back - front; }
};
struct ariel_export origin_bound_sphere
{
float radius = 0.f;
};
struct ariel_export bound_sphere
{
vec3 origin;
float radius = 0.f;
};
enum res_type
{
res_mesh,
};
class __gs_novtable ariel_export res_node abstract
{
public:
virtual ~res_node() {}
virtual res_type get_type() const = 0;
virtual const string& get_name() const { return _name; }
virtual bool has_name() const { return !_name.empty(); }
protected:
string _name;
public:
void set_name(const string& name) { _name = name; }
};
__ariel_end__
namespace std {
template<>
class hash<gs::ariel::font>
#if defined(_MSC_VER) && (_MSC_VER < 1914)
: public unary_function<gs::ariel::font, size_t>
#endif
{
public:
size_t operator()(const gs::ariel::font& ft) const { return ft.hash_value(); }
};
};
#endif
|
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import data.set.basic
/-!
# Sets in sigma types
This file defines `set.sigma`, the indexed sum of sets.
-/
namespace set
variables {ι ι' : Type*} {α β : ι → Type*} {s s₁ s₂ : set ι} {t t₁ t₂ : Π i, set (α i)}
{u : set (Σ i, α i)} {x : Σ i, α i} {i : ι} {a : α i}
/-- Indexed sum of sets. `s.sigma t` is the set of dependent pairs `⟨i, a⟩` such that `i ∈ s` and
`a ∈ t i`.-/
protected def sigma (s : set ι) (t : Π i, set (α i)) : set (Σ i, α i) := {x | x.1 ∈ s ∧ x.2 ∈ t x.1}
@[simp] lemma mem_sigma_iff : x ∈ s.sigma t ↔ x.1 ∈ s ∧ x.2 ∈ t x.1 := iff.rfl
@[simp] lemma mk_sigma_iff : (⟨i, a⟩ : Σ i, α i) ∈ s.sigma t ↔ i ∈ s ∧ a ∈ t i := iff.rfl
lemma mk_mem_sigma (hi : i ∈ s) (ha : a ∈ t i) : (⟨i, a⟩ : Σ i, α i) ∈ s.sigma t := ⟨hi, ha⟩
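-- An illustrative sanity check (a sketch added for exposition, using only `mk_mem_sigma` above):
-- membership of a dependent pair in an indexed sum of sets follows from membership of its parts.
example (hi : i ∈ s) (ha : a ∈ t i) : (⟨i, a⟩ : Σ i, α i) ∈ s.sigma t := mk_mem_sigma hi ha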
lemma sigma_mono (hs : s₁ ⊆ s₂) (ht : ∀ i, t₁ i ⊆ t₂ i) : s₁.sigma t₁ ⊆ s₂.sigma t₂ :=
λ x hx, ⟨hs hx.1, ht _ hx.2⟩
lemma sigma_subset_iff : s.sigma t ⊆ u ↔ ∀ ⦃i⦄, i ∈ s → ∀ ⦃a⦄, a ∈ t i → (⟨i, a⟩ : Σ i, α i) ∈ u :=
⟨λ h i hi a ha, h $ mk_mem_sigma hi ha, λ h ⟨i, a⟩ ha, h ha.1 ha.2⟩
lemma forall_sigma_iff {p : (Σ i, α i) → Prop} :
(∀ x ∈ s.sigma t, p x) ↔ ∀ ⦃i⦄, i ∈ s → ∀ ⦃a⦄, a ∈ t i → p ⟨i, a⟩ :=
sigma_subset_iff
lemma exists_sigma_iff {p : (Σ i, α i) → Prop} :
(∃ x ∈ s.sigma t, p x) ↔ ∃ (i ∈ s) (a ∈ t i), p ⟨i, a⟩ :=
⟨λ ⟨⟨i, a⟩, ha, h⟩, ⟨i, ha.1, a, ha.2, h⟩, λ ⟨i, hi, a, ha, h⟩, ⟨⟨i, a⟩, ⟨hi, ha⟩, h⟩⟩
@[simp] lemma sigma_empty : s.sigma (λ _, (∅ : set (α i))) = ∅ := ext $ λ _, and_false _
@[simp] lemma empty_sigma : (∅ : set ι).sigma t = ∅ := ext $ λ _, false_and _
lemma univ_sigma_univ : (@univ ι).sigma (λ _, @univ (α i)) = univ := ext $ λ _, true_and _
@[simp] lemma sigma_univ : s.sigma (λ _, univ : Π i, set (α i)) = sigma.fst ⁻¹' s :=
ext $ λ _, and_true _
@[simp] lemma singleton_sigma : ({i} : set ι).sigma t = sigma.mk i '' t i :=
ext $ λ x, begin
split,
{ obtain ⟨j, a⟩ := x,
rintro ⟨(rfl : j = i), ha⟩,
exact mem_image_of_mem _ ha },
{ rintro ⟨b, hb, rfl⟩,
exact ⟨rfl, hb⟩ }
end
@[simp] lemma sigma_singleton {a : Π i, α i} :
s.sigma (λ i, ({a i} : set (α i))) = (λ i, sigma.mk i $ a i) '' s :=
by { ext ⟨x, y⟩, simp [and.left_comm, eq_comm] }
lemma singleton_sigma_singleton {a : Π i, α i} :
({i} : set ι).sigma (λ i, ({a i} : set (α i))) = {⟨i, a i⟩} :=
by rw [sigma_singleton, image_singleton]
@[simp] lemma union_sigma : (s₁ ∪ s₂).sigma t = s₁.sigma t ∪ s₂.sigma t :=
ext $ λ _, or_and_distrib_right
@[simp] lemma sigma_union : s.sigma (λ i, t₁ i ∪ t₂ i) = s.sigma t₁ ∪ s.sigma t₂ :=
ext $ λ _, and_or_distrib_left
lemma sigma_inter_sigma : s₁.sigma t₁ ∩ s₂.sigma t₂ = (s₁ ∩ s₂).sigma (λ i, t₁ i ∩ t₂ i) :=
by { ext ⟨x, y⟩, simp [and_assoc, and.left_comm] }
lemma insert_sigma : (insert i s).sigma t = (sigma.mk i '' t i) ∪ s.sigma t :=
by rw [insert_eq, union_sigma, singleton_sigma]
lemma sigma_insert {a : Π i, α i} :
s.sigma (λ i, insert (a i) (t i)) = ((λ i, ⟨i, a i⟩) '' s) ∪ s.sigma t :=
by simp_rw [insert_eq, sigma_union, sigma_singleton]
lemma sigma_preimage_eq {f : ι' → ι} {g : Π i, β i → α i} :
(f ⁻¹' s).sigma (λ i, g (f i) ⁻¹' t (f i)) =
(λ p : Σ i, β (f i), sigma.mk _ (g _ p.2)) ⁻¹' (s.sigma t) := rfl
lemma sigma_preimage_left {f : ι' → ι} :
(f ⁻¹' s).sigma (λ i, t (f i)) = (λ p : Σ i, α (f i), sigma.mk _ p.2) ⁻¹' (s.sigma t) := rfl
lemma sigma_preimage_right {g : Π i, β i → α i} :
s.sigma (λ i, g i ⁻¹' t i) = (λ p : Σ i, β i, sigma.mk p.1 (g _ p.2)) ⁻¹' (s.sigma t) := rfl
lemma preimage_sigma_map_sigma {α' : ι' → Type*} (f : ι → ι') (g : Π i, α i → α' (f i)) (s : set ι')
(t : Π i, set (α' i)) :
sigma.map f g ⁻¹' (s.sigma t) = (f ⁻¹' s).sigma (λ i, g i ⁻¹' t (f i)) := rfl
@[simp] lemma mk_preimage_sigma (hi : i ∈ s) : sigma.mk i ⁻¹' s.sigma t = t i :=
ext $ λ _, and_iff_right hi
@[simp] lemma mk_preimage_sigma_eq_empty (hi : i ∉ s) : sigma.mk i ⁻¹' s.sigma t = ∅ :=
ext $ λ _, iff_of_false (hi ∘ and.left) id
lemma mk_preimage_sigma_eq_if [decidable_pred (∈ s)] :
sigma.mk i ⁻¹' s.sigma t = if i ∈ s then t i else ∅ :=
by split_ifs; simp [h]
lemma mk_preimage_sigma_fn_eq_if {β : Type*} [decidable_pred (∈ s)] (g : β → α i) :
(λ b, sigma.mk i (g b)) ⁻¹' s.sigma t = if i ∈ s then g ⁻¹' t i else ∅ :=
ext $ λ _, by split_ifs; simp [h]
lemma sigma_univ_range_eq {f : Π i, α i → β i} :
(univ : set ι).sigma (λ i, range (f i)) = range (λ x : Σ i, α i, ⟨x.1, f _ x.2⟩) :=
ext $ by simp [range]
protected lemma nonempty.sigma :
s.nonempty → (∀ i, (t i).nonempty) → (s.sigma t : set _).nonempty :=
λ ⟨i, hi⟩ h, let ⟨a, ha⟩ := h i in ⟨⟨i, a⟩, hi, ha⟩
lemma nonempty.sigma_fst : (s.sigma t : set _).nonempty → s.nonempty := λ ⟨x, hx⟩, ⟨x.1, hx.1⟩
lemma nonempty.sigma_snd : (s.sigma t : set _).nonempty → ∃ i ∈ s, (t i).nonempty :=
λ ⟨x, hx⟩, ⟨x.1, hx.1, x.2, hx.2⟩
lemma sigma_nonempty_iff : (s.sigma t : set _).nonempty ↔ ∃ i ∈ s, (t i).nonempty :=
⟨nonempty.sigma_snd, λ ⟨i, hi, a, ha⟩, ⟨⟨i, a⟩, hi, ha⟩⟩
lemma sigma_eq_empty_iff : s.sigma t = ∅ ↔ ∀ i ∈ s, t i = ∅ :=
not_nonempty_iff_eq_empty.symm.trans $ sigma_nonempty_iff.not.trans $
by simp only [not_nonempty_iff_eq_empty, not_exists]
lemma image_sigma_mk_subset_sigma_left {a : Π i, α i} (ha : ∀ i, a i ∈ t i) :
(λ i, sigma.mk i (a i)) '' s ⊆ s.sigma t :=
image_subset_iff.2 $ λ i hi, ⟨hi, ha _⟩
lemma image_sigma_mk_subset_sigma_right (hi : i ∈ s) : sigma.mk i '' t i ⊆ s.sigma t :=
image_subset_iff.2 $ λ a, and.intro hi
lemma sigma_subset_preimage_fst (s : set ι) (t : Π i, set (α i)) : s.sigma t ⊆ sigma.fst ⁻¹' s :=
λ a, and.left
lemma fst_image_sigma_subset (s : set ι) (t : Π i, set (α i)) : sigma.fst '' s.sigma t ⊆ s :=
image_subset_iff.2 $ λ a, and.left
lemma fst_image_sigma (s : set ι) (ht : ∀ i, (t i).nonempty) : sigma.fst '' s.sigma t = s :=
(fst_image_sigma_subset _ _).antisymm $ λ i hi, let ⟨a, ha⟩ := ht i in ⟨⟨i, a⟩, ⟨hi, ha⟩, rfl⟩
lemma sigma_diff_sigma : s₁.sigma t₁ \ s₂.sigma t₂ = s₁.sigma (t₁ \ t₂) ∪ (s₁ \ s₂).sigma t₁ :=
ext $ λ x, by by_cases h₁ : x.1 ∈ s₁; by_cases h₂ : x.2 ∈ t₁ x.1; simp [*, ←imp_iff_or_not]
end set
|
\documentclass[11pt, a4paper]{article}
\usepackage[inner=1in,outer=1in,top=1in,bottom=1in]{geometry}
\pagestyle{empty}
\usepackage{placeins}
\usepackage{graphicx}
\usepackage{fancyhdr, lastpage, bbding, pmboxdraw}
\usepackage[usenames,dvipsnames]{color}
\definecolor{darkblue}{rgb}{0,0,.6}
\definecolor{darkred}{rgb}{.7,0,0}
\definecolor{darkgreen}{rgb}{0,.6,0}
\definecolor{red}{rgb}{.98,0,0}
\usepackage[colorlinks,pagebackref,pdfusetitle,urlcolor=darkblue,citecolor=darkblue,linkcolor=darkred,bookmarksnumbered,plainpages=false]{hyperref}
%\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\usepackage{amsmath}
\usepackage{amssymb}
\pagestyle{fancyplain}
\fancyhf{}
\lhead{ \fancyplain{}{\CourseTitle} }
%\chead{ \fancyplain{}{} }
\rhead{ \fancyplain{}{\CourseSemester \CourseYear} }
%\rfoot{\fancyplain{}{page \thepage\ of \pageref{LastPage}}}
\fancyfoot[RO, LE] {page \thepage\ of \pageref{LastPage} }
\thispagestyle{plain}
\usepackage{tabularx}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{xspace}
\newcommand{\CourseNumber}{NPRE412}
\newcommand{\CourseTitle}{Nuclear Power Economics and Fuel Management\xspace}%
\newcommand{\CourseInstructor}{Prof. Kathryn Huff\xspace}%
\newcommand{\CourseSemester}{Fall\xspace}%
\newcommand{\CourseYear}{2016\xspace}%
\newcommand{\CourseDays}{MWF\xspace}%
\newcommand{\CourseStart}{10:00am\xspace}%
\newcommand{\CourseEnd}{10:50am\xspace}%
\newcommand{\CourseInstructorEmail}{[email protected]}
\newcommand{\CourseRoom}{106B3\xspace}%
\newcommand{\CourseBuilding}{Engineering Hall\xspace}%
\newcommand{\CourseUniversity}{University of Illinois, Urbana-Champaign\xspace}%
\newcommand{\TeachingAssistant}{Ibrahim Jarrah\xspace}%
\newcommand{\TAOfficeHourDays}{Wednesdays\xspace}%
\newcommand{\TAOfficeHourStart}{1:00pm\xspace}%
\newcommand{\TAOfficeHourEnd}{3:00pm\xspace}%
\newcommand{\TAOfficeHourPlace}{123 Talbot Laboratory\xspace}
%\newcommand{\Course<++>}{<++>}
%\newcommand{\Course<++>}{<++>}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{\CourseNumber: \CourseTitle\\}
\author{\CourseUniversity}
\date{\CourseSemester \CourseYear}
\begin{document}
\maketitle
%\setlength{\unitlength}{1in}
\renewcommand{\arraystretch}{1.5}
\begin{center}
\begin{table}[h]
\begin{tabularx}{\textwidth}{rXrX}
\hline
\textbf{Instructor:} & \CourseInstructor & \textbf{Time:} & \CourseDays \CourseStart -- \CourseEnd \\
\textbf{Email:} & \href{mailto:\CourseInstructorEmail}{\CourseInstructorEmail} & \textbf{Place:} & \CourseRoom \CourseBuilding\\
\end{tabularx}
\end{table}
\end{center}
\paragraph{Course Pages:}
\begin{enumerate}
\item \url{https://compass2g.illinois.edu}
\item \url{https://github.com/katyhuff/\CourseNumber}
\item \url{https://piazza.com/illinois/fall2016/\CourseNumber/home}
\end{enumerate}
\paragraph{TA Office Hours:} The teaching assistant for the course,
\TeachingAssistant, will hold office hours \TAOfficeHourDays from
\TAOfficeHourStart to \TAOfficeHourEnd in \TAOfficeHourPlace.
\paragraph{Office Hours:} Prof. Huff will hold office hours on Tuesdays and
Fridays from 3pm-COB in her office, 118 Talbot Laboratory at 104 S. Wright St.
If you have an individual issue, please make an appointment. If your colleagues
might be helpful, please post your questions in the forum provided for this
purpose online rather than attending office hours.
\paragraph{Main References:}
A few essential references for this course will be assigned as readings. The
recommended text for this course is \cite{tsoulfanidis_nuclear_2013}.
\bibliographystyle{unsrt}
\renewcommand{\refname}{\normalfont\selectfont\normalsize}\vspace{-1cm}
\bibliography{bibliography}
\paragraph{Objectives:}
This course will equip students to:
\begin{itemize}
\item Quantify impacts of the nuclear power industry
\item Calculate nuclear fuel cycle and capital costs for thermal and fast reactors.
\item Optimize nuclear fuel management for lowest energy costs and highest system performance.
\item Differentiate among features of fossil fuel systems, fission systems, and controlled thermonuclear fusion systems.
\item Quantitatively analyze nuclear fuel cycle technologies for both once-through and closed strategies.
\item Comparatively assess spent fuel storage, reprocessing, and disposal strategies.
\end{itemize}
\paragraph{Prerequisites:}
\begin{itemize}
\item Junior standing is required.
\item NPRE 402 or 247
\end{itemize}
\paragraph{Grading Policy:} Grades will be assigned as a weighted sum of the
following work.
\begin{table}[h]
\begin{tabularx}{\textwidth}{Xrr}
\textbf{Work} & \textbf{Weight (Undergraduate)} & \textbf{Weight (Graduate)} \\
\hline
\textbf{Quizzes} & (20\%) & (0\%)\\
\textbf{Homework} & (40\%) & (40\%)\\
\textbf{Midterm 1} & (10\%) & (10\%)\\
\textbf{Midterm 2} & (10\%) & (10\%)\\
\textbf{Final Exam} & (20\%) & (20\%)\\
\textbf{Final Proj.} & (0\%) & (20\%)\\
\hline
\textbf{Total} & (100\%) & (100\%)\\
\end{tabularx}
\end{table}
\paragraph{Important Dates:}
\begin{center} \begin{minipage}{3.8in}
\begin{flushleft}
Midterm \#1 \dotfill 10:00-10:50am, October 3, 2016 \\
Midterm \#2 \dotfill 10:00-10:50am, November 7, 2016\\
%Project Deadline \dotfill ~Month Day \\
Final Exam \dotfill 1:30-4:30pm, December 14, 2016\\
\end{flushleft}
\end{minipage}
\end{center}
\paragraph{Class Policies:}
\begin{itemize}
\item[] \textbf{Integrity:} This is an institution of higher
learning. You will be swiftly ejected from the course if you are caught
undermining its integrity. Note the
\href{http://www.provost.illinois.edu/academicintegrity/students.html}{Student's
Quick Reference Guide to Academic Integrity} and the
\href{http://studentcode.illinois.edu/article1_part4_1-401.html}{Academic
Integrity Policy and Procedure}.
\item[] \textbf{Attendance:} Regular attendance is mandatory. Request approval for absences due to extenuating circumstances prior to the absence.
\item[] \textbf{Electronics:} Active participation is essential and expected.
Accordingly, students must turn off all electronic devices (laptop,
tablets, cellphones, etc.) during class. Exceptions may be granted for
laptops if engaging in computational exercises or taking notes.
\item[] \textbf{Collaboration:} Collaboratively reviewing course materials and studying for exams with fellow students can be enriching. This is recommended. However, unless otherwise instructed, homework assignments are to be completed independently and materials submitted as homework should be the result of one's own independent work.
\item[] \textbf{Late Work:} Late work has a half-life of 1 hour. That is, adjusted for lateness, your grade $G(t)$ is a decaying percentage of the raw grade $G_0$. An assignment turned in $t$ hours late will receive a grade according to the following relation (a worked example follows this list):
\begin{align*}
G(t) &= G_0e^{-\lambda t}
\intertext{where}
G(t) &= \mbox{grade adjusted for lateness}\\
G_0 &= \mbox{raw grade}\\
\lambda &= \frac{ln(2)}{t_{\frac{1}{2}}} = \mbox{decay constant} \\
t &= \mbox{time elapsed since due [hours]}\\
t_{1/2} &= 1 = \mbox{half-life [hours]} \\
\end{align*}
\item[] \textbf{Make-up Work:} There will be no negotiation about late work except in the case of absence documented by an absence letter from the Dean of Students. The university policy for requesting such a letter is in \href{http://studentcode.illinois.edu/article1_part5_1-501.html}{the Student Code}. Please note that such a letter is appropriate for many types of conflicts, but that religious conflicts require special early handling. In accordance with university policy, students seeking an excused absence for religious reasons should complete the Request for Accommodation for Religious Observances Form, which can be found on the Office of the Dean of Students website. The student should submit this form to the instructor and the Office of the Dean of Students by the end of the second week of the course to which it applies.
\end{itemize}
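For concreteness, here is a brief worked example of the late-work policy above
(the numbers are illustrative only): with a half-life of 1 hour, an assignment
submitted $t = 2$ hours late would receive
\begin{align*}
G(2) &= G_0 e^{-\ln(2)\cdot 2} = G_0 \cdot 2^{-2} = 0.25\,G_0,
\end{align*}
that is, one quarter of its raw grade.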
\paragraph{Accessibility:} I hope that this course will be inclusive and
accommodating for all learners. As such, I am committed to upholding the vision
and values of \href{http://www.inclusiveillinois.illinois.edu/index.html}{Inclusive Illinois}
in my
classroom. With regard to accommodating all learners, please note that many
resources are provided through
\href{http://disability.illinois.edu/academic-support/accommodations}{the
Division of Disability Resources and Educational Services}. To request
particular accommodations, please contact me as soon as possible so that we can
work out any necessary arrangements.
\paragraph{Other Resources:}
University students typically experience a wide range of stressors during their
time on campus. Accordingly, campus resources exist to help students manage
stress levels, mental health, physical health, and emergencies while navigating
this environment. I hope you will take advantage of these campus resources as
soon as they can be of help.
\begin{itemize}
\item \href{https://campusrec.illinois.edu/}{The Campus Recreational Centers}
\item \href{http://counselingcenter.illinois.edu/}{The Counseling Center}
\item \href{http://www.mckinley.illinois.edu/clinics/mental\_health.htm}{The McKinley Mental Health Clinic}
\item \href{http://odos.illinois.edu/emergency/}{The Emergency Dean}
\end{itemize}
\pagebreak
\FloatBarrier
\renewcommand{\arraystretch}{1}
\begin{table}[h]
\begin{center}
\begin{tabular}{lllcllll}
\multicolumn{8}{c}{\textbf{Course Schedule:}\textit{ Note that this schedule is subject to change}}\\
&&&&&&&\\
\textbf{Date} & \textbf{Week} & \textbf{Day} & \textbf{Unit} & \textbf{Chap.} & \textbf{Quiz} & \textbf{HW} & \textbf{HW}\\
& & & & & & \textbf{Given} & \textbf{Due}\\
\hline
\hline
08-22 & 1 & M & Intro & 1 & & & \\
08-24 & 1 & W & Overview & 1 & & & \\
08-26 & 1 & F & Overview & 1 & & HW1 & \\
08-29 & 2 & M & Economics & 8 & Q1 & & \\
08-31 & 2 & W & Economics & 8 & & & \\
09-02 & 2 & F & Economics & 8 & & HW2 & HW1\\
09-05 & 3 & M & \textbullet~\textbf{No Class} \textbullet & & Q2 & & \\
09-07 & 3 & W & Economics & 8 & & & \\
09-09 & 3 & F & Economics & 8 & & HW3 & HW2\\
09-12 & 4 & M & Mining \& Milling & 2 & Q3 & & \\
09-14 & 4 & W & Mining \& Milling & 2 & & & \\
09-16 & 4 & F & Conversion & 3 & & HW4 & HW3\\
09-19 & 5 & M & Enrichment & 3 & Q4 & & \\
09-21 & 5 & W & Enrichment & 3 & & & \\
09-23 & 5 & F & Enrichment & 3 & & HW5 & HW4\\
09-26 & 6 & M & Fuel Fabrication & 4 & Q5 & & \\
09-28 & 6 & W & Fuel Fabrication & 4 & & & \\
09-30 & 6 & F & Fuel Fabrication & 4 & & HW6 & HW5\\
10-03 & 7 & M & \textbullet~\textbf{Midterm} \textbullet & & Q6 & & \\
10-05 & 7 & W & Reactors & 5 & & & \\
10-07 & 7 & F & Reactors & 5 & & HW7 & HW6\\
10-10 & 8 & M & Reactors & 5 & Q7 & & \\
10-12 & 8 & W & Fuel In-Core & 6 & & & \\
10-14 & 8 & F & Fuel In-Core & 6 & & HW8 & HW7\\
10-17 & 9 & M & Fuel In-Core & 6 & Q8 & & \\
10-19 & 9 & W & Reprocessing & 7 & & & \\
10-21 & 9 & F & Reprocessing & 7 & & HW9 & HW8\\
10-24 & 10 & M & Reprocessing & 7 & Q9 & & \\
10-26 & 10 & W & Reprocessing & 7 & & & \\
10-28 & 10 & F & HLW & 9 & & HW10 & HW9\\
10-31 & 11 & M & HLW & 9 & Q10 & & \\
11-02 & 11 & W & HLW & 9 & & & \\
11-04 & 11 & F & HLW & 9 & & HW11 & HW10\\
11-07 & 12 & M & \textbullet~\textbf{Midterm} \textbullet & & Q11 & & \\
11-09 & 12 & W & HLW & 9 & & & \\
11-11 & 12 & F & HLW & 9 & & HW12 & HW11\\
11-14 & 13 & M & LLW & 10 & Q12 & & \\
11-16 & 13 & W & LLW & 10 & & & \\
11-18 & 13 & F & Nonproliferation & 11 & & HW13 & HW12\\
11-21 & 14 & M & \textbullet~\textbf{No Class} \textbullet & & & & \\
11-23 & 14 & W & \textbullet~\textbf{No Class} \textbullet & & & & \\
11-25 & 14 & F & \textbullet~\textbf{No Class} \textbullet & & & & \\
11-28 & 15 & M & Environment & 12 & Q13 & & \\
11-30 & 15 & W & Environment & 12 & & & \\
12-02 & 15 & F & Environment & 12 & & & HW13\\
12-05 & 16 & M & Environment & 12 & & & \\
12-07 & 16 & W & Review & & & & \\
12-09 & 16 & F & \textbullet~\textbf{No Class} \textbullet & & & & \\
12-12 & 17 & M & \textbullet~\textbf{No Class} \textbullet & & & & \\
12-14 & 17 & W & \textbullet~\textbf{Final Exam} \textbullet & & & & \\
\end{tabular}
\end{center}
\end{table}
\FloatBarrier
%%%%%% THE END
\end{document}
|
module OCCA
include("occapaths.jl");
include("occabuiltwith.jl");
#Flags for which threading libraries to build into OCCA.
USE_OPENMP = OCCA_USE_OPENMP;
USE_PTHREADS = OCCA_USE_PTHREADS;
USE_CUDA = OCCA_USE_CUDA;
USE_OPENCL = OCCA_USE_OPENCL;
thisfile= @__FILE__();
thisdir = dirname(thisfile);
tmpdir = pwd();
cd(thisdir);
cd("../deps");
#Point to OCCA shared library.
ENV["OCCA_DIR"]=pwd() * "/OCCA2"
cd(tmpdir);
#---[ Types ]-----------------
type Device
cdevice::Ptr{Void}
end
function Device(infos::String)
cdevice = ccall((:occaGetDevice, libocca),
Ptr{Void},
(Ptr{Uint8},),
bytestring(infos));
return Device(cdevice);
end
function Device(;mode = "",
threadCount = -1,
schedule = "",
pinnedCores = Int32[],
deviceID = -1,
platformID = -1)
infos::String = "";
if mode != ""
infos *= string("mode = ", mode)
end
if 0 <= threadCount
infos *= string(", threadCount = ", threadCount)
end
if schedule != ""
infos *= string(", schedule = ", schedule)
end
if 0 < length(pinnedCores)
infos *= string(", pinnedCores = [", pinnedCores[1])
for core in pinnedCores[2:end]
infos *= string(", ", core)
end
infos *= "]"
end
if 0 <= deviceID
infos *= string(", deviceID = ", deviceID)
end
if 0 <= platformID
infos *= string(", platformID = ", platformID)
end
return Device(infos)
end
type Stream
cstream::Ptr{Void}
end
type Kernel
ckernel::Ptr{Void}
end
type KernelInfo
ckernelinfo::Ptr{Void}
end
function KernelInfo()
return KernelInfo(ccall((:occaCreateKernelInfo, libocca),Ptr{Void},()));
end
type Memory
cmemory::Ptr{Void}
ctypes
end
#---[ Device ]----------------
function free(d::Device)
ccall((:occaDeviceFree, libocca),
Void,
(Ptr{Void},),
d.cdevice)
end
function mode(d::Device)
cmode = ccall((:occaDeviceMode, libocca),
Ptr{Uint8},
(Ptr{Void},), d.cdevice)
return bytestring(cmode)
end
function set!(d::Device;compiler="",flags="")
if length(compiler)>0
ccall((:occaDeviceSetCompiler, libocca),
Void,
(Ptr{Void}, Ptr{Uint8},),
d.cdevice, bytestring(compiler));
end
if length(flags)>0
ccall((:occaDeviceSetCompilerFlags, libocca),
Void,
(Ptr{Void}, Ptr{Uint8},),
d.cdevice, bytestring(flags));
end
end
function buildkernel(d::Device,filename::String,functionName::String;binary=false)
if binary
return Kernel(ccall((:occaBuildKernelFromBinary, libocca),
Ptr{Void},
(Ptr{Void}, Ptr{Uint8}, Ptr{Uint8},),
d.cdevice,
bytestring(filename),
bytestring(functionName)));
else
return Kernel(ccall((:occaBuildKernelFromSource, libocca),
Ptr{Void},
(Ptr{Void}, Ptr{Uint8}, Ptr{Uint8}, Ptr{Void},),
d.cdevice,
bytestring(filename),
bytestring(functionName),
C_NULL));
end
end
function buildkernel(d::Device,filename::String,functionName::String,info::KernelInfo)
return Kernel(ccall((:occaBuildKernelFromSource, libocca),
Ptr{Void},
(Ptr{Void}, Ptr{Uint8}, Ptr{Uint8}, Ptr{Void},),
d.cdevice,
bytestring(filename),
bytestring(functionName),
info.ckernelinfo));
end
function malloc(d::Device, source::Array)
ctypes = typeof(source[1])
bytes = length(source) * sizeof(ctypes)
convert(Uint, bytes)
cmemory = ccall((:occaDeviceMalloc, libocca),
Ptr{Void},
(Ptr{Void}, Uint, Ptr{Void},),
d.cdevice, bytes, pointer(source))
return Memory(cmemory, ctypes)
end
function malloc(d::Device, t::Type, nentries)
bytes = sizeof(t)*nentries;
convert(Uint, bytes)
cmemory = ccall((:occaDeviceMalloc, libocca),
Ptr{Void},
(Ptr{Void}, Uint, Ptr{Void},),
d.cdevice, bytes, C_NULL);
return Memory(cmemory, t)
end
function flush(d::Device)
ccall((:occaDeviceFlush, libocca),
Void,
(Ptr{Void},), d.cdevice)
end
function finish(d::Device)
ccall((:occaDeviceFinish, libocca),
Void,
(Ptr{Void},), d.cdevice)
end
function createstream(d::Device)
cstream = ccall((:occaCreateStream, libocca),
Ptr{Void},
(Ptr{Void},),
d.cdevice)
return Stream(cstream)
end
function getstream(d::Device)
cstream = ccall((:occaGetStream, libocca),
Ptr{Void},
(Ptr{Void},),
d.cdevice)
return Stream(cstream)
end
function setstream!(d::Device, s::Stream)
ccall((:occaSetStream, libocca),
Void,
(Ptr{Void}, Ptr{Void},),
d.cdevice, s.cstream)
end
#---[ Kernel ]----------------
function free(k::Kernel)
ccall((:occaKernelFree, libocca),
Void,
(Ptr{Void},),
k.ckernel)
end
function mode(k::Kernel)
cMode = ccall((:occaKernelMode, libocca),
Ptr{Uint8},
(Ptr{Void},),
k.ckernel)
return bytestring(cMode)
end
function getpreferreddimsize(k::Kernel)
return ccall((:occaKernelPreferredDimSize, libocca),
Int32,
(Ptr{Void},),
k.ckernel)
end
function setworkingdims!(k::Kernel,
dims, items, groups)
convert(Int32, dims)
items_ = ones(Uint, 3)
groups_ = ones(Uint, 3)
for i = 1:dims
items_[i] = items[i]
groups_[i] = groups[i]
end
ccall((:occaKernelSetAllWorkingDims, libocca),
Void,
(Ptr{Void},
Int32,
Uint, Uint, Uint,
Uint, Uint, Uint,),
k.ckernel,
dims,
items_[1] , items_[2] , items_[3],
groups_[1], groups_[2], groups_[3])
end
argType(arg::Int8) = ccall((:occaChar, libocca), Ptr{Void}, (Int8,) , arg)
argType(arg::Uint8) = ccall((:occaUChar, libocca), Ptr{Void}, (Uint8,), arg)
argType(arg::Int16) = ccall((:occaShort, libocca), Ptr{Void}, (Int16,) , arg)
argType(arg::Uint16) = ccall((:occaUShort, libocca), Ptr{Void}, (Uint16,), arg)
argType(arg::Int32) = ccall((:occaInt, libocca), Ptr{Void}, (Int32,) , arg)
argType(arg::Uint32) = ccall((:occaUInt, libocca), Ptr{Void}, (Uint32,), arg)
argType(arg::Int64) = ccall((:occaLong, libocca), Ptr{Void}, (Int64,) , arg)
argType(arg::Uint64) = ccall((:occaULong, libocca), Ptr{Void}, (Uint64,), arg)
argType(arg::Float32) = ccall((:occaFloat, libocca), Ptr{Void}, (Float32,) , arg)
argType(arg::Float64) = ccall((:occaDouble, libocca), Ptr{Void}, (Float64,) , arg)
argType(arg::Memory) = arg.cmemory;
function runkernel!(k::Kernel, args...)
argList = ccall((:occaCreateArgumentList, libocca),
Ptr{Void}, ())
pos = convert(Int32, 0)
for arg in args
carg = argType(arg);
ccall((:occaArgumentListAddArg,libocca),
Void,
(Ptr{Void}, Int32, Ptr{Void},),
argList, pos, carg);
pos += 1
end
ccall((:occaKernelRun_, libocca),
Void,
(Ptr{Void}, Ptr{Void},),
k.ckernel, argList)
ccall((:occaArgumentListFree, libocca),
Void,
(Ptr{Void},),
argList)
end
function adddefine!(info::KernelInfo, macro_::String, value::String)
occaValue = ccall((:occaString, libocca),
Ptr{Void},
(Ptr{Uint8},),
bytestring(value))
ccall((:occaKernelInfoAddDefine, libocca),
Void,
(Ptr{Void}, Ptr{Uint8}, Ptr{Void},),
info.ckernelinfo, bytestring(macro_), occaValue)
end
function addinclude!(info::KernelInfo, includepath::String)
ccall((:occaKernelInfoAddInclude,libocca),
Void,
(Ptr{Void},Ptr{Uint8},),
info.ckernelinfo,bytestring(includepath));
end
function free(info::KernelInfo)
ccall((:occaKernelInfoFree, libocca),
Void,
(Ptr{Void},),
info.ckernelinfo)
end
#---[ Memory ]----------------
function free(m::Memory)
ccall((:occaMemoryFree, libocca),
Void,
(Ptr{Void},),
m.cmemory)
end
function mode(m::Memory)
cMode = ccall((:occaMemoryMode, libocca),
Ptr{Uint8},
(Ptr{Void},),
m.cmemory)
return bytestring(cMode)
end
function memcpy!{T}(dest::Memory,src::Array{T})
destptr=dest.cmemory;
srcptr = pointer(src);
ccall((:occaCopyPtrToMem,libocca),Void,(Ptr{Void},Ptr{Void},Uint,Uint,),
destptr,srcptr,Uint(0),Uint(0));
end
function memcpy!(dest::Memory,src::Memory)
destptr=dest.cmemory;
srcptr =src.cmemory;
ccall((:occaCopyMemToMem, libocca),Void,(Ptr{Void}, Ptr{Void}, Uint, Uint, Uint,),
destptr, srcptr, Uint(0), Uint(0), Uint(0))
end
function memcpy!{T}(dest::Array{T},src::Memory)
destptr=pointer(dest);
srcptr =src.cmemory;
ccall((:occaCopyMemToPtr,libocca),Void,(Ptr{Void},Ptr{Void},Uint,Uint,),
destptr,srcptr,Uint(0),Uint(0));
end
function memcpy!{T}(dest::Array{T},src::Array{T})
for i = 1 : length(dest)
dest[i]=src[i];
end
end
function swap!(a::Memory, b::Memory)
tmp = a.cmemory
a.cmemory = b.cmemory
b.cmemory = tmp
end
function rebuildwith!(;pthreads=false,opencl=false,cuda=false,openmp=false)
f=open(thisdir * "/occabuiltwith.jl","w");
if openmp
write(f,"OCCA_USE_OPENMP = true;\n");
else
write(f,"OCCA_USE_OPENMP = false;\n");
end
if pthreads
write(f,"OCCA_USE_PTHREADS = true;\n");
else
write(f,"OCCA_USE_PTHREADS = false;\n");
end
if opencl
write(f,"OCCA_USE_OPENCL = true;\n");
else
write(f,"OCCA_USE_OPENCL = false;\n");
end
if cuda
write(f,"OCCA_USE_CUDA = true;\n");
else
write(f,"OCCA_USE_CUDA = false;\n");
end
close(f);
reload("OCCA");
USE_OPENMP = openmp;
USE_PTHREADS = pthreads;
USE_CUDA = cuda;
USE_OPENCL = opencl;
Pkg.build("OCCA");
end
function verbosecompile(enabled::Bool)
ccall((:occaSetVerboseCompilation, libocca),
Void,
(Int32,),
Int32(enabled ? 1 : 0));
end
end
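#---[ Usage sketch ]----------
# A minimal, illustrative example of driving this module (not part of the
# original source). The kernel file "addVectors.okl" and its entry point
# "addVectors" are hypothetical placeholders, and the sketch assumes OCCA was
# built with OpenMP support:
#
# using OCCA
# device = OCCA.Device("mode = OpenMP, threadCount = 4")
# a = Float32[1.0, 2.0, 3.0]
# b = Float32[4.0, 5.0, 6.0]
# d_a = OCCA.malloc(device, a)   # allocate device memory from host arrays
# d_b = OCCA.malloc(device, b)
# kernel = OCCA.buildkernel(device, "addVectors.okl", "addVectors")
# OCCA.runkernel!(kernel, Int32(length(a)), d_a, d_b)
# OCCA.finish(device)
# result = zeros(Float32, length(a))
# OCCA.memcpy!(result, d_a)      # copy device memory back to the host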
|
{-# OPTIONS --cubical --safe #-}
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Function
open import Cubical.Foundations.Path
open import Cubical.Foundations.Isomorphism renaming (Iso to _≅_)
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.HLevels
open import Cubical.Functions.FunExtEquiv
open import Cubical.Data.Unit
open import Cubical.Data.Sigma
open import Cubical.Data.Nat
module Cubical.Data.W.Indexed where
open _≅_
private
variable
ℓX ℓS ℓP : Level
module Types {X : Type ℓX} (S : X → Type ℓS) (P : ∀ x → S x → Type ℓP) (inX : ∀ x (s : S x) → P x s → X) where
data IW (x : X) : Type (ℓ-max ℓX (ℓ-max ℓS ℓP)) where
node : (s : S x) → (subtree : (p : P x s) → IW (inX x s p)) → IW x
Subtree : ∀ {x} → (s : S x) → Type (ℓ-max (ℓ-max ℓX ℓS) ℓP)
Subtree {x} s = (p : P x s) → IW (inX x s p)
RepIW : (x : X) → Type (ℓ-max (ℓ-max ℓX ℓS) ℓP)
RepIW x = Σ[ s ∈ S x ] Subtree s
open Types public
module _ {X : Type ℓX} {S : X → Type ℓS} {P : ∀ x → S x → Type ℓP} {inX : ∀ x (s : S x) → P x s → X} where
getShape : ∀ {x} → IW S P inX x → S x
getShape (node s subtree) = s
getSubtree : ∀ {x} → (w : IW S P inX x) → (p : P x (getShape w)) → IW S P inX (inX x (getShape w) p)
getSubtree (node s subtree) = subtree
wExt : ∀ {x} (w w' : IW S P inX x)
→ (ps : getShape w ≡ getShape w')
→ (pw : PathP (λ i → Subtree S P inX (ps i)) (getSubtree w) (getSubtree w'))
→ w ≡ w'
wExt (node s subtree) (node s' subtree') ps psubtree = cong₂ node ps psubtree
isoRepIW : (x : X) → IW S P inX x ≅ RepIW S P inX x
fun (isoRepIW x) (node s subtree) = s , subtree
inv (isoRepIW x) (s , subtree) = node s subtree
rightInv (isoRepIW x) (s , subtree) = refl
leftInv (isoRepIW x) (node s subtree) = refl
equivRepIW : (x : X) → IW S P inX x ≃ RepIW S P inX x
equivRepIW x = isoToEquiv (isoRepIW x)
pathRepIW : (x : X) → IW S P inX x ≡ RepIW S P inX x
pathRepIW x = ua (equivRepIW x)
isPropIW : (∀ x → isProp (S x)) → ∀ x → isProp (IW S P inX x)
isPropIW isPropS x (node s subtree) (node s' subtree') =
cong₂ node (isPropS x s s') (toPathP (funExt λ p → isPropIW isPropS _ _ (subtree' p)))
module IWPathTypes {X : Type ℓX} (S : X → Type ℓS) (P : ∀ x → S x → Type ℓP) (inX : ∀ x (s : S x) → P x s → X) where
--somewhat inspired by https://github.com/jashug/IWTypes , but different.
IndexCover : Type (ℓ-max (ℓ-max ℓX ℓS) ℓP)
IndexCover = Σ[ x ∈ X ] IW S P inX x × IW S P inX x
ShapeCover : IndexCover → Type ℓS
ShapeCover (x , w , w') = getShape w ≡ getShape w'
ArityCover : ∀ xww' → ShapeCover xww' → Type ℓP
ArityCover (x , w , w') ps = P x (getShape w')
inXCover : ∀ xww' → (ps : ShapeCover xww') → ArityCover xww' ps → IndexCover
inXCover (x , w , w') ps p = (inX x (getShape w') p) , (subst (Subtree S P inX) ps (getSubtree w) p , getSubtree w' p)
Cover : ∀ {x : X} → (w w' : IW S P inX x) → Type (ℓ-max (ℓ-max ℓX ℓS) ℓP)
Cover {x} w w' = IW ShapeCover ArityCover inXCover (x , w , w')
module IWPath {X : Type ℓX} {S : X → Type ℓS} {P : ∀ x → S x → Type ℓP} {inX : ∀ x (s : S x) → P x s → X} where
open IWPathTypes S P inX
isoEncode : ∀ {x} (w w' : IW S P inX x) → (w ≡ w') ≅ Cover w w'
isoEncodeSubtree : ∀ {x} (w w' : IW S P inX x) (ps : ShapeCover (x , w , w'))
→ (PathP (λ i → Subtree S P inX (ps i)) (getSubtree w) (getSubtree w'))
≅
(∀ (p : P x (getShape w')) → IW ShapeCover ArityCover inXCover (inXCover (x , w , w') ps p))
isoEncodeSubtree w w'@(node s' subtree') ps =
PathPIsoPath (λ i → Subtree S P inX (ps i)) (getSubtree w) (getSubtree w') ⟫
invIso (funExtIso) ⟫
codomainIsoDep (λ p → isoEncode _ (subtree' p))
where _⟫_ = compIso
infixr 10 _⟫_
fun (isoEncode w@(node s subtree) w'@(node s' subtree')) pw =
node (cong getShape pw) (fun (isoEncodeSubtree w w' (cong getShape pw)) (cong getSubtree pw))
inv (isoEncode w@(node s subtree) w'@(node s' subtree')) cw@(node ps csubtree) =
cong₂ node ps (inv (isoEncodeSubtree w w' ps) csubtree)
rightInv (isoEncode w@(node s subtree) w'@(node s' subtree')) cw@(node ps csubtree) =
cong (node ps) (
fun (isoEncodeSubtree w w' ps) (inv (isoEncodeSubtree w w' ps) csubtree)
≡⟨ rightInv (isoEncodeSubtree w w' ps) csubtree ⟩
csubtree ∎
)
leftInv (isoEncode w@(node s subtree) w'@(node s' subtree')) pw =
cong₂ node (cong getShape pw)
(inv (isoEncodeSubtree w w' (cong getShape pw))
(fun (isoEncodeSubtree w w' (cong getShape pw))
(cong getSubtree pw)
)
)
≡⟨ cong (cong₂ node (cong getShape pw)) (leftInv (isoEncodeSubtree w w' (cong getShape pw)) (cong getSubtree pw)) ⟩
cong₂ node (cong getShape pw) (cong getSubtree pw)
≡⟨ flipSquare (λ i → wExt (node (getShape (pw i)) (getSubtree (pw i))) (pw i) refl refl) ⟩
pw ∎
encode : ∀ {x} (w w' : IW S P inX x) → w ≡ w' → Cover w w'
encode w w' = fun (isoEncode w w')
decode : ∀ {x} (w w' : IW S P inX x) → Cover w w' → w ≡ w'
decode w w' = inv (isoEncode w w')
decodeEncode : ∀ {x} (w w' : IW S P inX x) → (pw : w ≡ w') → decode w w' (encode w w' pw) ≡ pw
decodeEncode w w' = leftInv (isoEncode w w')
encodeDecode : ∀ {x} (w w' : IW S P inX x) → (cw : Cover w w') → encode w w' (decode w w' cw) ≡ cw
encodeDecode w w' = rightInv (isoEncode w w')
equivEncode : ∀ {x} (w w' : IW S P inX x) → (w ≡ w') ≃ Cover w w'
equivEncode w w' = isoToEquiv (isoEncode w w')
pathEncode : ∀ {x} (w w' : IW S P inX x) → (w ≡ w') ≡ Cover w w'
pathEncode w w' = ua (equivEncode w w')
open IWPathTypes
open IWPath
isOfHLevelSuc-IW : {X : Type ℓX} {S : X → Type ℓS} {P : ∀ x → S x → Type ℓP} {inX : ∀ x (s : S x) → P x s → X} →
(n : HLevel) → (∀ x → isOfHLevel (suc n) (S x)) → ∀ x → isOfHLevel (suc n) (IW S P inX x)
isOfHLevelSuc-IW zero isHS x = isPropIW isHS x
isOfHLevelSuc-IW (suc n) isHS x w w' =
subst (isOfHLevel (suc n)) (λ i → pathEncode w w' (~ i))
(isOfHLevelSuc-IW n
(λ (y , v , v') → isHS y (getShape v) (getShape v'))
(x , w , w')
)
|
Every numeral is a real number. |
{-# OPTIONS --cubical --safe #-}
module Data.Vec where
open import Prelude
private
variable
n m : ℕ
infixr 5 _∷_
data Vec (A : Type a) : ℕ → Type a where
[] : Vec A zero
_∷_ : A → Vec A n → Vec A (suc n)
head : Vec A (suc n) → A
head (x ∷ _) = x
foldr : (A → B → B) → B → Vec A n → B
foldr f b [] = b
foldr f b (x ∷ xs) = f x (foldr f b xs)
|
||| Testing IntMap using silly stupid tests
module Test.Patricia.Spec
import Patricia.IntMap
import Patricia.IntSet
import Specdris.Spec
----------------------------------------------------------------------------
-- Maps to test
----------------------------------------------------------------------------
initMap : Int32Map String
initMap = fromList [(1,"a"), (2,"b"), (3,"c"), (4, "d"), (4, "x")]
biggerMap : Int32Map String
biggerMap = insert 5 "e" initMap
lesserMap : Int32Map String
lesserMap = delete 10 $ delete 5 $ delete 2 biggerMap
----------------------------------------------------------------------------
-- Sets to test
----------------------------------------------------------------------------
initSet : Int32Set
initSet = fromList [1, 2, 3, 4, 4]
biggerSet : Int32Set
biggerSet = insert 5 initSet
lesserSet : Int32Set
lesserSet = delete 10 $ delete 5 $ delete 2 biggerSet
----------------------------------------------------------------------------
-- Test runner
----------------------------------------------------------------------------
export
main : IO ()
main = spec $ do
describe "Patricia" $ do
it "size" $ do
size initMap `shouldBe` 4
size biggerMap `shouldBe` 5
size lesserMap `shouldBe` 3
it "lookup & insert & delete" $ do
lookup 2 initMap `shouldBe` Just "b"
lookup 4 initMap `shouldBe` Just "x"
lookup 0 initMap `shouldBe` Nothing
lookup 5 biggerMap `shouldBe` Just "e"
lookup 5 lesserMap `shouldBe` Nothing
lookup 2 lesserMap `shouldBe` Nothing
lookup 3 lesserMap `shouldBe` Just "c"
it "toList" $ do
values initMap `shouldBe` ["a", "b", "c", "x"]
values biggerMap `shouldBe` ["a", "b", "c", "x", "e"]
values lesserMap `shouldBe` ["a", "c", "x"]
describe "IntSet" $ do
it "size" $ do
size initSet `shouldBe` 4
size biggerSet `shouldBe` 5
size lesserSet `shouldBe` 3
it "member & insert & delete" $ do
member 2 initSet `shouldBe` True
member 4 initSet `shouldBe` True
member 0 initSet `shouldBe` False
member 5 biggerSet `shouldBe` True
member 5 lesserSet `shouldBe` False
member 2 lesserSet `shouldBe` False
member 3 lesserSet `shouldBe` True
|
Require Import msl.base.
Require Import msl.sepalg.
Require Import Recdef.
Require Wellfounded. (* Can't Import this, because that brings the identifier B into
scope, which breaks things like `{ageable B} in this file.
Stupid feature of Coq, that the B in `{ageable B} is not unambiguously a
binding occurrence of B. *)
Delimit Scope pred with pred.
Local Open Scope pred.
Definition pred (A:Type) := A -> Prop.
Bind Scope pred with pred.
Definition derives (A:Type) (P Q:pred A) := forall a:A, P a -> Q a.
Implicit Arguments derives.
Lemma pred_ext : forall A (P Q:pred A),
derives P Q -> derives Q P -> P = Q.
Proof.
intros.
extensionality a.
apply prop_ext; intuition.
Qed.
Lemma derives_cut {A} : forall Q P R : pred A,
derives P Q ->
derives Q R ->
derives P R.
Proof.
repeat intro; intuition.
Qed.
Definition prop {A: Type} (P: Prop) : pred A := (fun _ => P).
Hint Unfold prop.
Definition TT {A}: pred A := prop True.
Definition FF {A}: pred A := prop False.
Set Implicit Arguments.
Definition imp {A} (P Q:pred A) :=
fun a:A => P a -> Q a.
Definition orp {A} (P Q:pred A) :=
fun a:A => P a \/ Q a.
Definition andp {A} (P Q:pred A) :=
fun a:A => P a /\ Q a.
Definition allp {A B: Type} (f: B -> pred A) : pred A
:= fun a => forall b, f b a.
Definition exp {A B: Type} (f: B -> pred A) : pred A
:= fun a => exists b, f b a.
Notation "'emp'" := identity.
Definition sepcon {A} {JA: Join A}(p q:pred A) := fun z:A =>
exists x:A, exists y:A, join x y z /\ p x /\ q y.
Definition wand {A} {JA: Join A} (p q:pred A) := fun y =>
forall x z, join x y z -> p x -> q z.
Notation "P '|--' Q" := (derives P Q) (at level 80, no associativity).
Notation "'EX' x ':' T ',' P " := (exp (fun x:T => P%pred)) (at level 65, x at level 99) : pred.
Notation "'ALL' x ':' T ',' P " := (allp (fun x:T => P%pred)) (at level 65, x at level 99) : pred.
Infix "||" := orp (at level 50, left associativity) : pred.
Infix "&&" := andp (at level 40, left associativity) : pred.
Notation "P '-->' Q" := (imp P Q) (at level 55, right associativity) : pred.
Notation "P '<-->' Q" := (andp (imp P Q) (imp Q P)) (at level 57, no associativity) : pred.
Notation "P '*' Q" := (sepcon P Q) : pred.
Notation "P '-*' Q" := (wand P Q) (at level 60, right associativity) : pred.
Notation "'!!' e" := (prop e) (at level 25) : pred.
Definition precise {A} {JA: Join A}{PA: Perm_alg A} (P: pred A) : Prop :=
forall w w1 w2, P w1 -> P w2 -> join_sub w1 w -> join_sub w2 w -> w1=w2.
Definition precise2 {A} {JA: Join A}{PA: Perm_alg A} (P: pred A) : Prop :=
forall Q R, P * (Q && R) = (P * Q) && (P * R).
Lemma precise_eq {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}{CA: Canc_alg A}:
precise =
fun P : pred A => forall Q R, P * (Q && R) = (P * Q) && (P * R).
Proof.
extensionality P.
unfold precise.
apply prop_ext; split; intros.
extensionality w.
apply prop_ext; split; intros.
destruct H0 as [phi1 [phi2 [? [? [? ?]]]]].
split; exists phi1; exists phi2; auto.
destruct H0 as [[phi1a [phi2a [? [? ?]]]] [phi1b [phi2b [? [? ?]]]]].
specialize (H w _ _ H1 H4).
spec H.
econstructor; eauto.
spec H.
econstructor; eauto.
subst phi1b.
generalize (join_canc (join_comm H0) (join_comm H3)).
intro; subst phi2b.
exists phi1a; exists phi2a; split; auto.
split; auto.
split; auto.
rename w1 into w1a.
rename w2 into w1b.
destruct H2 as [w2a ?].
destruct H3 as [w2b ?].
pose (fa x := x=w2a).
pose (fb x := x=w2b).
assert (((P * fa) && (P * fb)) w).
split; do 2 econstructor; repeat split; eauto.
rewrite <- H in H4.
destruct H4 as [w1 [w2 [? [? [? ?]]]]].
unfold fa,fb in *.
subst.
generalize (join_canc H2 H4); intro.
subst w1a w2b.
eapply join_canc; eauto.
Qed.
Lemma derives_precise {A} {JA: Join A}{PA: Perm_alg A}:
forall P Q, (P |-- Q) -> precise Q -> precise P.
Proof.
intros; intro; intros; eauto.
Qed.
Lemma prop_true_and:
forall (P: Prop) A (Q: pred A), P -> (!! P && Q = Q).
Proof.
intros. unfold prop, andp;
extensionality w; apply prop_ext; split; intuition.
Qed.
Lemma prop_andp_e {A}: forall P Q (w:A), (!! P && Q) w -> P /\ Q w.
Proof.
intuition; destruct H; auto.
Qed.
Lemma prop_andp_i {A}: forall P Q (w:A), P /\ Q w -> (!! P && Q) w.
Proof.
intuition.
split; auto.
Qed.
Lemma derives_trans {A}: forall (P Q R: pred A), P |-- Q -> Q |-- R -> P |-- R.
Proof.
firstorder.
Qed.
Lemma and_i {A}: forall (P Q R: pred A),
P |-- Q -> P |-- R -> P |-- Q && R.
Proof. intuition.
intros w ?.
split; eauto.
Qed.
Lemma andp_derives {A} :
forall P Q P' Q': pred A, P |-- P' -> Q |-- Q' -> P && Q |-- P' && Q'.
Proof.
intros.
intros w [? ?]; split; auto.
Qed.
Lemma sepcon_assoc {A} {JA: Join A}{PA: Perm_alg A}:
forall p q r, (((p * q) * r) = (p * (q * r))).
Proof.
pose proof I.
intros.
extensionality w; apply prop_ext; split; intros.
destruct H0 as [w12 [w3 [? [[w1 [w2 [? [? ?]]]] ?]]]].
destruct (join_assoc H1 H0) as [w23 [? ?]].
exists w1; exists w23; repeat split; auto.
exists w2; exists w3; split; auto.
destruct H0 as [w1 [w23 [? [? [w2 [w3 [? [? ?]]]]]]]].
destruct (join_assoc (join_comm H2) (join_comm H0)) as [w12 [? ?]].
exists w12; exists w3; repeat split; auto.
exists w1; exists w2; repeat split; auto.
Qed.
Lemma sepcon_comm {A} {JA: Join A}{PA: Perm_alg A}: forall (P Q: pred A) , P * Q = Q * P.
Proof.
intros.
extensionality w; apply prop_ext; split; intros;
(destruct H as [w1 [w2 [? [? ?]]]]; exists w2; exists w1; split ; [apply join_comm; auto | split; auto]).
Qed.
Lemma sepcon_emp {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}{CA: Canc_alg A}: forall P, (P * emp) = P.
Proof.
intros.
extensionality w; apply prop_ext; split; intros.
destruct H as [w1 [w2 [? [? ?]]]].
generalize (identity_unit (a:=w1) H1); intro.
spec H2.
econstructor; eauto.
unfold unit_for in H2.
generalize (join_eq H (join_comm H2)).
intros; subst; auto.
destruct (join_ex_identities w) as [e [? ?]].
exists w; exists e; repeat split; auto.
apply join_comm.
apply identity_unit; auto.
Qed.
Lemma emp_sepcon {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}{CA: Canc_alg A}:
forall P, (emp*P) = P.
Proof. intros. rewrite sepcon_comm; rewrite sepcon_emp; auto. Qed.
Lemma precise_emp {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}{CA: Canc_alg A}:
precise emp.
Proof.
intros.
rewrite precise_eq.
intros.
repeat rewrite emp_sepcon.
auto.
Qed.
Definition exactly {A} (x: A) : pred A := fun w => w=x.
Lemma join_exactly {A} {JA: Join A}{PA: Perm_alg A}:
forall w1 w2 w3, join w1 w2 w3 -> exactly w1 * exactly w2 = exactly w3.
Proof.
intros.
unfold exactly.
extensionality w.
apply prop_ext; split; intros.
destruct H0 as [? [? [? [? ?]]]].
subst. eapply join_eq; eauto.
subst w3.
exists w1; exists w2; split; auto.
Qed.
Lemma exists_and1 {A: Type} : forall {T: Type} (P: T -> pred A) (Q: pred A),
exp P && Q = EX x:T, P x && Q.
Proof.
intros.
extensionality w.
apply prop_ext; split; intros.
destruct H as [[x ?] ?].
exists x; split; auto.
destruct H as [x [? ?]].
split; auto.
exists x; auto.
Qed.
Lemma andp_comm {A: Type}: forall (P Q: pred A), P && Q = Q && P.
Proof.
intros.
extensionality w.
unfold andp;
apply prop_ext; split; intuition.
Qed.
Lemma andp_assoc {A}: forall (P Q R: pred A),
((P && Q) && R = P && (Q && R)).
Proof.
intros.
extensionality w.
unfold andp.
apply prop_ext; intuition.
Qed.
Lemma True_andp_eq {A}:
forall (P: Prop) (Q: pred A), P -> (!!P && Q)%pred = Q.
intros.
extensionality w; apply prop_ext; split; unfold prop, andp; simpl; intros; intuition.
Qed.
Lemma TT_i {A} : forall w: A, TT w.
Proof.
unfold TT, prop; simpl; auto.
Qed.
Hint Resolve @TT_i.
Lemma TT_and {A}: forall (Q: pred A), TT && Q = Q.
intros; unfold andp, TT, prop; extensionality w.
apply prop_ext; intuition.
Qed.
Lemma andp_TT {A}: forall (P: pred A), P && TT = P.
Proof.
intros.
extensionality w; apply prop_ext; split; intros.
destruct H; auto.
split; auto.
Qed.
Lemma emp_wand {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}{CA: Canc_alg A}:
forall P, emp -* P = P.
Proof.
intros.
extensionality w; apply prop_ext; split; intros.
destruct (join_ex_units w) as [e ?].
eapply H; eauto.
eapply unit_identity; eauto.
intro; intros.
replace z with w; auto.
Qed.
Lemma wand_derives {A} {JA: Join A}{PA: Perm_alg A}:
forall P P' Q Q', P' |-- P -> Q |-- Q' -> P -* Q |-- P' -* Q'.
Proof.
intros.
intros w ?.
intro; intros.
eauto.
Qed.
Lemma TT_sepcon_TT {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}: TT * TT = TT.
Proof.
intros.
extensionality w; apply prop_ext; split; intros; auto.
destruct (join_ex_units w).
exists x; exists w; split; auto.
Qed.
Definition ewand {A} {JA: Join A} (P Q: pred A) : pred A :=
fun w => exists w1, exists w2, join w1 w w2 /\ P w1 /\ Q w2.
(* Notation "P '-o' Q" := (ewand P Q) (at level 60, right associativity). *)
Lemma emp_ewand {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}{CA: Canc_alg A}: forall P, ewand emp P = P.
Proof.
intros.
extensionality w; apply prop_ext; split; intros.
destruct H as [w1 [w2 [? [? ?]]]].
replace w with w2; auto.
eapply join_eq; eauto.
eapply identity_unit; eauto.
destruct (join_ex_units w) as [e ?].
exists e; exists w.
split; auto. split; auto.
eapply unit_identity; eauto.
Qed.
Lemma exists_sepcon1 {A} {JA: Join A}{PA: Perm_alg A}:
forall T (P: T -> pred A) Q, exp P * Q = exp (fun x => P x * Q).
Proof.
intros.
extensionality w.
apply prop_ext; split; intros.
destruct H as [w1 [w2 [? [[x ?] ?]]]].
exists x; exists w1; exists w2; split; auto.
destruct H as [x [w1 [w2 [? [? ?]]]]].
exists w1; exists w2; split; auto.
split; auto.
exists x; auto.
Qed.
Lemma derives_refl {A: Type}:
forall (P: pred A), (P |-- P).
Proof. firstorder.
Qed.
Hint Resolve @derives_refl.
Lemma derives_TT {A}: forall (P: pred A), P |-- TT.
Proof.
intros.
intros ? ?; auto.
Qed.
Hint Resolve @derives_TT.
Lemma sepcon_derives {A} {JA: Join A}{PA: Perm_alg A}:
forall p q p' q', (p |-- p') -> (q |-- q') -> (p * q |-- p' * q').
Proof.
intros.
do 2 intro.
destruct H1 as [w1 [w2 [? [? ?]]]].
exists w1; exists w2; repeat split ;auto.
Qed.
Lemma derives_e {A: Type}: forall p q (st: A),
(p |-- q) -> p st -> q st.
Proof.
auto.
Qed.
Lemma exp_derives {A} :
forall B (P: B -> pred A) Q , (forall x:B, P x |-- Q x) -> (exp P |-- exp Q).
Proof.
intros.
intros w [b ?].
exists b; eapply H; eauto.
Qed.
Lemma unmodus_wand {A} {JA: Join A}{PA: Perm_alg A}:
forall P Q R, Q = P * R -> Q |-- P * (P -* Q).
Proof.
intros.
subst.
apply sepcon_derives; auto.
intros ?w ?; intro; intros.
exists x; exists w; split; auto.
Qed.
Definition superprecise {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A} (P: pred A) :=
forall w1 w2, P w1 -> P w2 -> comparable w1 w2 -> w1=w2.
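(* Added note: [superprecise P] says that any two worlds satisfying [P] that
   are comparable (i.e. share the same unit) must be equal.  This is stronger
   than [precise], which only identifies sub-worlds of a common world. *)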
Lemma modus_ewand {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A} : forall P Q, superprecise P -> P * (ewand P Q) |-- Q.
Proof.
pose proof I.
intros.
intros w ?.
destruct H1 as [w1 [w2 [? [? ?]]]].
unfold ewand in H3.
destruct H3 as [w1' [w3 [? [? ?]]]].
assert (w1'=w1).
apply H0; auto.
apply comparable_trans with w2. eapply join_comparable2; eauto.
apply comparable_sym. eapply join_comparable2; eauto.
subst.
replace w with w3; auto.
eapply join_eq; eauto.
Qed.
Lemma exists_expand_sepcon {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}:
forall B (p: B -> pred A) q, (exp p * q)%pred = (exp (fun x => p x * q))%pred.
Proof.
intros; extensionality w; apply prop_ext; split; intros.
destruct H as [? [? [? [? ?]]]].
destruct H0.
exists x1; exists x; exists x0; split; auto.
destruct H as [? [? [? [? [? ?]]]]].
exists x0; exists x1; split; auto.
split; auto.
exists x; auto.
Qed.
Lemma exists_expand_sepcon' {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}:
forall B p (q: B -> pred A), (p * exp q)%pred = (exp (fun x => p * q x))%pred.
Proof.
intros; extensionality w; apply prop_ext; split; intros.
destruct H as [? [? [? [? ?]]]].
destruct H1.
exists x1; exists x; exists x0; split; auto.
destruct H as [? [? [? [? [? ?]]]]].
exists x0; exists x1; split; auto.
split; auto.
exists x; auto.
Qed.
Lemma exists_expand_and {A} {JA: Join A}:
forall B (p: B -> pred A) q, (exp p && q)%pred = (exp (fun x => p x && q))%pred.
Proof.
intros; extensionality w; apply prop_ext; split; intros.
destruct H.
destruct H.
exists x; split; auto.
destruct H. destruct H.
split; auto.
exists x; auto.
Qed.
Lemma exists_expand_and' {A} {JA: Join A}:
forall B p (q: B -> pred A), (p && exp q)%pred = (exp (fun x => p && q x))%pred.
Proof.
intros; extensionality w; apply prop_ext; split; intros.
destruct H.
destruct H0.
exists x; split; auto.
destruct H. destruct H.
split; auto.
exists x; auto.
Qed.
Lemma allp_derives_right {A} : forall B p (q: B -> pred A),
((p |-- allp q) <-> (forall x, p |-- q x)).
Proof.
intros.
split; intros.
eapply derives_trans; eauto.
intros ? ?. apply H0.
intros ? ? ?.
eapply (H b).
auto.
Qed.
Lemma wand_exists {A} {JA: Join A}{PA: Perm_alg A}:
forall B P Q, (EX x: B, P -* Q x) |-- (P -* EX x : B, Q x).
Proof.
pose proof I.
intros.
intros w ?.
destruct H0 as [x ?].
intros ?w ?w ? ?.
spec H0 w0 w1 H1 H2.
exists x; auto.
Qed.
Lemma modus_wand {A} {JA: Join A}{PA: Perm_alg A}:
forall P Q, P * (P -* Q) |-- Q.
Proof.
intros.
intros w [?w [?w [? [? ?]]]].
eapply H1; eauto.
Qed.
Lemma distrib_sepcon_andp {A} {JA: Join A}{PA: Perm_alg A}:
forall P Q R, P * (Q && R) |-- (P * Q) && (P * R).
Proof.
intros. intros w [w1 [w2 [? [? ?]]]].
destruct H1.
split; exists w1; exists w2; split; auto.
Qed.
Lemma andp_r {A: Type} : forall (P Q R: pred A), P |-- Q -> P |-- R -> P |-- Q && R.
Proof.
intros.
intros w ?; split; auto.
Qed.
Definition list_sepcon {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A} : list (pred A) -> pred A := fold_right sepcon emp.
Lemma sepcon_andp_prop {A} {JA: Join A}{PA: Perm_alg A}: forall P Q R, P * (!!Q && R) = !!Q && (P * R).
Proof.
intros.
extensionality w; apply prop_ext; split; intros.
destruct H as [w1 [w2 [? [? [? ?]]]]].
split. apply H1.
exists w1; exists w2; split; [|split]; auto.
destruct H.
destruct H0 as [w1 [w2 [? [? ?]]]].
exists w1; exists w2; repeat split; auto.
Qed.
Require Import msl.cross_split.
Lemma exactly_i {A} : forall x: A, exactly x x.
Proof. intros. reflexivity.
Qed.
Hint Resolve @exactly_i.
Lemma superprecise_exactly {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}: forall x, superprecise (exactly x).
Proof.
unfold exactly, superprecise; intros.
subst; auto.
Qed.
Hint Resolve @superprecise_exactly.
Lemma find_overlap {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}:
Cross_alg A ->
forall S P Q R, (S * P) && (Q * R) |--
EX SQ:_, EX SR:_, EX PQ:_, EX PR:_,
(((SQ* SR) && S)*((PQ* PR) && P)) &&
(((SQ* PQ) && Q)*((SR* PR) && R)) &&
!! (superprecise SQ /\ superprecise SR /\ superprecise PQ /\ superprecise PR).
Proof.
pose proof I.
intros.
intros w [[w1 [w2 [? [? ?]]]] [w3 [w4 [? [? ?]]]]].
destruct (X _ _ _ _ _ H0 H3) as [[[[wa wb] wc] wd] [? [? [? ?]]]].
exists (exactly wa); exists (exactly wb); exists (exactly wc); exists (exactly wd).
repeat split; auto.
exists w1; exists w2; split; [|split]; auto; split; auto.
exists wa; exists wb; split; [|split]; auto.
exists wc; exists wd; split; [|split]; auto.
exists w3; exists w4; split; [|split]; auto; split; auto.
exists wa; exists wc; split; [|split]; auto.
exists wb; exists wd; split; [|split]; auto.
Qed.
Lemma modus_ponens {A} : forall (X P Q:pred A),
X |-- P ->
X |-- (P --> Q) ->
X |-- Q.
Proof.
unfold derives, imp; simpl; intuition eauto.
Qed.
Lemma and_intro {A} : forall (X P Q:pred A),
X |-- P ->
X |-- Q ->
X |-- P && Q.
Proof.
unfold derives, imp, andp; simpl; intuition.
Qed.
Lemma and1 {A} : forall (X P Q:pred A),
X |-- P && Q --> P.
Proof.
unfold derives, imp, andp; simpl; intuition eauto.
Qed.
Lemma and2 {A} : forall (X P Q:pred A),
X |-- P && Q --> Q.
Proof.
unfold derives, imp, andp; simpl; intuition eauto.
Qed.
Lemma and3 {A} : forall (X P Q R:pred A),
X |-- (P --> Q) --> (P --> R) --> (P --> Q && R).
Proof.
unfold derives, imp, andp; simpl; intuition eauto.
Qed.
Lemma or1 {A} : forall (X P Q:pred A),
X |-- P --> P || Q.
Proof.
unfold derives, imp, orp; simpl; intuition.
Qed.
Lemma or2 {A} : forall (X P Q:pred A),
X |-- Q --> P || Q.
Proof.
unfold derives, imp, orp; simpl; intuition.
Qed.
Lemma or3 {A} : forall (X P Q R:pred A),
X |-- (P --> R) --> (Q --> R) --> (P || Q --> R).
Proof.
unfold derives, imp, orp; simpl; intuition eauto.
Qed.
Lemma TTrule {A} : forall X (P: pred A),
X |-- P --> TT.
Proof.
unfold derives, imp, TT; simpl; intuition.
Qed.
Lemma FFrule {A} : forall X (P: pred A),
X |-- FF --> P.
Proof.
unfold derives, imp, FF; simpl; intuition.
hnf in H0; contradiction.
Qed.
Lemma distribution {A} : forall (X P Q R:pred A),
X |-- P && (Q || R) --> (P && Q) || (P && R).
Proof.
unfold derives, imp, orp, andp; simpl; intuition.
Qed.
Lemma wand_sepcon_adjoint {A} {JA: Join A}{PA: Perm_alg A} : forall (P Q R:pred A),
((P * Q) |-- R) = (P |-- (Q -* R)).
Proof.
intros. apply prop_ext.
split; intros.
hnf; intros; simpl; intros.
hnf; intros.
apply H.
exists a; exists x; split; auto.
hnf; intros.
destruct H0 as [w [v [? [? ?]]]].
eapply H; eauto.
Qed.
Lemma ewand_sepcon {A} {JA: Join A}{PA: Perm_alg A}: forall P Q R,
(ewand (P * Q) R = ewand P (ewand Q R))%pred.
Proof.
intros; apply pred_ext; intros w ?.
destruct H as [w1 [w2 [? [? ?]]]].
destruct H0 as [w3 [w4 [? [? ?]]]].
exists w3.
destruct (join_assoc (join_comm H0) H) as [wf [? ?]].
exists wf.
split; [|split]; auto.
exists w4. exists w2. split; auto.
destruct H as [w1 [w2 [? [? ?]]]].
destruct H1 as [w3 [w4 [? [? ?]]]].
destruct (join_assoc (join_comm H) (join_comm H1)) as [wf [? ?]].
exists wf. exists w4. split; [|split]; auto.
exists w1; exists w3; split; auto.
Qed.
Lemma andp_right {A} : forall (X P Q:pred A),
X |-- P ->
X |-- Q ->
X |-- P && Q.
Proof.
unfold derives, imp, andp; simpl; intuition.
Qed.
Lemma andp_left1{A}: forall P Q R: pred A, P |-- R -> P && Q |-- R.
Proof. repeat intro. destruct H0; auto.
Qed.
Lemma andp_left2{A}: forall P Q R: pred A, Q |-- R -> P && Q |-- R.
Proof. repeat intro. destruct H0; auto.
Qed.
Lemma orp_left{A}: forall P Q R: pred A, P |-- R -> Q |-- R -> P || Q |-- R.
Proof. repeat intro. destruct H1; auto.
Qed.
Lemma orp_right1{A}: forall P Q R: pred A, P |-- Q -> P |-- Q || R.
Proof. repeat intro. left; auto.
Qed.
Lemma orp_right2{A}: forall P Q R: pred A, P |-- R -> P |-- Q || R.
Proof. repeat intro. right; auto.
Qed.
Lemma exp_right:
forall {B A: Type}(x:B) p (q: B -> pred A),
p |-- q x ->
p |-- exp q.
Proof.
intros.
eapply derives_trans; try apply H.
intros w ?; exists x; auto.
Qed.
Lemma exp_left:
forall {B A: Type}(p: B -> pred A) q,
(forall x, p x |-- q) ->
exp p |-- q.
Proof.
intros.
intros w [x' ?].
eapply H; eauto.
Qed.
Lemma allp_right {B A: Type}:
forall (P: pred A) (Q: B -> pred A),
(forall v, P |-- Q v) ->
P |-- allp Q.
Proof.
intros. intros w ? v; apply (H v); auto.
Qed.
Lemma allp_left {B}{A}:
forall (P: B -> pred A) x Q, P x |-- Q -> allp P |-- Q.
Proof.
intros. intros ? ?. apply H. apply H0.
Qed.
Lemma imp_andp_adjoint {A} : forall (P Q R:pred A),
(P && Q) |-- R <-> P |-- (Q --> R).
Proof.
split; intros.
hnf; intros; simpl; intros.
intro; intros. apply H. split; auto.
intro; intros. destruct H0. apply H; auto.
Qed.
Lemma exp_andp1 {A} :
forall B (p: B -> pred A) q, (exp p && q)%pred = (exp (fun x => p x && q))%pred.
Proof.
intros; apply pred_ext; intros w ?.
destruct H as [[x ?] ?].
exists x; split; auto.
destruct H as [x [? ?]]; split; auto. exists x; auto.
Qed.
Lemma exp_sepcon1 {A} {JA: Join A}{PA: Perm_alg A}:
forall T (P: T -> pred A) Q, (exp P * Q = exp (fun x => P x * Q))%pred.
Proof.
intros.
apply pred_ext; intros ? ?.
destruct H as [w1 [w2 [? [[x ?] ?]]]].
exists x; exists w1; exists w2; split; auto.
destruct H as [x [w1 [w2 [? [? ?]]]]].
exists w1; exists w2; split; auto.
split; auto.
exists x; auto.
Qed.
Definition pure {A}{JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}
(P: pred A) : Prop :=
P |-- emp.
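(* Added note: a predicate is [pure] when it entails [emp], i.e. it holds only
   on unit worlds; on pure predicates [*] and [&&] coincide, see
   [sepcon_pure_andp] below. *)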
Lemma sepcon_pure_andp {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}:
forall P Q, pure P -> pure Q -> ((P * Q) = (P && Q)).
Proof.
intros.
apply pred_ext; intros w ?.
destruct H1 as [w1 [w2 [? [? ?]]]].
unfold pure in *.
assert (unit_for w1 w2). apply H in H2; simpl in H2;
apply identity_unit; auto. exists w; auto.
unfold unit_for in H4.
assert (w2=w) by (apply (join_eq H4 H1)).
subst w2.
assert (join w w1 w1).
apply identity_unit; apply H0 in H3; simpl in H3; auto. exists w; auto.
assert (w1=w) by (apply (join_eq H5 (join_comm H1))).
subst w1.
split; auto.
destruct H1.
exists w; exists w; split; [|split]; auto.
apply H in H1.
clear dependent P. clear dependent Q.
pose proof (core_unit w); unfold unit_for in *.
pose proof (H1 _ _ (join_comm H)).
rewrite H0 in H; auto.
Qed.
Lemma pure_sepcon_TT_andp {A} {JA: Join A}{PA: Perm_alg A}{SA: Sep_alg A}:
forall P Q, pure P -> (P * TT) && Q = (P*Q).
Proof.
pose proof I.
intros.
apply pred_ext.
intros w [? ?].
destruct H1 as [w1 [w2 [? [? ?]]]].
exists w1; exists w2; split; [|split]; auto.
apply join_unit1_e in H1; auto.
subst; auto.
apply andp_right.
apply sepcon_derives; auto.
intros w [w1 [w2 [? [? ?]]]].
apply join_unit1_e in H1; auto.
subst; auto.
Qed.
|
If $x$ is an algebraic number, then there exists a polynomial $p$ with integer coefficients such that $p(x) = 0$. |
/*
* SystemBlock.hpp
*
* Created on: August 15, 2020
* Author: Quincy Jones
*
* Copyright (c) <2020> <Quincy Jones - [email protected]/>
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software
* is furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef NOMAD_CORE_CONTROLLERS_SYSTEMBLOCK_H_
#define NOMAD_CORE_CONTROLLERS_SYSTEMBLOCK_H_
// C System Files
// C++ System Files
#include <iostream>
#include <string>
#include <memory>
// Project Include Files
#include <Eigen/Dense>
#include <Communications/Port.hpp>
#include <Communications/Messages/double_vec_t.hpp>
#include <Communications/Messages/msg_helpers.hpp>
namespace Core::Systems
{
class BlockDiagram;
class SystemBlock
{
friend class BlockDiagram;
public:
// Base Class System Block Node
// name = Task Name
// T_s = Sample Time (-1 for inherit)
SystemBlock(const std::string &name, const double T_s = -1);
// Add Subsystem to System Block
void AddSubSystem();
// Get Output Port
std::shared_ptr<Communications::PortInterface> GetOutputPort(const int port_id) const;
// Get Input Port
std::shared_ptr<Communications::PortInterface> GetInputPort(const int port_id) const;
// Set Transport Configuration for Port
void SetPortOutput(const int port_id, const Communications::PortInterface::TransportType transport, const std::string &transport_url, const std::string &channel);
// Overridden Run Function
virtual void Run(double d_t);
// Overridden Setup Function
virtual void Setup();
// Return System Name
const std::string& Name() const { return name_;}
protected:
static const int MAX_PORTS = 16;
// Update function for stateful outputs
virtual void UpdateStateOutputs() {}
// Update function for stateless outputs
virtual void UpdateStatelessOutputs() {}
// Update function for next state from inputs
virtual void UpdateState() {}
// Sampling Time (s)
double T_s_;
// Current Time (s)
double T_;
// Last Sample Time (s)
double T_prev_;
// Parent Block Diagram/System
BlockDiagram *parent_;
// System Name
std::string name_;
// Input Port Map
std::shared_ptr<Communications::PortInterface> input_port_map_[MAX_PORTS];
// Output Port Map
std::shared_ptr<Communications::PortInterface> output_port_map_[MAX_PORTS];
};
class ConstantBlock : public SystemBlock
{
public:
// Constant System Block Node
// name = Task Name
ConstantBlock(const Eigen::VectorXd &value, const double T_s = -1) : SystemBlock("CONSTANT", T_s)
{
constant_.length = value.size();
constant_.data.resize(constant_.length);
// Map to output message. Would love to have this be eigen types...
Eigen::Map<Eigen::VectorXd>(constant_.data.data(), constant_.length) = value;
// Create Output Port
output_port_map_[0] = std::move(Communications::Port<double_vec_t>::CreateOutput("CONSTANT", T_s_));
}
protected:
// Update function for stateful outputs
void UpdateStateOutputs()
{
}
// Update function for stateless outputs
void UpdateStatelessOutputs()
{
GetOutputPort(0)->Send(constant_);
}
// Update function for next state from inputs
void UpdateState()
{
}
//
//Eigen::VectorXd constant_;
double_vec_t constant_;
};
class AddBlock : public SystemBlock
{
public:
// Operand Type Enum
enum OperandType
{
ADD = 0,
MINUS
};
// Add System Block Node
// T_s = Sample Time (-1 for inherit)
AddBlock(const double T_s = -1) : SystemBlock("ADD", T_s)
{
// Create Output Port
output_port_map_[0] = Communications::Port<double_vec_t>::CreateOutput("ADD", T_s_);
}
void AddInput(OperandType op_type, const int dimension)
{
// Update Dimension
if(!operands_.empty())
{
assert(dimension_ == dimension && operands_.size() < MAX_PORTS);
}
else
{
dimension_ = dimension;
result_.length = dimension_;
result_.data.resize(result_.length);
// Zero output result
Eigen::Map<Eigen::VectorXd>(result_.data.data(), result_.length) = Eigen::VectorXd::Zero(dimension_);
}
// Create Port
input_port_map_[operands_.size()] = Communications::Port<double_vec_t>::CreateInput(std::to_string(operands_.size()), T_s_);
// Update Operation Type
operation_types_.push_back(op_type);
// Add Operand Input
double_vec_t operand;
operand.length = dimension;
operand.data.resize(operand.length);
// Zero initial state
Eigen::Map<Eigen::VectorXd>(operand.data.data(), operand.length) = Eigen::VectorXd::Zero(dimension_);
operands_.push_back(operand);
}
protected:
// Update function for stateful outputs
void UpdateStateOutputs()
{
}
// Update function for stateless outputs
void UpdateStatelessOutputs()
{
if(operands_.empty())
{
return;
}
// Read Input
GetInputPort(0)->Receive(operands_[0]);
Eigen::VectorXd out_result = Eigen::Map<Eigen::VectorXd>(operands_[0].data.data(), dimension_);
for(size_t i = 1; i < operands_.size(); i++)
{
GetInputPort(i)->Receive(operands_[i]);
Eigen::VectorXd operand = Eigen::Map<Eigen::VectorXd>(operands_[i].data.data(), dimension_);
switch (operation_types_[i])
{
case MINUS:
operand = operand * -1; // Negate it
break;
default:
break;
}
out_result = out_result + operand;
}
// Map to output message. Would love to have this be eigen types...
Eigen::Map<Eigen::VectorXd>(result_.data.data(), result_.length) = out_result;
GetOutputPort(0)->Send(result_);
std::cout << "SENDING: " << out_result << std::endl;
}
// Update function for next state from inputs
void UpdateState()
{
}
// Dimension of operand vectors. Must be equal
int dimension_;
std::vector<double_vec_t> operands_;
std::vector<OperandType> operation_types_;
double_vec_t result_;
};
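// Illustrative usage sketch (not part of the original header): an AddBlock
// computing a - b for 3-dimensional signals.  Assumes the block is owned and
// stepped by a BlockDiagram, which is only forward-declared here.
//
//   AddBlock diff(0.001);                // 1 kHz sample time
//   diff.AddInput(AddBlock::ADD, 3);     // operand a
//   diff.AddInput(AddBlock::MINUS, 3);   // operand b (negated before summing)
//   auto out = diff.GetOutputPort(0);    // double_vec_t result port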
template <typename T>
class Demux : public SystemBlock
{
public:
// Demux System Block Node
// T_s = Sample Time (-1 for inherit)
Demux(const double T_s = -1) : SystemBlock("DEMUX", T_s)
{
// Create Input Port
input_port_map_[0] = Communications::Port<T>::CreateInput("INPUT");
// Demux for Output
Communications::conv<T>::Demux();
// Create Output Port
output_port_map_[0] = std::move(Communications::Port<T>::CreateOutput("CONVERTED", T_s_));
}
protected:
// Update function for stateful outputs
void UpdateStateOutputs()
{
}
// Update function for stateless outputs
void UpdateStatelessOutputs()
{
std::cout << "GETTING INPUT" << std::endl;
// Read Type
// Pass to Converter
// Send Outputs
}
// Update function for next state from inputs
void UpdateState()
{
}
T msg_type_;
};
} // namespace Core::Systems
#endif // NOMAD_CORE_CONTROLLERS_SYSTEMBLOCK_H_
|
lemma uniformly_continuous_on_cmul_right [continuous_intros]: fixes f :: "'a::real_normed_vector \<Rightarrow> 'b::real_normed_algebra" shows "uniformly_continuous_on s f \<Longrightarrow> uniformly_continuous_on s (\<lambda>x. f x * c)" |
module PushArgument where
open import Agda.Builtin.Nat
data List (A : Set) : Nat -> Set where
nil : List A 0
cons : {n : Nat} -> A -> List A n -> List A (suc n)
newFunc : {A : Set} -> {B : Set} -> (f : A -> B) -> (x : A) -> B
newFunc f x = f x
map : {n : Nat} -> {A : Set} -> {B : Set} -> (A -> B) -> List A n -> List B n
map f nil = nil
map f (cons x xs) = cons (newFunc f x) (map f xs)
|
[STATEMENT]
lemma bound_cmono: assumes "X \<subseteq> Y" shows "bound Y \<le> bound X"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bound Y \<le> bound X
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
X \<subseteq> Y
goal (1 subgoal):
1. bound Y \<le> bound X
[PROOF STEP]
by auto |
Formal statement is: lemma holomorphic_on_imp_differentiable_at: "\<lbrakk>f holomorphic_on s; open s; x \<in> s\<rbrakk> \<Longrightarrow> f field_differentiable (at x)" Informal statement is: If $f$ is holomorphic on an open set $s$ and $x \in s$, then $f$ is differentiable at $x$. |
Require Export Coq.Bool.Bool.
Lemma negb_true_elim
: forall x
, true = negb x
-> false = x.
Proof. destruct x; auto. Qed.
Lemma negb_true_intro
: forall x
, false = x
-> true = negb x .
Proof. destruct x; eauto. Qed.
Lemma negb_false_elim
: forall x
, false = negb x
-> true = x.
Proof. destruct x; auto. Qed.
Ltac norm_negb
:= match goal with
| [H : true = negb _ |- _ ]
=> apply negb_true_elim in H
| [H : false = negb _ |- _ ]
=> apply negb_false_elim in H
end.
(********************************************************************)
Lemma beq_true_split
: forall A B
, true = andb A B
-> true = A /\ true = B.
Proof.
intros.
destruct A.
tauto.
simpl in H. congruence.
Qed.
Hint Resolve beq_true_split.
Lemma beq_false_split
: forall A B
, false = andb A B
-> false = A \/ false = B.
Proof.
intros.
destruct A.
simpl in H. subst. tauto.
tauto.
Qed.
Hint Resolve beq_false_split.
Lemma beq_false_join
: forall A B
, false = A \/ false = B
-> false = andb A B.
Proof.
intros.
inversion H. subst.
tauto.
destruct A; tauto.
Qed.
Hint Resolve beq_false_join.
Ltac norm_andb
:= match goal with
| [H : true = andb _ _ |- _]
=> apply beq_true_split in H
| [H : andb _ _ = true |- _]
=> symmetry in H; apply beq_true_split in H
| [H : false = andb _ _ |- _]
=> apply beq_false_split in H
| [H : andb _ _ = false |- _]
=> symmetry in H; apply beq_false_split in H
end.
(********************************************************************)
Ltac norm_orb
:= match goal with
| [H : true = orb _ _ |- _]
=> symmetry in H; apply orb_true_iff in H
| [H : orb _ _ = true |- _]
=> apply orb_true_iff in H
| [H : false = orb _ _ |- _]
=> symmetry in H; apply orb_false_iff in H
| [H : orb _ _ = false |- _]
=> apply orb_false_iff in H
end.
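(* Illustrative example (added): [norm_andb] turns a boolean conjunction
   hypothesis into a propositional one, which can then be destructed. *)
Lemma example_norm_andb
 : forall a b
 , true = andb a b
 -> true = a.
Proof.
 intros a b H.
 norm_andb.
 destruct H as [Ha Hb].
 exact Ha.
Qed.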
|
library("WatershedTools")
library(raster)
library(sp)
gisBase <- getGISBase()
testDEM <- raster(system.file("testdata/testDEM.grd", package="WatershedTools"))
testDEM <- projectRaster(testDEM, crs=CRS("+init=epsg:3035"))
gs <- GrassSession(testDEM, layerName = "dem", gisBase = gisBase)
gs <- fillDEM("dem", filledDEM = "filledDEM", probs = "problems", gs = gs)
gs <- drainageAccumulation("filledDEM", accumulation = "accum", drainage = "drain",
gs = gs)
gs <- extractStream(dem = "filledDEM", accumulation = "accum", qthresh = 0.998,
outputName = "streamRas", gs = gs)
streamRas <- GSGetRaster("streamRas", gs)
drainage <- GSGetRaster("drain", gs)
accum <- GSGetRaster("accum", gs)
coords <- coordinates(accum)[which.max(values(accum)),, drop=FALSE]
streamCrop <- cropToCatchment(coords, streamRaster = streamRas, drainage = "drain", gs = gs)
vals <- values(streamCrop)
pts <- coordinates(streamCrop)[!is.na(vals),]
caFile <- "inst/testdata/testCA.rds"
if(file.exists(caFile)) {
catchArea <- readRDS(caFile)
} else {
catchArea <- catchment(pts, "drain", gs)
catchArea <- data.frame(pts, A = catchArea)
saveRDS(catchArea, caFile)
}
# discharge for northernmost point
q <- 5
A <- catchArea[which.max(catchArea$y),'A']
qMat <- discharge_scaling(catchArea, data.frame(A = A, Q = q))
geom <- hydraulic_geometry(qMat$A)
elevCrop <- crop(testDEM, streamCrop)
elevCrop <- resample(elevCrop, streamCrop, 'bilinear')
accumCrop <- crop(accum, streamCrop)
drainCrop <- crop(drainage, streamCrop)
caRaster <- rasterFromXYZ(catchArea, crs=proj4string(elevCrop))
coordinates(geom) <- pts
proj4string(geom) <- proj4string(elevCrop)
gridded(geom) <- TRUE
geom <- stack(geom)
testWS <- Watershed(streamCrop, drainCrop, elevCrop, accumCrop, caRaster, geom)
saveRDS(testWS, "inst/testdata/testWS.rds")
|
[STATEMENT]
lemma set_list:
assumes "finite (set ` XS)"
assumes "\<And>xs. xs \<in> XS \<Longrightarrow> distinct xs"
shows "finite XS"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite XS
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. finite XS
[PROOF STEP]
have "XS \<subseteq> {xs | xs. set xs \<in> set ` XS \<and> distinct xs}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. XS \<subseteq> {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
finite (set ` XS)
?xs \<in> XS \<Longrightarrow> distinct ?xs
goal (1 subgoal):
1. XS \<subseteq> {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
XS \<subseteq> {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
goal (1 subgoal):
1. finite XS
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
XS \<subseteq> {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
goal (1 subgoal):
1. finite XS
[PROOF STEP]
have 1: "{xs |xs. set xs \<in> set ` XS \<and> distinct xs} = \<Union>{{xs | xs. set xs = A \<and> distinct xs} | A. A \<in> set ` XS}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {xs |xs. set xs \<in> set ` XS \<and> distinct xs} = \<Union> {{xs |xs. set xs = A \<and> distinct xs} |A. A \<in> set ` XS}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
{xs |xs. set xs \<in> set ` XS \<and> distinct xs} = \<Union> {{xs |xs. set xs = A \<and> distinct xs} |A. A \<in> set ` XS}
goal (1 subgoal):
1. finite XS
[PROOF STEP]
have "finite {xs |xs. set xs \<in> set ` XS \<and> distinct xs}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
[PROOF STEP]
using finite_set2[OF _ finite_set] distinct_card assms(1)
[PROOF STATE]
proof (prove)
using this:
card (set ?xs1) = ?n \<Longrightarrow> finite {xs. set xs = set ?xs1 \<and> distinct xs}
distinct ?xs \<Longrightarrow> card (set ?xs) = length ?xs
finite (set ` XS)
goal (1 subgoal):
1. finite {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
[PROOF STEP]
unfolding 1
[PROOF STATE]
proof (prove)
using this:
card (set ?xs1) = ?n \<Longrightarrow> finite {xs. set xs = set ?xs1 \<and> distinct xs}
distinct ?xs \<Longrightarrow> card (set ?xs) = length ?xs
finite (set ` XS)
goal (1 subgoal):
1. finite (\<Union> {{xs |xs. set xs = A \<and> distinct xs} |A. A \<in> set ` XS})
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
finite {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
goal (1 subgoal):
1. finite XS
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
XS \<subseteq> {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
finite {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
XS \<subseteq> {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
finite {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
goal (1 subgoal):
1. finite XS
[PROOF STEP]
using finite_subset
[PROOF STATE]
proof (prove)
using this:
XS \<subseteq> {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
finite {xs |xs. set xs \<in> set ` XS \<and> distinct xs}
\<lbrakk>?A \<subseteq> ?B; finite ?B\<rbrakk> \<Longrightarrow> finite ?A
goal (1 subgoal):
1. finite XS
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
finite XS
goal:
No subgoals!
[PROOF STEP]
qed |
[STATEMENT]
lemma (in ring) indexed_pmult_in_carrier:
assumes "carrier_coeff P" shows "carrier_coeff (P \<Otimes> i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. carrier_coeff (P \<Otimes> i)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
carrier_coeff P
goal (1 subgoal):
1. carrier_coeff (P \<Otimes> i)
[PROOF STEP]
unfolding carrier_coeff_def indexed_pmult_def
[PROOF STATE]
proof (prove)
using this:
\<forall>m. P m \<in> carrier R
goal (1 subgoal):
1. \<forall>m. (if i \<in># m then P (m - {#i#}) else \<zero>) \<in> carrier R
[PROOF STEP]
by simp |
function [p,q,D,sc] = dpfast(M,C,T,G)
% [p,q,D,sc] = dpfast(M,C,T,G)
% Use dynamic programming to find a min-cost path through matrix M.
% Return state sequence in p,q; full min cost matrix as D and
% local costs along best path in sc.
% This version gives the same results as dp.m, but uses dpcore.mex
% to run ~200x faster.
% C is a step matrix, with rows (i step, j step, cost factor)
% Default is [1 1 1.0;0 1 1.0;1 0 1.0];
% Another good one is [1 1 1;1 0 1;0 1 1;1 2 2;2 1 2]
% T selects traceback origin: 0 is to any edge; 1 is top right (default);
% T > 1 finds path to min of anti-diagonal T points away from top-right.
% Optional G defines length of 'gulleys' for T=0 mode; default 0.5
% (i.e. accept path to only 50% of edge nearest top-right)
% 2003-04-04,2005-04-04 [email protected] $Header: /Users/dpwe/projects/dtw/RCS/dpfast.m,v 1.6 2008/03/14 14:40:50 dpwe Exp dpwe $
% Copyright (c) 2003 Dan Ellis <[email protected]>
% released under GPL - see file COPYRIGHT
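% Example (illustrative, assumes dpcore.mex has been compiled for this platform):
%   M = rand(100,120);               % some non-negative local-cost matrix
%   [p,q,D,sc] = dpfast(M);          % min-cost path with default steps, T=1
%   imagesc(M); hold on; plot(q,p,'r'); hold off;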
if nargin < 2
% Default step / cost matrix
C = [1 1 1.0;0 1 1.0;1 0 1.0];
end
if nargin < 3
% Default: path to top-right
T = 1;
end
if nargin < 4
% how big are gulleys?
G = 0.5; % half the extent
end
if sum(isnan(M(:)))>0
error('dpwe:dpfast:NAN','Error: Cost matrix includes NaNs');
end
if min(M(:)) < 0
disp('Warning: cost matrix includes negative values; results may not be what you expect');
end
[r,c] = size(M);
% Core cumulative cost calculation coded as mex
[D,phi] = dpcore(M,C);
p = [];
q = [];
%% Traceback from top left?
%i = r;
%j = c;
if T == 0
% Traceback from lowest cost "to edge" (gulleys)
TE = D(r,:);
RE = D(:,c);
% eliminate points not in gulleys
TE(1:round((1-G)*c)) = max(max(D));
RE(1:round((1-G)*r)) = max(max(D));
if (min(TE) < min(RE))
i = r;
j = max(find(TE==min(TE)));
else
i = max(find(RE==min(RE)));
j = c;
end
else
if min(size(D)) == 1
% degenerate D has only one row or one column - messes up diag
i = r;
j = c;
else
% Traceback from min of antidiagonal
%stepback = floor(0.1*c);
stepback = T;
slice = diag(fliplr(D),-(r-stepback));
[mm,ii] = min(slice);
i = r - stepback + ii;
j = c + 1 - ii;
end
end
p=i;
q=j;
sc = M(p,q);
while i > 1 && j > 1
% disp(['i=',num2str(i),' j=',num2str(j)]);
tb = phi(i,j);
i = i - C(tb,1);
j = j - C(tb,2);
p = [i,p];
q = [j,q];
sc = [M(i,j),sc];
end
|
(** * Notation for decidable propositions *)
Require Import Arith.
Definition dec (X : Prop) : Type := {X} + {~ X}.
Class Dec (X : Prop) : Type := decide : dec X.
Arguments decide X {_}.
Ltac gen_Dec_eq := unfold Dec; unfold dec; decide equality.
Instance decide_eq_nat (x y : nat) : Dec (x = y). gen_Dec_eq. Defined.
Instance decide_le_nat (x y : nat) : Dec (x <= y). apply le_dec. Defined.
Instance decide_lt_nat (x y : nat) : Dec (x < y). apply lt_dec. Defined.
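(* Illustrative example (added): with the instances above, [decide] lets us
   case-split on a decidable proposition such as equality of naturals. *)
Goal forall x y : nat, x = y \/ x <> y.
Proof. intros x y. destruct (decide (x = y)). left; assumption. right; assumption. Qed.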
Tactic Notation "decide" constr(p) := destruct (decide p). |
(* Title: HOL/Auth/n_germanSimp_lemma_on_inv__35.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSimp Protocol Case Study*}
theory n_germanSimp_lemma_on_inv__35 imports n_germanSimp_base
begin
section{*All lemmas on causal relation between inv__35 and some rule r*}
lemma n_SendInvAckVsinv__35:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__35 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__35:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__35 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i~=p__Inv4)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__35:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__35 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv4) ''State'')) (Const I))) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv4)) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv4) ''State'')) (Const I))) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv4)) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__35:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__35 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const GntS))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__35:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__35 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__35:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendGntSVsinv__35:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendGntS i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqE__part__0Vsinv__35:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__0Vsinv__35:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__1Vsinv__35:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqE__part__1Vsinv__35:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqSVsinv__35:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__35 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
State Before: α✝ : Type u
β : Type v
γ : Type ?u.25434
δ : Type ?u.25437
α : Type u_1
inst✝ : PartialOrder α
A B : OrderBot α
⊢ A = B State After: case mk
α✝ : Type u
β : Type v
γ : Type ?u.25434
δ : Type ?u.25437
α : Type u_1
inst✝ : PartialOrder α
B : OrderBot α
toBot✝ : Bot α
ha : ∀ (a : α), ⊥ ≤ a
⊢ mk ha = B Tactic: rcases A with ⟨ha⟩ State Before: case mk
α✝ : Type u
β : Type v
γ : Type ?u.25434
δ : Type ?u.25437
α : Type u_1
inst✝ : PartialOrder α
B : OrderBot α
toBot✝ : Bot α
ha : ∀ (a : α), ⊥ ≤ a
⊢ mk ha = B State After: case mk.mk
α✝ : Type u
β : Type v
γ : Type ?u.25434
δ : Type ?u.25437
α : Type u_1
inst✝ : PartialOrder α
toBot✝¹ : Bot α
ha : ∀ (a : α), ⊥ ≤ a
toBot✝ : Bot α
hb : ∀ (a : α), ⊥ ≤ a
⊢ mk ha = mk hb Tactic: rcases B with ⟨hb⟩ State Before: case mk.mk
α✝ : Type u
β : Type v
γ : Type ?u.25434
δ : Type ?u.25437
α : Type u_1
inst✝ : PartialOrder α
toBot✝¹ : Bot α
ha : ∀ (a : α), ⊥ ≤ a
toBot✝ : Bot α
hb : ∀ (a : α), ⊥ ≤ a
⊢ mk ha = mk hb State After: case mk.mk.e_toBot
α✝ : Type u
β : Type v
γ : Type ?u.25434
δ : Type ?u.25437
α : Type u_1
inst✝ : PartialOrder α
toBot✝¹ : Bot α
ha : ∀ (a : α), ⊥ ≤ a
toBot✝ : Bot α
hb : ∀ (a : α), ⊥ ≤ a
⊢ toBot✝¹ = toBot✝ Tactic: congr State Before: case mk.mk.e_toBot
α✝ : Type u
β : Type v
γ : Type ?u.25434
δ : Type ?u.25437
α : Type u_1
inst✝ : PartialOrder α
toBot✝¹ : Bot α
ha : ∀ (a : α), ⊥ ≤ a
toBot✝ : Bot α
hb : ∀ (a : α), ⊥ ≤ a
⊢ toBot✝¹ = toBot✝ State After: case mk.mk.e_toBot.bot
α✝ : Type u
β : Type v
γ : Type ?u.25434
δ : Type ?u.25437
α : Type u_1
inst✝ : PartialOrder α
toBot✝¹ : Bot α
ha : ∀ (a : α), ⊥ ≤ a
toBot✝ : Bot α
hb : ∀ (a : α), ⊥ ≤ a
⊢ ⊥ = ⊥ Tactic: ext State Before: case mk.mk.e_toBot.bot
α✝ : Type u
β : Type v
γ : Type ?u.25434
δ : Type ?u.25437
α : Type u_1
inst✝ : PartialOrder α
toBot✝¹ : Bot α
ha : ∀ (a : α), ⊥ ≤ a
toBot✝ : Bot α
hb : ∀ (a : α), ⊥ ≤ a
⊢ ⊥ = ⊥ State After: no goals Tactic: exact le_antisymm (ha _) (hb _) |
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Algebra.RingSolver.IntAsRawRing where
open import Cubical.Data.Nat hiding (_+_; _·_)
open import Cubical.Data.Int
open import Cubical.Data.Int.Base renaming (Int to ℤ) public
open import Cubical.Foundations.Prelude
open import Cubical.Algebra.RingSolver.RawRing
ℤAsRawRing : RawRing {ℓ-zero}
ℤAsRawRing = rawring ℤ (pos zero) (pos (suc zero)) _+_ _·_ (λ k → - k)
+Ridℤ : (k : ℤ) → (pos zero) + k ≡ k
+Ridℤ k = sym (pos0+ k)
|
range.cap = function( x, r ) {
x[ which( x < r[1] ) ] = r[1]
x[ which( x > r[2] ) ] = r[2]
return(x)
}
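# Example (illustrative): clamp values to the interval [0, 1]
#   range.cap(c(-0.5, 0.2, 1.7), c(0, 1))   # returns 0.0 0.2 1.0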
|
/-
Copyright (c) 2022 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import for_mathlib.algebra.homology.hom_complex
import for_mathlib.algebra.homology.bounded_above
noncomputable theory
open category_theory category_theory.category category_theory.limits category_theory.preadditive
namespace cochain_complex
namespace hom_complex
variables {C : Type*} [category C] [preadditive C]
variables {F G K : cochain_complex C ℤ} {n : ℤ} (z : cocycle F G n) {m : ℤ} [∀ p, has_binary_biproduct (F.X (p+1-n)) (G.X p)]
include z
namespace twist
@[protected, simp]
def δ (p q : ℤ) :
biprod (F.X (p+1-n)) (G.X p) ⟶ biprod (F.X (q+1-n)) (G.X q) :=
begin
refine biprod.desc (biprod.lift (ε (n+1) • F.d (p+1-n) (q+1-n)) _) (biprod.lift 0 (G.d p q)),
by_cases p+1 = q,
{ exact (z : cochain F G n).v (p+1-n) q (show q=(p+1-n)+n, by linarith), },
{ exact 0, },
end
end twist
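/-- Added note: given a cocycle `z : cocycle F G n`, the twisted complex `twist z`
has `F.X (p+1-n) ⊞ G.X p` in degree `p`, with differential `twist.δ z`
(a mapping-cone-like construction). -/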
@[simps]
def twist : cochain_complex C ℤ :=
{ X := λ p, biprod (F.X (p+1-n)) (G.X p),
d := λ p q, twist.δ z p q,
shape' := λ p q hpq, begin
dsimp [twist.δ],
ext,
{ simp only [biprod.inl_desc, biprod.lift_fst, comp_zero, zero_comp],
rw [F.shape, smul_zero],
intro h,
apply hpq,
change p+1-n+1=q+1-n at h,
change p+1=q,
linarith, },
{ simp only [biprod.inl_desc, biprod.lift_snd, comp_zero, zero_comp],
split_ifs,
{ exfalso, exact hpq h, },
{ refl, }, },
{ simp only [biprod.inr_desc, biprod.lift_fst, comp_zero, zero_comp], },
{ simp only [biprod.inr_desc, biprod.lift_snd, comp_zero, zero_comp, G.shape p q hpq], },
end,
d_comp_d' := λ i j k hij hjk, begin
change i+1=j at hij,
subst hij,
change i+1+1=k at hjk,
have hjk' : i+2 = k := by linarith,
subst hjk',
simp only [twist.δ, dif_pos rfl, dif_pos (show i+1+1 = i+2, by linarith)],
ext,
{ simp only [add_zero, biprod.inl_desc_assoc, biprod.lift_desc, linear.smul_comp,
add_comp, assoc, biprod.lift_fst,
linear.comp_smul, homological_complex.d_comp_d, smul_zero, comp_zero, zero_comp], },
{ simp only [biprod.inl_desc_assoc, biprod.lift_desc, linear.smul_comp,
add_comp, assoc, biprod.lift_snd, comp_zero, zero_comp],
have hz₁ := z.2,
rw cocycle.mem_iff n (n+1) rfl at hz₁,
have hz₂ := cochain.congr_v hz₁ (i+1-n) (i+2) (by linarith),
simp only [δ_v n (n+1) rfl _ (i+1-n) (i+2) (by linarith) (i+1) (i+1+1-n)
(by linarith) (by linarith), cochain.zero_v] at hz₂,
rw ← hz₂,
abel, },
{ simp only [zero_add, biprod.inr_desc_assoc, biprod.lift_desc, zero_comp, assoc, biprod.lift_fst, comp_zero], },
{ simp only [zero_add, biprod.inr_desc_assoc, biprod.lift_desc, zero_comp, assoc, biprod.lift_snd, homological_complex.d_comp_d,
comp_zero], },
end }
namespace twist
omit z
lemma is_bounded_above (z : cocycle F G n) (hF : F.is_bounded_above) (hG : G.is_bounded_above) :
(twist z).is_bounded_above :=
begin
cases hF with r hr,
cases hG with s hs,
use max (r+n-1) s,
intros i hi,
dsimp only [twist],
rw is_zero.iff_of_biprod,
split,
{ apply hr,
have h := lt_of_le_of_lt (le_max_left _ _) hi,
linarith, },
{ apply hs,
exact lt_of_le_of_lt (le_max_right _ _) hi, },
end
include z
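-- Added note: the structural (co)chains of the twist.  `inl` and `fst` are the
-- biproduct inclusion/projection on the `F` summand (in degrees `n-1` and
-- `1-n` respectively), while `inr` and `snd` are the inclusion/projection on
-- the `G` summand (a chain map and a `0`-cochain).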
def inl {n₀ : ℤ} (hn₀ : n₀+1=n) : cochain F (twist z) n₀ :=
cochain.mk (λ p q hpq, (cochain.of_hom (𝟙 F)).v p (q+1-n) (by linarith) ≫ biprod.inl)
def inr : G ⟶ twist z := { f := λ p, biprod.inr, }
def fst {n₁ : ℤ} (hn₁ : n+n₁=1) : cocycle (twist z) F n₁ :=
cocycle.mk (cochain.mk (λ p q hpq, biprod.fst ≫
(cochain.of_hom (𝟙 F)).v (p+1-n) q (show q=p+1-n+0, by linarith))) (n₁+1) rfl
begin
have hn₁' : n₁ = 1-n := by linarith,
subst hn₁',
ext1,
simp only [δ_v (1-n) (1-n+1) rfl _ p q hpq (p+1-n) (p+1) (by linarith) rfl,
cochain.mk_v, cochain.of_hom_v, homological_complex.id_f, comp_id, twist_d, twist.δ, dif_pos,
cochain.zero_v],
ext,
{ have hq : q = p+1+1-n := by linarith,
subst hq,
have eq : ε (1-n+1) * ε (n+1) = ε ((1 : ℤ) + 1 + 1) := by { rw ← ε_add, congr' 1, linarith, },
have eq' : ε ((1 : ℤ)+1+1) = -1 := by { simp only [ε_succ, ε_1, neg_neg], },
simp only [biprod.inl_fst_assoc, biprod.inl_desc_assoc, biprod.lift_fst, comp_add,
cochain.of_hom_v, homological_complex.id_f, comp_zsmul, comp_id, comp_zero, smul_smul,
eq, eq', neg_smul, one_zsmul, add_right_neg], },
{ simp only [zero_add, neg_eq_zero, comp_add, biprod.inr_fst_assoc, zero_comp,
linear.comp_smul, biprod.inr_desc_assoc, biprod.lift_fst_assoc, smul_zero, comp_zero], },
end
def snd : cochain (twist z) G 0 :=
cochain.mk (λ p q hpq, biprod.snd ≫ (cochain.of_hom (𝟙 G)).v p q hpq)
@[simp]
lemma inl_comp_fst {n₀ n₁ : ℤ} (hn₀ : n₀+1=n) (hn₁ : n+n₁=1) :
(inl z hn₀).comp ↑(fst z hn₁) (show 0=n₀+n₁, by linarith) = cochain.of_hom (𝟙 F) :=
begin
ext,
dsimp [cochain.comp, cochain.mk, cochain.v, cochain.of_hom, cochain.of_homs, inl, fst],
simp only [id_comp, assoc, biprod.inl_fst_assoc, eq_to_hom_trans, eq_to_hom_refl],
end
@[simp]
lemma inl_comp_snd {n₀ : ℤ} (hn₀ : n₀+1=n) :
(inl z hn₀).comp (snd z) (add_zero n₀).symm = 0 :=
begin
ext,
simp only [inl, snd, cochain.comp, cochain.mk, cochain.v, cochain.of_hom, cochain.of_homs,
assoc, biprod.inl_snd_assoc, zero_comp, comp_zero, cochain.zero_v],
end
@[simp]
lemma inr_comp_fst {n₁ : ℤ} (hn₁ : n+n₁=1) :
(cochain.of_hom (inr z)).comp (fst z hn₁ : cochain (twist z) F n₁) (zero_add n₁).symm = 0 :=
begin
ext,
simp only [inr, fst, cochain.zero_cochain_comp, cochain.of_hom_v, cocycle.mk_coe,
cochain.mk_v, biprod.inr_fst_assoc, zero_comp, cochain.zero_v],
end
@[simp]
lemma inr_comp_snd :
(cochain.of_hom (inr z)).comp (snd z) (add_zero 0).symm = cochain.of_hom (𝟙 G) :=
begin
ext,
simp only [inr, snd, cochain.comp_zero_cochain, cochain.mk_v, cochain.of_hom_v,
homological_complex.id_f, comp_id, biprod.inr_snd],
end
@[simp]
lemma δ_inl {n₀ : ℤ} (hn₀ : n₀+1=n) :
δ n₀ n (inl z hn₀) = cochain.comp ↑z (cochain.of_hom (inr z)) (add_zero n).symm :=
begin
ext1,
simp only [δ_v n₀ n hn₀ (inl z hn₀) p q hpq _ _ rfl rfl, twist_d, twist.δ],
ext,
{ simp only [← hn₀, inl, cochain.mk_v, ε_succ, neg_neg, assoc, biprod.inl_desc, neg_smul,
add_comp, biprod.lift_fst, comp_zsmul, cochain.of_hom_v_comp_d,
homological_complex.id_f, id_comp, neg_comp, zsmul_comp, biprod.inl_fst, comp_id,
cochain.d_comp_of_hom_v, add_right_neg, cochain.comp_zero_cochain, cochain.of_hom_v,
inr, biprod.inr_fst, comp_zero], },
{ simp only [inl, inr, add_zero, sub_add_cancel, eq_self_iff_true, cochain.mk_v,
dif_pos, assoc, biprod.inl_desc, add_comp, biprod.lift_snd,
linear.smul_comp, biprod.inl_snd, comp_zero, smul_zero, cochain.comp_zero_cochain,
cochain.of_hom_v, biprod.inr_snd, comp_id, id_comp,
cochain.zero_cochain_comp' _ _ p (q-1+1-n) q, homological_complex.id_f], },
end
@[simp]
lemma δ_snd {n₁ : ℤ} (hn₁ : n+n₁=1) :
δ 0 1 (snd z) = -cochain.comp (fst z hn₁ : cochain (twist z) F n₁) (↑z) (show 1 = n₁+n, by rw [← hn₁, add_comm]) :=
begin
ext1,
simp only [δ_v 0 1 (zero_add 1) _ p q hpq p q (by linarith) hpq, fst, snd, zero_add, ε_1,
cochain.mk_v, cochain.of_hom_v, homological_complex.id_f, comp_id, neg_zsmul, one_zsmul,
cochain.neg_v, cocycle.mk_coe, twist_d, twist.δ,
cochain.comp_v _ _ (show 1=n₁+n, by linarith) p (p+1-n) q (by linarith) (by linarith)],
ext,
{ simp only [dif_pos hpq.symm, zero_add, comp_add, biprod.inl_snd_assoc, zero_comp,
comp_neg, biprod.inl_desc_assoc, biprod.lift_snd, biprod.inl_fst_assoc], },
{ simp only [neg_zero, comp_add, biprod.inr_snd_assoc, comp_neg, biprod.inr_desc_assoc,
biprod.lift_snd, add_right_neg, biprod.inr_fst_assoc, zero_comp], },
end
lemma id_eq {n₀ n₁ : ℤ} (hn₀ : n₀+1=n) (hn₁ : n+n₁=1) : cochain.of_hom (𝟙 (twist z)) =
cochain.comp ↑(fst z hn₁) (inl z hn₀) (show 0=n₁+n₀, by linarith) +
cochain.comp (snd z) (cochain.of_hom (inr z)) (zero_add 0).symm :=
begin
ext1,
simpa only [fst, inl, snd, inr, cochain.add_v,
cochain.comp_v _ _ (show 0 = n₁+n₀, by linarith) p (p+1-n) p (by linarith) (by linarith),
cochain.of_hom_v, homological_complex.id_f, cocycle.mk_coe, cochain.mk_v,
comp_id, id_comp, cochain.comp_zero_cochain, biprod.total],
end
lemma cochain_ext (y₁ y₂ : cochain (twist z) K m) {n₀ n₁ : ℤ} (hn₀ : n₀+1=n)
(hn₁ : n₁ = n₀+m) :
y₁ = y₂ ↔ cochain.comp (inl z hn₀) y₁ hn₁ = cochain.comp (inl z hn₀) y₂ hn₁ ∧
cochain.comp (cochain.of_hom (inr z)) y₁ (zero_add m).symm =
cochain.comp (cochain.of_hom (inr z)) y₂ (zero_add m).symm :=
begin
split,
{ intro h, rw h, tauto, },
{ rintro ⟨hl, hr⟩,
suffices : cochain.comp (cochain.of_hom (𝟙 _)) y₁ (zero_add m).symm =
cochain.comp (cochain.of_hom (𝟙 _)) y₂ (zero_add m).symm,
{ ext1,
simpa only [cochain.id_comp] using cochain.congr_v this p q hpq, },
simp only [id_eq z hn₀ (show n+(-n₀)=1, by linarith), cochain.add_comp,
cochain.comp_assoc_of_second_is_zero_cochain,
cochain.comp_assoc _ _ _ (show 0=-n₀+n₀, by linarith) (show n₁=n₀+m, by linarith)
(show m=-n₀+n₀+m, by linarith), hl, hr], }
end
def desc_cochain {m m₁ : ℤ} (y₁ : cochain F K m₁) (y₂ : cochain G K m)
(hm₁ : m₁+1=n+m) : cochain (twist z) K m :=
cochain.comp ↑(fst z (show n+(m-m₁) = 1, by linarith)) y₁ (eq_add_of_sub_eq rfl : m=(m-m₁)+m₁) +
cochain.comp (snd z) y₂ (zero_add m).symm
lemma desc_cochain_eq {m m₁ n₁ : ℤ} (y₁ : cochain F K m₁) (y₂ : cochain G K m)
(hm₁ : m₁+1=n+m) (hn₁ : n+n₁=1) : desc_cochain z y₁ y₂ hm₁ =
cochain.comp ↑(fst z hn₁) y₁ (show m = n₁+m₁, begin
suffices : m+1=n₁+m₁+1,
{ simpa only [add_left_inj] using this, },
rw [add_assoc, hm₁, ← hn₁, add_comm n₁, add_comm n m, add_assoc],
end) + cochain.comp (snd z) y₂ (zero_add m).symm :=
begin
have h : n₁ = m-m₁ := by linarith,
subst h,
refl,
end
lemma inl_comp_desc_cochain {m m₁ n₀ : ℤ} (y₁ : cochain F K m₁)
(y₂ : cochain G K m) (hm₁ : m₁+1=n+m) (hn₀ : n₀+1=n) :
cochain.comp (inl z hn₀) (desc_cochain z y₁ y₂ hm₁) begin
suffices : m₁+1 = n₀+m+1,
{ simpa only [add_left_inj] using this, },
rw [add_assoc, hm₁, ← hn₀, add_assoc, add_comm 1 m],
end = y₁ :=
begin
simp only [desc_cochain_eq z y₁ y₂ hm₁ (show n+(-n₀)=1, by linarith), cochain.comp_add,
← cochain.comp_assoc (inl z hn₀) _ y₁ (show 0=n₀+(-n₀), by linarith)
(show m= _, by linarith) (show m₁=_, by linarith),
← cochain.comp_assoc_of_second_is_zero_cochain, add_zero,
inl_comp_fst, inl_comp_snd, cochain.id_comp, cochain.zero_comp],
end
lemma inr_comp_desc_cochain {m m₁ : ℤ} (y₁ : cochain F K m₁)
(y₂ : cochain G K m) (hm₁ : m₁+1=n+m) :
cochain.comp (cochain.of_hom (inr z)) (desc_cochain z y₁ y₂ hm₁) (zero_add m).symm = y₂ :=
begin
simp only [desc_cochain_eq z y₁ y₂ hm₁ (show n+(1-n)=1, by linarith), cochain.comp_add,
← cochain.comp_assoc_of_second_is_zero_cochain, inr_comp_snd, cochain.id_comp,
← cochain.comp_assoc_of_first_is_zero_cochain, inr_comp_fst, cochain.zero_comp, zero_add],
end
lemma δ_desc_cochain {m m₁ m₂ n₁ : ℤ} (y₁ : cochain F K m₁) (y₂ : cochain G K m)
(hm₁ : m₁+1=n+m) (hn₁ : n+n₁=1) (hm₂ : m₁+1=m₂)
(m' : ℤ) (hm' : m+1=m') :
δ m m' (desc_cochain z y₁ y₂ hm₁) =
cochain.comp (fst z hn₁ : cochain (twist z) F n₁) (δ m₁ m₂ y₁ +
ε (m+1) • cochain.comp ↑z y₂ (show m₂ = n+m, by linarith)) (show m' = n₁+m₂, by linarith) +
cochain.comp (snd z) (δ m m' y₂) (zero_add m').symm :=
begin
simp only [desc_cochain_eq z y₁ y₂ hm₁ hn₁, δ_add, cochain.comp_add,
δ_comp_of_first_is_zero_cochain _ _ _ hm', δ_snd z hn₁,
δ_comp ↑(fst z hn₁) y₁ (show m = n₁+m₁, by linarith) _ m₂ m' hm' rfl hm₂,
cochain.comp_zsmul, cochain.neg_comp, zsmul_neg, ε_add, ε_1, mul_neg, mul_one,
neg_zsmul, cochain.comp_neg, cocycle.δ_eq_zero, cochain.zero_comp, zsmul_zero, add_zero,
add_assoc],
rw cochain.comp_assoc _ _ _ (show 1=n₁+n, by linarith) (show m₂=n+m, by linarith)
(show m' = n₁+n+m, by linarith),
conv_rhs { congr, skip, rw add_comm, },
end
@[simps]
def desc_cocycle {m m₁ n₂ : ℤ} (y₁ : cochain F K m₁) (y₂ : cocycle G K m)
(hm₁ : m₁+1=n+m) (hn₂ : n₂ = n+m)
(hy : δ m₁ n₂ y₁ = ε m • cochain.comp (z : cochain F G n) (y₂ : cochain G K m) hn₂) :
cocycle (twist z) K m :=
cocycle.mk (desc_cochain z y₁ ↑y₂ hm₁) _ rfl
begin
simp only [δ_desc_cochain z y₁ ↑y₂ hm₁ (show n+(1-n)=1, by linarith) (show m₁+1=n₂, by linarith) _ rfl,
cocycle.δ_eq_zero, cochain.comp_zero, add_zero, hy, ε_add, ε_1, mul_neg, mul_one, neg_zsmul,
add_right_neg, cochain.comp_zero],
end
lemma inr_comp_desc_cocycle {m m₁ n₂ : ℤ} (y₁ : cochain F K m₁) (y₂ : cocycle G K m)
(hm₁ : m₁+1=n+m) (hn₂ : n₂ = n+m)
(hy : δ m₁ n₂ y₁ = ε m • cochain.comp (z : cochain F G n) (y₂ : cochain G K m) hn₂) :
cochain.comp (cochain.of_hom (inr z)) (desc_cocycle z y₁ y₂ hm₁ hn₂ hy : cochain (twist z) K m)
(zero_add m).symm = y₂ :=
by simp only [desc_cocycle, cocycle.mk_coe, inr_comp_desc_cochain]
@[simps]
def desc_hom_as_cocycle {m₁ : ℤ} (y₁ : cochain F K m₁) (y₂ : G ⟶ K)(hm₁ : m₁+1=n)
(hy : δ m₁ n y₁ = cochain.comp (z : cochain F G n) (cochain.of_hom y₂) (add_zero n).symm) :
cocycle (twist z) K 0 :=
begin
apply desc_cocycle z y₁ (cocycle.of_hom y₂) (by linarith) (add_zero n).symm,
simpa only [hy, ε_0, one_zsmul],
end
@[simps]
def desc {m₁ : ℤ} (y₁ : cochain F K m₁) (y₂ : G ⟶ K)
(hm₁ : m₁+1=n)
(hy : δ m₁ n y₁ = cochain.comp (z : cochain F G n) (cochain.of_hom y₂) (add_zero n).symm) :
twist z ⟶ K :=
cocycle.hom_of (desc_hom_as_cocycle z y₁ y₂ hm₁ hy)
@[simp]
lemma inr_comp_desc {m₁ : ℤ} (y₁ : cochain F K m₁) (y₂ : G ⟶ K)
(hm₁ : m₁+1=n)
(hy : δ m₁ n y₁ = cochain.comp (z : cochain F G n) (cochain.of_hom y₂) (add_zero n).symm) :
inr z ≫ desc z y₁ y₂ hm₁ hy = y₂ :=
begin
apply (cocycle.equiv_hom G K).to_equiv.injective,
ext1,
dsimp [cocycle.equiv_hom],
simp only [cocycle.of_hom, cocycle.mk_coe, cochain.of_hom_comp, desc,
cocycle.cochain_of_hom_hom_of_eq_coe, desc_hom_as_cocycle_coe, inr_comp_desc_cochain],
end
def lift_cochain {m₁ : ℤ} (y₁ : cochain K F m₁) (y₂ : cochain K G m)
(hm : m+1=m₁+n) : cochain K (twist z) m :=
cochain.comp y₁ (inl z (show (n-1)+1=n, by linarith)) (show m=m₁+(n-1), by linarith) +
cochain.comp y₂ (cochain.of_hom (inr z)) (add_zero m).symm
lemma lift_cochain_eq {m₁ n₀ : ℤ} (y₁ : cochain K F m₁) (y₂ : cochain K G m) (hm : m+1=m₁+n)
(hn₀ : n₀+1=n) : lift_cochain z y₁ y₂ hm =
cochain.comp y₁ (inl z hn₀) (begin
suffices : m+1=m₁+n₀+1,
{ simpa only [add_left_inj] using this, },
rw [hm, ← hn₀, add_assoc],
end) + cochain.comp y₂ (cochain.of_hom (inr z)) (add_zero m).symm :=
begin
have eq : n₀ = n-1 := by linarith,
subst eq,
refl,
end
lemma lift_cochain_v {m₁ n₀ : ℤ} (y₁ : cochain K F m₁) (y₂ : cochain K G m)
(hm : m+1=m₁+n) (hn₀ : n₀+1 = n) (p q : ℤ) (hpq : q = p + m) (p' : ℤ) (hp' : p' = p + m₁):
(lift_cochain z y₁ y₂ hm).v p q hpq =
y₁.v p p' hp' ≫ (inl z hn₀).v p' q (by rw [hpq, hp', add_assoc, add_right_inj,
← add_left_inj (1 : ℤ), hm, ← hn₀, add_assoc]) +
y₂.v p q hpq ≫ (inr z).f q :=
begin
have hn₀' : n₀ = n-1 := by linarith,
substs hn₀' hp' hpq,
dsimp [lift_cochain, cochain.comp],
simp only [cochain.of_hom_v],
end
@[simp]
lemma lift_cochain_comp_fst {m₁ n₁ : ℤ} (y₁ : cochain K F m₁) (y₂ : cochain K G m) (hm : m+1=m₁+n)
(hn₁ : n+n₁=1) : cochain.comp (lift_cochain z y₁ y₂ hm) ↑(fst z hn₁)
(show m₁=m+n₁, by { suffices : m₁+n = m+n₁+n,
{ simpa only [add_left_inj] using this,},
rw [← hm, ← hn₁, add_comm n, add_assoc]}) = y₁ :=
begin
simp only [lift_cochain, cochain.add_comp,
cochain.comp_assoc _ _ _ (show m=m₁+(n-1), by linarith) (show 0=n-1+n₁, by linarith)
(show m₁=_, by linarith), inl_comp_fst, cochain.comp_id, add_zero,
cochain.comp_assoc_of_second_is_zero_cochain, inr_comp_fst, cochain.comp_zero],
end
@[simp]
lemma lift_cochain_comp_snd {m₁ : ℤ} (y₁ : cochain K F m₁) (y₂ : cochain K G m) (hm : m+1=m₁+n) :
cochain.comp (lift_cochain z y₁ y₂ hm) (snd z) (add_zero m).symm = y₂ :=
by simp only [lift_cochain, cochain.add_comp, cochain.comp_assoc_of_third_is_zero_cochain,
inl_comp_snd, cochain.comp_zero, zero_add, inr_comp_snd, cochain.comp_id]
lemma δ_lift_cochain {m₁ n₀ m₂ : ℤ} (y₁ : cochain K F m₁) (y₂ : cochain K G m) (hm : m+1=m₁+n)
(hn₀ : n₀+1=n) (hm₂ : m₁+1=m₂) (m' : ℤ) (hm' : m+1=m') :
δ m m' (lift_cochain z y₁ y₂ hm) =
ε n₀ • cochain.comp (δ m₁ m₂ y₁) (inl z hn₀)
(by rw [← hm', ← hm₂, hm, ← hn₀, add_comm n₀ 1, add_assoc]) +
cochain.comp (δ m m' y₂ + cochain.comp y₁ ↑z (by rw [← hm', hm]))
(cochain.of_hom (inr z)) (add_zero m').symm :=
begin
simp only [lift_cochain_eq z y₁ y₂ hm hn₀, δ_add,
δ_comp y₁ (inl z hn₀) (show m = m₁+n₀, by linarith) m₂ n m' hm' hm₂ hn₀,
δ_comp_of_second_is_zero_cochain _ _ _ hm', δ_inl, cocycle.δ_cochain_of_hom,
cochain.comp_zero, zero_add, cochain.comp_assoc_of_third_is_zero_cochain,
cochain.add_comp],
conv_lhs { rw [add_assoc, add_comm, add_assoc], },
end
def lift_cocycle {m₁ n₀ : ℤ} (y₁ : cocycle K F m₁) (y₂ : cochain K G m) (hm : m+1=m₁+n)
(hn₀ : n₀+1=n) (m' : ℤ) (hm' : m+1=m')
(hy : δ m m' y₂ + cochain.comp (y₁ : cochain K F m₁) ↑z (show m'=m₁+n, by rw [← hm', hm]) = 0) :
cocycle K (twist z) m := cocycle.mk (lift_cochain z ↑y₁ y₂ hm) m' hm'
(by simp only [δ_lift_cochain z ↑y₁ y₂ hm hn₀ rfl m' hm', cocycle.δ_eq_zero, cochain.zero_comp,
zsmul_zero, zero_add, hy])
@[simps]
def lift_hom_as_cocycle {m₁ n₀ : ℤ} (y₁ : cocycle K F m₁) (y₂ : cochain K G 0) (hm : m₁+n=1)
(hn₀ : n₀+1=n)
(hy : δ 0 1 y₂ + cochain.comp (y₁ : cochain K F m₁) ↑z hm.symm = 0) : cocycle K (twist z) 0 :=
lift_cocycle z y₁ y₂ (show 0+1 = m₁+n, by linarith) hn₀ 1 (zero_add 1) hy
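/-- The morphism `K ⟶ twist z` determined by a cocycle `y₁ : cocycle K F m₁` and a
cochain `y₂ : cochain K G 0` that satisfy the compatibility condition `hy`. -/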
@[simps]
def lift {m₁ n₀ : ℤ} (y₁ : cocycle K F m₁) (y₂ : cochain K G 0) (hm : m₁+n=1)
(hn₀ : n₀+1=n)
(hy : δ 0 1 y₂ + cochain.comp (y₁ : cochain K F m₁) ↑z hm.symm = 0) :
K ⟶ twist z :=
cocycle.hom_of (lift_hom_as_cocycle z y₁ y₂ hm hn₀ hy)
lemma cochain_ext' (y₁ y₂ : cochain K (twist z) m) {n₁ m₁ : ℤ} (hn₁ : n+n₁=1) (hm₁ : m₁ = m+n₁) :
y₁ = y₂ ↔ cochain.comp y₁ (fst z hn₁ : cochain (twist z) F n₁) hm₁
= cochain.comp y₂ (fst z hn₁ : cochain (twist z) F n₁) hm₁ ∧
cochain.comp y₁ (snd z) (add_zero m).symm =
cochain.comp y₂ (snd z) (add_zero m).symm :=
begin
split,
{ intro h, rw h, tauto, },
{ rintro ⟨hl, hr⟩,
suffices : cochain.comp y₁ (cochain.of_hom (𝟙 _)) (add_zero m).symm =
cochain.comp y₂ (cochain.of_hom (𝟙 _)) (add_zero m).symm,
{ ext1,
simpa only [cochain.comp_id] using cochain.congr_v this p q hpq, },
simp only [id_eq z (show n-1+1=n, by linarith) hn₁, cochain.add_comp,
cochain.comp_add],
simp only [← cochain.comp_assoc_of_second_is_zero_cochain,
← cochain.comp_assoc _ _ _ hm₁ (show 0=n₁+(n-1), by linarith)
(show m=m+n₁+(n-1), by linarith), hl, hr], },
end
lemma of_d_eq (n₁ n₂ n₃ : ℤ) (hn₁ : n + n₁ = 1) (hn₂ : n₂ = n₁+1) (hn₃ : n₃+1=n) : cochain.of_d (twist z) =
ε (n+1) • ((fst z hn₁ : cochain (twist z) F n₁).comp (cochain.of_d F) hn₂).comp (inl z hn₃)
(by rw [← hn₁, hn₂, ← hn₃, add_comm n₃, add_assoc, add_comm n₃, ← add_assoc, add_comm n₁]) +
((fst z hn₁ : cochain (twist z) F n₁).comp (z : cochain F G n) (show 1 = n₁ + n, by rw [← hn₁, add_comm])).comp
(cochain.of_hom (inr z)) (add_zero 1).symm +
((snd z).comp (cochain.of_d G) (zero_add 1).symm).comp (cochain.of_hom (inr z)) (add_zero 1).symm :=
begin
rw cochain_ext z (cochain.of_d (twist z)) _ hn₃ hn₃.symm,
split,
{ simp only [cochain.comp_add, cochain.comp_zsmul],
simp only [← cochain.comp_assoc_of_third_is_zero_cochain,
← cochain.comp_assoc_of_second_is_zero_cochain, inl_comp_snd, inl_comp_fst, cochain.id_comp,
← cochain.comp_assoc (inl z hn₃) _ _ (show 1 = n₃+n₂, by linarith) _ (show n = n₃+n₂+n₃, by linarith),
← cochain.comp_assoc (inl z hn₃) _ _ (show 0 = n₃ + n₁, by linarith) _ (show 1 = n₃ + n₁ + 1, by linarith),
← cochain.comp_assoc (inl z hn₃) _ _ (show 0 = n₃ + n₁, by linarith) (show 1 = n₁ + n, by linarith)
(show n = n₃+n₁+n, by linarith), cochain.zero_comp, add_zero,
cochain_ext' z _ _ hn₁ (show 1 = n+n₁, by linarith)],
split,
{ simp only [cochain.add_comp, cochain.zsmul_comp,
cochain.comp_assoc_of_second_is_zero_cochain, inr_comp_fst, cochain.comp_zero, inl_comp_fst,
cochain.comp_assoc _ (inl z hn₃) _ (show n = 1+n₃, by linarith) (show 0 = n₃ + n₁, by linarith)
(show 1 = 1 + n₃ + n₁, by linarith), add_zero, cochain.comp_id],
ext p q hpq,
dsimp [inl, fst, cochain.comp],
simp only [twist.δ, cochain.of_d_v, twist_d, assoc, biprod.inl_desc, biprod.lift_fst_assoc,
linear.smul_comp, cochain.d_comp_of_hom_v, homological_complex.id_f, comp_id,
linear.comp_smul, cochain.of_hom_v_comp_d, id_comp], },
{ simp only [cochain.comp_assoc_of_third_is_zero_cochain, cochain.add_comp, cochain.zsmul_comp,
inl_comp_snd, inr_comp_snd, cochain.comp_zero, zsmul_zero, zero_add, cochain.comp_id],
ext p q hpq,
have hp : ∃ (p' : ℤ), p = p' + 1 -n := ⟨p+n-1, by linarith⟩,
obtain ⟨p', hp'⟩ := hp,
subst hp',
rw cochain.comp_v _ _ hn₃.symm (p'+1-n) p' q (by linarith) (by linarith),
dsimp [inl, snd],
simp only [cochain.of_hom_v, homological_complex.id_f, id_comp, cochain.comp_zero_cochain,
twist.δ, cochain.of_d_v, twist_d, cochain.mk_v, comp_id, biprod.inl_desc_assoc,
biprod.lift_snd, dif_pos (show p'+1 = q, by linarith)], }, },
{ simp only [ε_succ, neg_smul,
cochain.comp_add, cochain.comp_neg, cochain.comp_zsmul,
← cochain.comp_assoc_of_first_is_zero_cochain, inr_comp_fst, cochain.zero_comp, zsmul_zero,
zero_add, neg_zero, inr_comp_snd, cochain.id_comp,
cochain_ext' z _ _ hn₁ (show n₂ = 1 + n₁, by linarith)],
split,
{ rw [cochain.comp_assoc_of_second_is_zero_cochain, inr_comp_fst, cochain.comp_zero],
ext p q hpq,
simp only [cochain.comp_assoc_of_first_is_zero_cochain, cochain.zero_cochain_comp,
cochain.of_hom_v, cochain.zero_v,
cochain.comp_v _ _ (show n₂ = 1 + n₁, by linarith) p (p+1) q rfl (by linarith)],
dsimp [inr, fst],
simp only [twist.δ, cochain.of_d_v, twist_d, biprod.inr_desc_assoc,
biprod.lift_fst_assoc, zero_comp], },
{ rw [cochain.comp_assoc_of_second_is_zero_cochain, inr_comp_snd, cochain.comp_id],
ext p q hpq,
dsimp [inr, snd],
simp only [cochain.comp_assoc_of_third_is_zero_cochain, cochain.zero_cochain_comp,
cochain.of_hom_v, cochain.comp_zero_cochain, twist.δ, cochain.of_d_v, twist_d,
cochain.mk_v, homological_complex.id_f, comp_id, biprod.inr_desc_assoc,
biprod.lift_snd], }, },
end
end twist
end hom_complex
end cochain_complex
|
\foldertitle{poster}{Posterior Simulator Objects and Functions}{poster/Contents}
Posterior objects, \texttt{poster}, are used to evaluate the behaviour
of the posterior distribution, and to draw model parameters from the
posterior distribution.
Posterior objects are set up within the
\href{model/estimate}{\texttt{model/estimate}} function and returned as
its second output argument; the set-up and initialisation of the
posterior object is fully automated in this case. Alternatively, you can
set up a posterior object manually by setting all its properties
appropriately.
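For orientation, a typical session might look as follows. This is only a
schematic sketch: the variable names are placeholders, and the exact input and
output arguments of \texttt{estimate}, \texttt{arwm} and \texttt{stats} shown
here are indicative only (see the on-line help listed at the bottom of this
page for the authoritative signatures).
\begin{verbatim}
% Estimate the model; the poster object comes back as the second output.
[mest, pos] = estimate(m, d, range, est);
% Draw a chain from the posterior with adaptive random-walk Metropolis.
[theta, logPost] = arwm(pos, 1000);
% Evaluate summary statistics of the chain.
s = stats(pos, theta);
\end{verbatim}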
Poster methods:
\paragraph{Constructor}\label{constructor}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\href{poster/poster}{\texttt{poster}} - Create new empty posterior
simulation (poster) object.
\end{itemize}
\paragraph{Evaluating posterior
density}\label{evaluating-posterior-density}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\href{poster/arwm}{\texttt{arwm}} - Adaptive random-walk Metropolis
posterior simulator.
\item
\href{poster/eval}{\texttt{eval}} - Evaluate posterior density at
specified points.
\item
\href{poster/regen}{\texttt{regen}} - Regeneration time MCMC
Metropolis posterior simulator.
\end{itemize}
\paragraph{Chain statistics}\label{chain-statistics}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\href{poster/stats}{\texttt{stats}} - Evaluate selected statistics of
ARWM chain.
\end{itemize}
\paragraph{Getting on-line help on poster
functions}\label{getting-on-line-help-on-poster-functions}
\begin{verbatim}
help poster
help poster/function_name
\end{verbatim}
|
(* Title: HOL/Auth/n_german_lemma_on_inv__41.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_on_inv__41 imports n_german_base
begin
section{*All lemmas on causal relation between inv__41 and some rule r*}
lemma n_SendInvEVsinv__41:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__41 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvSVsinv__41:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__41 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__41:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__41 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__41:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__41 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__41:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__41 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv4) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Ident ''ExGntd'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__41:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__41 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__41:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__41 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__41:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__41 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__41:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqESVsinv__41:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqES i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqVsinv__41:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReq N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__41:
assumes a1: "\<exists> j. j\<le>N\<and>r=n_SendReqS j" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqEIVsinv__41:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqEI i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__41 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
(* string requirements *)
Require Import Strings.String.
Local Open Scope string_scope.
Scheme Equality for string.
Require Import Ascii.
(* integral requirements *)
Require Import Coq.ZArith.BinInt.
Local Open Scope Z_scope.
(* requirements for cases and arrays *)
Local Open Scope list_scope.
Inductive ErrorZ : Type := (* integer variable = NUMBR *)
| error_Z : ErrorZ
| number : Z -> ErrorZ.
Coercion number : Z >-> ErrorZ.
Inductive ErrorBool : Type := (* boolean variable = TROOF *)
| error_bool : ErrorBool
| boolean : bool -> ErrorBool.
Coercion boolean : bool >-> ErrorBool.
Inductive ErrorString : Type := (* string variable = YARN *)
| error_string : ErrorString
| vstring : string -> ErrorString.
Coercion vstring : string >-> ErrorString.
Inductive realVar :=
| var_notdecl : realVar
| error_equal : realVar
| default : realVar
| numbr_e : ErrorZ -> realVar
| troof_e : ErrorBool -> realVar
| strng_e : ErrorString -> realVar.
Check numbr_e. Check ErrorZ.
Scheme Equality for realVar.
Definition Env := string -> realVar.
Definition env_notdecl : Env :=
fun v => var_notdecl.
Definition CheckVar (a : realVar) (b : realVar) : bool := (* checks whether the variable exists, i.e. whether the two values are built with the same constructor *)
match a with
| var_notdecl => match b with
| var_notdecl => true
| _ => false
end
| error_equal => match b with
| error_equal => true
| _ => false
end
| numbr_e n1 => match b with
| numbr_e n2 => true
| _ => false
end
| troof_e b1 => match b with
| troof_e b2 => true
| _ => false
end
| strng_e s1 => match b with
| strng_e s2 => true
| _ => false
end
| default => match b with
| default => true
| _ => false
end
end.
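(* update env s x: assignment / declaration discipline for variable s.
   - if s is undeclared and x is not "default", the update is refused (var_notdecl);
   - if s is undeclared and x is "default", s becomes a declared but unset variable;
   - if the current value is "default", or x has the same kind as the current value,
     s is bound to x;
   - otherwise the kinds disagree and the result is error_equal. *)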
Definition update (env : Env) (s : string) (x : realVar) : Env :=
fun y => if (string_beq y s)
then
if (andb (CheckVar (var_notdecl) (env y)) (negb(CheckVar (default) (x))))
then var_notdecl
else
if (andb (CheckVar (var_notdecl) (env y)) (CheckVar (default) (x)))
then default
else
if (orb (CheckVar (default) (env y)) (CheckVar (x) (env y)))
then x
else error_equal
else env y.
(* string expressions *)
Inductive StExp :=
| sconstant : ErrorString -> StExp
| sident : string -> StExp
| sconcat : StExp -> StExp -> StExp.
Coercion sconstant : ErrorString >-> StExp.
Definition convert_string (s: ErrorString) : string :=
match s with
| error_string => ""
| vstring s' => s'
end.
Notation "'CONCAT' a 'WIT' b" := (sconcat a b) (at level 50).
(* Check length(convert_string "long"). *)
Definition err_cat (s1 s2 : ErrorString) : ErrorString :=
match s1, s2 with
| error_string, _ => error_string
| _, error_string => error_string
| vstring s1, vstring s2 => vstring (s1 ++ s2)
end.
(* Big-Step semantics for string expressions *)
Reserved Notation "st -[ S ]-> st'" (at level 50).
Inductive stringBS : StExp -> Env -> ErrorString -> Prop :=
| sconstantBS : forall s sg, sconstant s -[ sg ]-> s
| sidentBS : forall sid sg, sident sid -[ sg ]-> match (sg sid) with
| strng_e sid => sid
| _ => ""
end
| scatBS : forall s1 s2 r i1 i2 sg,
s1 -[ sg ]-> i1 ->
s2 -[ sg ]-> i2 ->
r = err_cat i1 i2 ->
sconcat s1 s2 -[ sg ]-> r
where "st -[ S ]-> st'" := (stringBS st S st').
Inductive AExp :=
(* arithmetic expressions *)
| aconstant : ErrorZ -> AExp
| aident : string -> AExp
| plus : AExp -> AExp -> AExp
| minus : AExp -> AExp -> AExp
| multiply : AExp -> AExp -> AExp
| divide : AExp -> AExp -> AExp
| modulo : AExp -> AExp -> AExp
| increment : AExp -> AExp
| decrement : AExp -> AExp
| maximum : AExp -> AExp -> AExp
| minimum : AExp -> AExp -> AExp
| swap : AExp -> AExp -> AExp
| len : StExp -> AExp.
Coercion aconstant : ErrorZ >-> AExp.
Coercion aident : string >-> AExp.
(* notations for arithmetic expressions*)
Notation "'SUM' 'OF' a 'AN' b" := (plus a b) (at level 20).
Notation "'DIFF' 'OF' a 'AN' b" := (minus a b) (at level 20).
Notation "'PRODUKT' 'OF' a 'AN' b" := (multiply a b) (at level 24).
Notation "'QUOSHUNT' 'OF' a 'AN' b" := (divide a b) (at level 24).
Notation "'MOD' 'OF' a 'AN' b" := (modulo a b) (at level 24).
Notation "'BIGGR' 'OF' a 'AN' b" := (maximum a b) (at level 40).
Notation "'SMALLR' 'OF' a 'AN' b" := (minimum a b) (at level 40).
Notation "'BUFF' n" := (increment n) (at level 20).
Notation "'NERF' n" := (decrement n) (at level 20).
Notation "'FWAP' a 'WIT' b" := (swap a b) (at level 45).
Notation "'LANG' 'OF' a" := (len a) (at level 46).
Check (SUM OF 3 AN 4).
Check (DIFF OF "a" AN 2).
Check (BUFF 15).
Check (NERF "i").
Check (LANG OF "b").
(* simulating arithmetic calculus errors *)
Definition plus_err (n1 n2 : ErrorZ) : ErrorZ :=
match n1, n2 with
| error_Z, _ => error_Z
| _, error_Z => error_Z
| number n1 , number n2 => number (n1 + n2)
end.
Definition minus_err (n1 n2 : ErrorZ) : ErrorZ :=
match n1, n2 with
| error_Z, _ => error_Z
| _, error_Z => error_Z
| number n1 , number n2 => if Z.ltb n1 n2
then error_Z
else number (n1 - n2)
end.
Definition multiply_err (n1 n2 : ErrorZ) : ErrorZ :=
match n1, n2 with
| error_Z, _ => error_Z
| _, error_Z => error_Z
| number n1 , number n2 => number (n1 * n2)
end.
Definition divide_err (n1 n2 : ErrorZ) : ErrorZ :=
match n1, n2 with
| error_Z, _ => error_Z
| _, error_Z => error_Z
| _, number 0 => error_Z
| number n1 , number n2 => number (Z.div n1 n2)
end.
Definition modulo_err (n1 n2 : ErrorZ) : ErrorZ :=
match n1, n2 with
| error_Z, _ => error_Z
| _, error_Z => error_Z
| _, number 0 => error_Z
| number n1 , number n2 => number (Z.modulo n1 n2)
end.
Definition incr_err (n : ErrorZ) : ErrorZ :=
match n with
| error_Z => error_Z
| number n => number (n + 1)
end.
Definition decr_err (n : ErrorZ) : ErrorZ :=
match n with
| error_Z => error_Z
| number n => number (n - 1)
end.
Definition max_err (n1 n2 : ErrorZ) : ErrorZ :=
match n1, n2 with
| error_Z, _ => error_Z
| _, error_Z => error_Z
| number n1 , number n2 => if Z.ltb n1 n2
then n2
else n1
end.
Definition min_err (n1 n2 : ErrorZ) : ErrorZ :=
match n1, n2 with
| error_Z, _ => error_Z
| _, error_Z => error_Z
| number n1 , number n2 => if Z.ltb n1 n2
then n1
else n2
end.
(* Definition swap_err (n1 n2 : ErrorZ) : ErrorZ :=
match n1, n2 with
| error_Z, _ => error_Z
| _, error_Z => error_Z
| number n1 , number n2 => match n1, n2 with
| number n1 => n2
| number n2 => n1
end
end.
here I did not figure out how to implement it *)
(* Big-Step semantics for arithmetic expressions *)
Reserved Notation "A =[ S ]=> N" (at level 60).
Inductive aritBS : AExp -> Env -> ErrorZ -> Prop :=
| constantBS : forall n sg, aconstant n =[ sg ]=> n
| identBS : forall aid sg, aident aid =[ sg ]=> match (sg aid) with
| numbr_e aid => aid
| _ => error_Z
end
| plusBS : forall a1 a2 i1 i2 sg n,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
n = plus_err i1 i2 ->
plus a1 a2 =[ sg ]=> n
| minusBS : forall a1 a2 i1 i2 sg n,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
n = minus_err i1 i2 ->
minus a1 a2 =[ sg ]=> n
| multBS : forall a1 a2 i1 i2 sg n,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
n = multiply_err i1 i2 ->
multiply a1 a2 =[ sg ]=> n
| divBS : forall a1 a2 i1 i2 sg n,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
n = divide_err i1 i2 ->
divide a1 a2 =[ sg ]=> n
| modBS : forall a1 a2 i1 i2 sg n,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
n = modulo_err i1 i2 ->
modulo a1 a2 =[ sg ]=> n
| incrBS : forall a i sg n,
a =[ sg ]=> i ->
n = incr_err i ->
increment a =[ sg ]=> n
| decrBS : forall a i sg n,
a =[ sg ]=> i ->
n = decr_err i ->
decrement a =[ sg ]=> n
| maxBS : forall a1 a2 i1 i2 sg b,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
b = max_err i1 i2 ->
maximum a1 a2 =[ sg ]=> b
| minBS : forall a1 a2 i1 i2 sg b,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
b = min_err i1 i2 ->
minimum a1 a2 =[ sg ]=> b
(* | lenBS : forall s i sg r,
s -[ sg ]-> i ->
r = numbr_e (length (convert_string s)) ->
len s =[ sg ]=> r *)
(* | swapBS : forall a1 a2 i1 i2 aux sg,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
aux = i1 ->
a1 = i2 ->
a2 = aux ->
swap a1 a2 =[ sg ]=> *)
where "a =[ sg ]=> n" := (aritBS a sg n).
Compute (env_notdecl "a").
Example ex1 : SUM OF 3 AN 4 =[ env_notdecl ]=> 7.
Proof.
eapply plusBS.
eapply constantBS.
eapply constantBS.
simpl. reflexivity.
Qed.
Example ex1' : DIFF OF 3 AN 5 =[ env_notdecl]=> error_Z.
Proof.
eapply minusBS.
eapply constantBS.
eapply constantBS.
simpl. reflexivity.
Qed.
Example ex2 : MOD OF 12 AN 0 =[ env_notdecl ]=> error_Z.
Proof.
eapply modBS.
eapply constantBS.
eapply constantBS.
simpl. reflexivity.
Qed.
Example ex3 : BUFF 10 =[ env_notdecl ]=> 11.
Proof.
eapply incrBS.
eapply constantBS.
simpl. reflexivity.
Qed.
Example ex4 : BIGGR OF 8 AN 5 =[ env_notdecl ]=> 8.
Proof.
eapply maxBS.
eapply constantBS.
eapply constantBS.
simpl. reflexivity.
Qed.
(* boolean expressions *)
Inductive BExp :=
| bconstant : ErrorBool -> BExp
| bident : string -> BExp
| right : BExp
| wrong : BExp
| non : BExp -> BExp
| and : BExp -> BExp -> BExp
| or : BExp -> BExp -> BExp
| xor : BExp -> BExp -> BExp
| same : AExp -> AExp -> BExp
| diff : AExp -> AExp -> BExp
| lt : AExp -> AExp -> BExp
| gt : AExp -> AExp -> BExp
| leq : AExp -> AExp -> BExp
| geq : AExp -> AExp -> BExp
| scomp : StExp -> StExp -> BExp.
Coercion bconstant : ErrorBool >-> BExp.
Coercion bident : string >-> BExp.
(* notations for boolean expressions *)
Notation "'BOTH' 'OF' a 'AN' b" := (and a b) (at level 49).
Notation "'EITHER' 'OF' a 'AN' b" := (or a b) (at level 49).
Notation "'WON' 'OF' a 'AN' b" := (xor a b) (at level 49).
Notation "'NOT' n" := (non n) (at level 49).
Notation "'BOTH' 'SAEM' a 'AN' b" := (same a b) (at level 49).
Notation "'DIFFRINT' a 'AN' b" := (diff a b) (at level 49).
Notation "'DIFFRINT' 'AN' 'BIGGR' 'OF' a 'AN' b" := (lt a b) (at level 53).
Notation "'DIFFRINT' 'AN' 'SMALLR' 'OF' a 'AN' b" := (gt a b) (at level 53).
Notation "'BOTH' 'SAEM' 'AN' 'BIGGR' 'OF' a 'AN' b" := (geq a b) (at level 53).
Notation "'BOTH' 'SAEM' 'AN' 'SMALLR' 'OF' a 'AN' b" := (leq a b) (at level 53).
Notation "'COMP' a 'WIT' b" := (scomp a b)(at level 50).
Check (NOT "x").
Check (NOT true).
Check (BOTH SAEM "a" AN BIGGR OF "a" AN "b").
Check (COMP "a" WIT "x").
Definition non_err (n : ErrorBool) : ErrorBool :=
match n with
| error_bool => error_bool
| boolean n => negb (n)
end.
Definition and_err (n1 n2 : ErrorBool) : ErrorBool :=
match n1, n2 with
| error_bool, _ => error_bool
| _, error_bool => error_bool
| boolean n1 , boolean n2 => boolean (andb n1 n2)
end.
Definition or_err (n1 n2 : ErrorBool) : ErrorBool :=
match n1, n2 with
| error_bool, _ => error_bool
| _, error_bool => error_bool
| boolean n1 , boolean n2 => boolean (orb n1 n2)
end.
Definition xor_err (n1 n2 : ErrorBool) : ErrorBool :=
match n1, n2 with
| error_bool, _ => error_bool
| _, error_bool => error_bool
| boolean n1 , boolean n2 => boolean (xorb n1 n2)
end.
Definition same_err (n1 n2 : ErrorZ) : ErrorBool :=
match n1, n2 with
| error_Z, _ => error_bool
| _, error_Z => error_bool
| number n1 , number n2 => boolean (Z.eqb n1 n2)
end.
Definition diff_err (n1 n2 : ErrorZ) : ErrorBool :=
match n1, n2 with
| error_Z, _ => error_bool
| _, error_Z => error_bool
| number n1 , number n2 => boolean (negb(Z.eqb n1 n2))
end.
Definition lt_err (n1 n2 : ErrorZ) : ErrorBool :=
match n1, n2 with
| error_Z, _ => error_bool
| _, error_Z => error_bool
| number n1 , number n2 => boolean (Z.ltb n1 n2)
end.
Definition gt_err (n1 n2 : ErrorZ) : ErrorBool :=
match n1, n2 with
| error_Z, _ => error_bool
| _, error_Z => error_bool
| number n1 , number n2 => boolean (Z.ltb n2 n1)
end.
Definition leq_err (n1 n2 : ErrorZ) : ErrorBool :=
match n1, n2 with
| error_Z, _ => error_bool
| _, error_Z => error_bool
| number n1 , number n2 => boolean (Z.leb n1 n2)
end.
Definition geq_err (n1 n2 : ErrorZ) : ErrorBool :=
match n1, n2 with
| error_Z, _ => error_bool
| _, error_Z => error_bool
| number n1 , number n2 => boolean (Z.leb n2 n1)
end.
Definition equal_strings (s1 s2 : string ) : bool :=
if(string_dec s1 s2)
then true
else false. (* used to test the equality of two strings *)
Definition scmp (s1 s2 : ErrorString) : ErrorBool :=
match s1, s2 with
| error_string, _ => error_bool
| _, error_string => error_bool
| vstring s1 , vstring s2 => equal_strings (convert_string s1) (convert_string s2)
end.
(* Big-Step semantics for boolean expressions - in works *)
Reserved Notation "B ={ S }=> B'" (at level 70).
Inductive boolBS : BExp -> Env -> ErrorBool -> Prop :=
| itz_tru : forall sg, right ={ sg }=> true
| itz_fls : forall sg, wrong ={ sg }=> false
| bconstantBS : forall b sg, bconstant b ={ sg }=> b
| bidentBS : forall bid sg, bident bid ={ sg }=> match (sg bid) with
| troof_e bid => bid
| _ => false
end
(* | ttof : forall b sg,
b ={ sg }=> true ->
non b ={ sg }=> false
| ftot : forall b sg,
b ={ sg }=> false ->
non b ={ sg }=> true *)
| notBS : forall sg b b' i1,
b ={ sg }=> i1 ->
b' = (non_err i1) ->
(non b) ={ sg }=> b'
(* | tand : forall b1 b2 sg r,
b1 ={ sg }=> true ->
b2 ={ sg }=> r ->
and b1 b2 ={ sg }=> r
| fand : forall b1 b2 sg,
b1 ={ sg }=> false ->
and b1 b2 ={ sg }=> false *)
| andBS : forall sg b1 b2 i1 i2 b,
b1 ={ sg }=> i1 ->
b2 ={ sg }=> i2 ->
b = (and_err i1 i2) ->
and b1 b2 ={ sg }=> b
(* | tor : forall b1 b2 sg,
b1 ={ sg }=> true ->
or b1 b2 ={ sg }=> true
| wfor : forall b1 b2 sg r,
b1 ={ sg }=> false ->
b2 ={ sg }=> r ->
or b1 b2 ={ sg }=> r *)
| orBS : forall sg b1 b2 i1 i2 b,
b1 ={ sg }=> i1 ->
b2 ={ sg }=> i2 ->
b = (or_err i1 i2) ->
or b1 b2 ={ sg }=> b
(* | fxor : forall b1 b2 sg r,
b1 ={ sg }=> r ->
b2 ={ sg }=> r ->
xor b1 b2 ={ sg }=> false
| txor : forall b1 b2 sg r1 r2,
b1 ={ sg }=> r1 ->
b2 ={ sg }=> r2 ->
xor b1 b2 ={ sg }=> true *)
| xorBS : forall sg b1 b2 i1 i2 b,
b1 ={ sg }=> i1 ->
b2 ={ sg }=> i2 ->
b = (xor_err i1 i2) ->
xor b1 b2 ={ sg }=> b
| sameBS : forall sg b1 b2 i1 i2 b,
b1 =[ sg ]=> i1 ->
b2 =[ sg ]=> i2 ->
b = (same_err i1 i2) ->
same b1 b2 ={ sg }=> b
| diffBS : forall sg b1 b2 i1 i2 b,
b1 =[ sg ]=> i1 ->
b2 =[ sg ]=> i2 ->
b = (diff_err i1 i2) ->
diff b1 b2 ={ sg }=> b
| lessthanBS : forall a1 a2 i1 i2 sg r,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
r = lt_err i1 i2 ->
lt a1 a2 ={ sg }=> r
| greaterthanBS : forall a1 a2 i1 i2 sg r,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
r = gt_err i1 i2 ->
gt a1 a2 ={ sg }=> r
| lesseqBS : forall a1 a2 i1 i2 sg r,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
r = leq_err i1 i2 ->
leq a1 a2 ={ sg }=> r
| greatereqBS : forall a1 a2 i1 i2 sg r,
a1 =[ sg ]=> i1 ->
a2 =[ sg ]=> i2 ->
r = geq_err i1 i2 ->
geq a1 a2 ={ sg }=> r
| cmpBS : forall s1 s2 i1 i2 sg r,
s1 -[ sg ]-> i1 ->
s2 -[ sg ]-> i2 ->
r = scmp i1 i2 ->
scomp s1 s2 ={ sg }=> r
where "B ={ S }=> B'" := (boolBS B S B').
Example ex5 : NOT true ={ env_notdecl }=> false.
Proof.
eapply notBS.
eapply bconstantBS.
simpl. reflexivity.
Qed.
Example ex6 : BOTH OF true AN false ={ env_notdecl }=> false.
Proof.
eapply andBS.
eapply bconstantBS.
eapply bconstantBS.
simpl. reflexivity.
Qed.
Example ex7 : DIFFRINT AN BIGGR OF 7 AN 10 ={ env_notdecl }=> true.
Proof.
eapply lessthanBS.
eapply constantBS.
eapply constantBS.
simpl. reflexivity.
Qed.
Example ex8 : COMP "sb" WIT "tf" ={ env_notdecl }=> true.
Proof.
eapply cmpBS.
eapply sconstantBS.
eapply sconstantBS.
simpl.
Abort.
Inductive VExp :=
| error_vector : VExp
| vector_int : Z -> list Z -> VExp.
(* flow controls + assignment + sequence *)
Inductive Stmt :=
| equals_Z : string -> AExp -> Stmt
| equals_bool : string -> BExp -> Stmt
| equals_string : string -> StExp -> Stmt
| equalsvect_Z: string -> VExp -> Stmt
| decl_vectZ : string -> VExp -> Stmt
| decl_Z : string -> Stmt
| decl_bool : string -> Stmt
| decl_string : string -> Stmt
| seqinflow : Stmt -> Stmt -> Stmt
| ifthen : BExp -> Stmt -> Stmt
| ifthenelse : BExp -> Stmt -> Stmt -> Stmt
| whileseq : BExp -> Stmt -> Stmt
| forsq_any : Stmt -> Stmt -> BExp -> Stmt -> Stmt
| break : Stmt
| continue : Stmt
| comment : string -> Stmt
| switch : AExp -> list Cases -> Stmt
with Cases :=
| case_nr : AExp -> Stmt -> Cases
| default_case : Stmt -> Cases.
(* notations for flow controls *)
Notation "a 'AND' b" := (seqinflow a b)(at level 90). (*pentru secventele folosite in partea de conditional*)
Notation "a ; b" := (seqinflow a b) (at level 90).
Notation "'BTW' comm" := (comment comm) (at level 90).
Notation "'OBTW' comm 'TLDR'" := (comment comm) (at level 90).
Notation " a 'ITZ' b" := (equals_Z a b) (at level 50).
Notation " a 'ITZB' b" := (equals_bool a b) (at level 50).
Notation " a 'ITS' b" := (equals_string a b) (at level 50).
Notation " 'I' 'HAS' 'A' 'ANUMBR' a [ n ] " := (decl_vectZ a (vector_int n nil)) (at level 80).
Notation " 'I' 'HAS' 'A' 'NUMBR' a" := (decl_Z a) (at level 80).
Notation " 'I' 'HAS' 'A' 'TROOF' a" := (decl_bool a) (at level 80).
Notation " 'I' 'HAS' 'A' 'YARN' a" := (decl_string a) (at level 80).
Notation " cond 'O_RLY?' 'YA_RLY' { s1 } 'NO_WAI' { s2 } 'OIC'" := (ifthenelse cond s1 s2) (at level 95).
Notation " cond 'O_RLY?' 'YA_RLY' { s } 'OIC'" := (ifthen cond s) (at level 95).
Notation " 'IM_IN_YR_WHILE' cond s 'IM_OUTTA_YR_WHILE'" := (whileseq cond s) (at level 95).
Notation " 'IM_IN_YR_LOOP' oper 'YR' a 'WILE' cond { s } 'IM_OUTTA_YR_LOOP'" := (forsq_any oper a cond s) (at level 95).
Notation "'ENUF'" := (break) (at level 80).
Notation "'GOON'" := (continue) (at level 80).
Check (I HAS A NUMBR "a" ; "a" ITZ 12).
Check (I HAS A ANUMBR "a" [30]).
Check (I HAS A TROOF "b" ; "b" ITZB false).
Check ( "c" ITS "ff").
(* notations for the switch() function *)
Notation "var ',WTF?' C1 ;; C2 ;; .. ;; Cn 'OIC'" := (switch var (cons C1 (cons C2 .. (cons Cn nil) .. ))) (at level 99).
Notation "'OMG' val { seq }" := (case_nr val seq) (at level 99).
Notation "'OMGWTF' { seq }" := (default_case seq) (at level 99).
(* input-output functions *)
Inductive InAndOut :=
| scan : string -> InAndOut
| write : string -> InAndOut.
Notation "'GIMMEH' var" := (scan var)(at level 91).
Notation "'VISIBLE' var" := (write var)(at level 91).
Reserved Notation "S -{ sg }-> sg'" (at level 75).
Inductive strBS : Stmt -> Env -> Env -> Prop :=
| decl_ZBS : forall i x sg sg',
sg' = (update sg x (numbr_e i)) ->
decl_Z x -{ sg }-> sg'
| decl_BoolBS : forall i x sg sg',
sg' = (update sg x (troof_e i)) ->
decl_bool x -{ sg }-> sg'
| decl_StringBS : forall i x sg sg',
sg' = (update sg x (strng_e i)) ->
decl_string x -{ sg }-> sg'
| equal_ZBS : forall a i x sg sg',
a =[ sg ]=> i ->
sg' = (update sg x (numbr_e i)) ->
equals_Z x a -{ sg }-> sg'
| equal_boolBS : forall a i x sg sg',
a ={ sg }=> i ->
sg' = (update sg x (troof_e i)) ->
equals_bool x a -{ sg }-> sg'
| equal_stringBS : forall a i x sg sg',
sg' = (update sg x (strng_e i)) ->
equals_string x a -{ sg }-> sg'
| seqBS : forall s1 s2 sg sg1 sg2,
s1 -{ sg }-> sg1 ->
s2 -{ sg1 }-> sg2 ->
seqinflow s1 s2 -{ sg }-> sg2
| ifelse_falseBS : forall cond s1 s2 sg sg',
cond ={ sg }=> false ->
s2 -{ sg }-> sg' ->
ifthenelse cond s1 s2 -{ sg }-> sg'
| ifelse_trueBS : forall cond s1 s2 sg sg',
cond ={ sg }=> true ->
s1 -{ sg }-> sg' ->
ifthenelse cond s1 s2 -{ sg }-> sg'
| iftrueBS : forall cond s sg sg',
cond ={ sg }=> true ->
s -{ sg }-> sg' ->
ifthen cond s -{ sg }-> sg'
| iffalseBS : forall cond s sg sg',
cond ={ sg }=> false ->
ifthen cond s -{ sg }-> sg'
| whilefalseBS : forall b s sg,
b ={ sg }=> false ->
whileseq b s -{ sg }-> sg
| whiletrueBS : forall b s sg sg',
b ={ sg }=> true ->
(s ; whileseq b s) -{ sg }-> sg' ->
whileseq b s -{ sg }-> sg'
| forany_trueBS : forall init cond op s sg sg',
cond ={ sg }=> true ->
( init ; whileseq cond (s ; op) ) -{ sg }-> sg' ->
forsq_any op init cond s -{ sg }-> sg'
| forany_falseBS : forall init cond op s sg sg',
cond ={ sg }=> false ->
forsq_any op init cond s -{ sg }-> sg'
| breakBS : forall s sg,
s -{ sg }-> sg
| continueBS : forall s sg sg',
s -{ sg }-> sg'
| switchBS : forall a i case b s sg sg',
a =[ sg ]=> i ->
b = same i case ->
switch a s -{ sg }-> sg'
where "s -{ sg }-> sg'" := (strBS s sg sg').
Check (SUM OF "VAR" AN 1).
Check(same "ANIMAL" "CAT")
O_RLY?
YA_RLY
{ "CAT" ITS "ok" }
OIC.
Check I HAS A NUMBR "A"; "A" ITZ 0 ;
whileseq (BOTH SAEM "A" AN 8)
("A" ITZ SUM OF "A" AN 2 ) .
Check I HAS A NUMBR "B" ; "B" ITZ 1 ;
forsq_any ("i" ITZ SUM OF "i" AN 1) ("i" ITZ 1) (NOT BOTH SAEM "i" AN 4)
("B" ITZ PRODUKT OF "B" AN 2).
Check BTW "notatie".
Check OBTW "alta notatie" TLDR.
(* Check switch "var"
case_nr 3 "a" ITZ 0
case_nr 2 "a" ITZ 1
.
*)
Compute (I HAS A NUMBR "n" ; "n" ITZ 4).
Example ex8 : exists sg', ("n" ITZ 4 ; "n" ITZ 9) -{ env_notdecl }-> sg' /\ sg' "n" = numbr_e 9.
Proof.
eexists.
split.
- eapply seqBS.
eapply equal_ZBS. eapply constantBS. reflexivity.
eapply equal_ZBS. eapply constantBS. reflexivity.
- unfold update. simpl. (* reflexivity. *)
Admitted.
Definition p1 := I HAS A NUMBR "x";
"x" ITZ 5;
whileseq (NOT BOTH SAEM "x" AN 10)
("x" ITZ SUM OF "x" AN 1).
Example ex9 : exists sg', p1 -{ env_notdecl }-> sg' /\ sg' "x" = numbr_e 10.
Proof.
eexists.
split.
unfold p1.
eapply seqBS. eapply seqBS.
eapply decl_ZBS. eauto.
eapply equal_ZBS. eapply constantBS. reflexivity.
eapply whiletrueBS. eapply notBS. eapply sameBS. eapply identBS. eapply constantBS.
unfold update. simpl. reflexivity. simpl. (* eauto. *)
Admitted.
Definition p2 := I HAS A NUMBR "x";
"x" ITZ 1;
forsq_any ("i" ITZ SUM OF "i" AN 1) ("i" ITZ 1) (BOTH SAEM AN SMALLR OF "i" AN 5)
("x" ITZ PRODUKT OF "x" AN 2).
Example ex10 : exists sg', p2 -{ env_notdecl }-> sg' /\ sg' "x" = numbr_e 32.
Proof.
eexists.
split.
unfold p2.
eapply seqBS. eapply seqBS.
eapply decl_ZBS. eauto.
eapply equal_ZBS. eapply constantBS. reflexivity.
eapply forany_trueBS. eapply lesseqBS. eapply identBS. eapply constantBS.
simpl. (* eauto. *)
Admitted.
(* Defining a stack machine and a small compiler for AExp' *)
Require Import Coq.Lists.List.
Import ListNotations.
(* program variables are just strings *)
Definition Var := string.
Definition AEnv := Var -> Z.
Definition Aenv0 := fun x => if string_dec x "x" then 10 else 0.
Check Aenv0.
Inductive AExp' :=
| aconst' : Z -> AExp'
| aid' : Var -> AExp'
| plus' : AExp' -> AExp' -> AExp'
| minus' : AExp' -> AExp' -> AExp'
| multiply' : AExp' -> AExp' -> AExp'
| divide' : AExp' -> AExp' -> AExp'.
Fixpoint Ainterpret (e : AExp') (env : AEnv) : Z :=
match e with
| aconst' c => c
| aid' x => (env x)
| plus' e1 e2 => (Ainterpret e1 env) + (Ainterpret e2 env)
| multiply' e1 e2 => (Ainterpret e1 env) * (Ainterpret e2 env)
| minus' e1 e2 => (Ainterpret e1 env) - (Ainterpret e2 env)
| divide' e1 e2 => Z.div (Ainterpret e1 env) (Ainterpret e2 env)
end.
Inductive Instr :=
| push_const : Z -> Instr
| push_var : Var -> Instr
| plus_instr : Instr
| mult_instr : Instr
| diff_instr : Instr
| divv_instr : Instr.
Compute diff.
Definition Stack := list Z.
(* not a recursive function, so a plain Definition is enough;
   for the binary instructions n1 is the top of the stack, i.e. the operand
   pushed last, which is the right-hand operand of the source expression *)
Definition Arun_instruction (i : Instr)
(env : AEnv) (stack : Stack) : Stack :=
match i with
| push_const c => (c :: stack)
| push_var x => ((env x) :: stack)
| plus_instr => match stack with
| n1 :: n2 :: stack' => (n1 + n2) :: stack'
| _ => stack
end
| mult_instr => match stack with
| n1 :: n2 :: stack' => (n1 * n2) :: stack'
| _ => stack
end
| diff_instr => match stack with
| n1 :: n2 :: stack' => (n2 - n1) :: stack'
| _ => stack
end
| divv_instr => match stack with
| n1 :: n2 :: stack' => (Z.div n2 n1) :: stack'
| _ => stack
end
end.
Fixpoint run_list (il : list Instr)
(env : Var -> Z) (stack : Stack) : Stack :=
match il with
| [ ] => stack
| i :: il' => run_list il' env (Arun_instruction i env stack)
end.
Compute (Arun_instruction (push_const 93) Aenv0 []).
Compute (Arun_instruction (push_var "x") Aenv0 []).
(* written with :: because the LOLCODE "a ; b" notation above would capture the ";"
   inside a bracketed list literal *)
Definition pgm1 := push_const 19 :: push_var "x" :: nil.
Compute run_list pgm1 Aenv0 [].
Fixpoint compile (e : AExp') : list Instr :=
match e with
| aconst' c => [push_const c]
| aid' x => [push_var x]
| plus' e1 e2 => (compile e1) ++ (compile e2) ++ [plus_instr]
| minus' e1 e2 => (compile e1) ++ (compile e2) ++ [diff_instr]
| multiply' e1 e2 => (compile e1) ++ (compile e2) ++ [mult_instr]
| divide' e1 e2 => (compile e1) ++ (compile e2) ++ [divv_instr]
end.
Compute compile (plus' (aconst' 2) (aid' "x")).
Compute Ainterpret (plus' (aconst' 2) (aid' "x")) Aenv0.
Compute run_list (compile (plus' (aconst' 2) (aid' "x"))) Aenv0 [].
Lemma soundness_helper :
forall e env stack is',
run_list (compile e ++ is') env stack =
run_list is' env ((Ainterpret e env) :: stack).
Proof.
induction e as [ c | x | e1 IHe1 e2 IHe2 | e1 IHe1 e2 IHe2
| e1 IHe1 e2 IHe2 | e1 IHe1 e2 IHe2 ]; intros; simpl; trivial.
- rewrite <- app_assoc.
rewrite <- app_assoc.
rewrite IHe1.
rewrite IHe2.
simpl.
rewrite Z.add_comm.
reflexivity.
- rewrite <- app_assoc.
rewrite <- app_assoc.
rewrite IHe1.
rewrite IHe2.
simpl.
(* diff_instr computes n2 - n1, so both sides already coincide *)
reflexivity.
- rewrite <- app_assoc.
rewrite <- app_assoc.
rewrite IHe1.
rewrite IHe2.
simpl.
rewrite Z.mul_comm.
reflexivity.
- rewrite <- app_assoc.
rewrite <- app_assoc.
rewrite IHe1.
rewrite IHe2.
simpl.
(* divv_instr computes Z.div n2 n1, so both sides already coincide *)
reflexivity.
Qed.
Theorem soundness :
forall e env,
run_list (compile e) env [] =
[Ainterpret e env].
Proof.
intros.
Check app_nil_r.
rewrite <- app_nil_r with (l := (compile e)).
rewrite soundness_helper.
simpl. trivial.
Qed.
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Algebra.Group where
open import Cubical.Algebra.Group.Base public
open import Cubical.Algebra.Group.Properties public
open import Cubical.Algebra.Group.Morphism public
open import Cubical.Algebra.Group.MorphismProperties public
open import Cubical.Algebra.Group.Algebra public
|
If $f$ is continuous on the closed interval $[a,b]$, then $f$ is bounded on $[a,b]$. |
Lemma exo1_1 :
forall m : nat, 0 + m = m.
Proof.
intros.
reflexivity.
Qed.
Lemma exo1_2 :
forall m n : nat, S n + m = S (n + m).
Proof.
intros.
simpl.
f_equal.
Qed.
Lemma plus_n_0 :
forall n, n + 0 = n.
Proof.
intros.
induction n.
- simpl. reflexivity.
- simpl. f_equal. apply IHn.
Qed.
Lemma plus_n_Sm :
forall n m, n + S m = S (n + m).
Proof.
intros.
induction n.
- simpl. f_equal.
- simpl. f_equal. apply IHn.
Qed.
Lemma exo2_1 :
forall m : nat, 0 * m = 0.
Proof.
intros.
reflexivity.
Qed.
Lemma exo2_1_2 :
forall m n : nat, S n * m = m + n * m.
Proof.
intros.
reflexivity.
Qed.
Lemma exo2_2 :
forall m : nat, m * 0 = 0.
Proof.
intros.
induction m; simpl.
- reflexivity.
- apply IHm.
Qed.
Lemma associative :
forall n m p : nat, (n + m) + p = n + (m + p).
Proof.
intros.
induction n; simpl.
- reflexivity.
- f_equal. apply IHn.
Qed.
Lemma commutativite :
forall n m : nat, n + m = m + n.
Proof.
intros.
induction m; simpl.
- apply plus_n_0.
- rewrite plus_n_Sm. f_equal. apply IHm.
Qed.
Lemma exo2_2_2 :
forall m n : nat, n * S m = n + n * m.
Proof.
intros.
induction n; simpl.
- reflexivity.
- f_equal. rewrite IHn. rewrite <- 2 associative. f_equal. apply commutativite.
Qed.
Lemma distributivite :
forall n m p : nat, (n + m) * p = n * p + m * p.
Proof.
intros.
induction n; simpl.
- reflexivity.
- rewrite IHn. rewrite <- associative. reflexivity.
Qed.
Lemma mul_commutativite :
forall n m : nat, n * m = m * n.
Proof.
intros.
induction m; simpl.
- apply exo2_2.
- rewrite <- IHm. apply exo2_2_2.
Qed.
Lemma mul_associativite :
forall n m p : nat, n * (m * p) = (n * m) * p.
Proof.
intros.
induction n; simpl.
- reflexivity.
- rewrite IHn. rewrite distributivite. reflexivity.
Qed.
Definition le (n m : nat) := exists p, n + p = m.
Lemma le_refl : forall n, le n n.
Proof.
intros.
exists 0.
apply plus_n_0.
Qed.
Lemma le_trans : forall n m p, le n m -> le m p -> le n p.
Proof.
intros.
destruct H.
destruct H.
destruct H0.
exists (x + x0).
rewrite <- associative.
apply H.
Qed.
Lemma le_antisym : forall n m, le n m -> le m n -> n = m.
Proof.
intros.
destruct H.
destruct H.
induction n; simpl.
- destruct H0. |
module Minecraft.Base.PreClassic.Stone.Export
import public Minecraft.Base.PreClassic.Stone.Block.Export
import public Minecraft.Base.PreClassic.Stone.Item.Export
import public Minecraft.Base.PreClassic.Stone.ItemEntity.Export
%default total
|
Formal statement is: lemma abs_triangle_half_r: fixes y :: "'a::linordered_field" shows "abs (y - x1) < e / 2 \<Longrightarrow> abs (y - x2) < e / 2 \<Longrightarrow> abs (x1 - x2) < e" Informal statement is: If $|y - x_1| < \frac{e}{2}$ and $|y - x_2| < \frac{e}{2}$, then $|x_1 - x_2| < e$. |
module Min where
open import Data.Nat using (ℕ; zero; suc)
open import Algebra.Bundles using (CommutativeRing)
open import Algebra.Module.Bundles using (Module)
open import Data.Product using (Σ-syntax; ∃-syntax; _×_; proj₁; proj₂; _,_)
open import Data.Sum using (_⊎_; inj₁; inj₂)
open import Relation.Binary.Core using (Rel)
import Algebra.Module.Construct.Zero as Zero
import Algebra.Module.Construct.DirectProduct as Prod
import Algebra.Module.Construct.TensorUnit as Unit
module _
{r ℓr} {CR : CommutativeRing r ℓr}
{ma ℓma} (MA : Module CR ma ℓma)
{mb ℓmb} (MB : Module CR mb ℓmb)
where
open import Linear MA MB
module _
{rel} (_<_ : Rel (CommutativeRing.Carrier CR) rel)
(∥_∥ᴬ : A → R)
(∥_∥ᴮ : B → R)
(_÷_ : B → R → B)
where
_LimitOf_x→_ Limit-syntax : (L : B) (f : A → B) (c : A)→ Set _
L LimitOf f x→ c = ∀ ε → ∃[ δ ] ∥ (f (c +ᴬ δ) -ᴮ L) ∥ᴮ < ∥ ε ∥ᴮ
Limit-syntax = _LimitOf_x→_
syntax Limit-syntax L (λ h → f) c = L LimitOf f ∶ h ⟶ c
Diff' : (f : A → B) (x : A) (f' : A → A → B) → Set _
Diff' f x f' = 0ᴮ LimitOf tmp dx ∶ dx ⟶ 0ᴬ
where
tmp : (dx : A) → B
tmp dx = (f (x +ᴬ dx) -ᴮ f x -ᴮ f' x dx) ÷ ∥ dx ∥ᴬ
Differentiable : (f : A → B) → A → Set _
Differentiable f x =
Σ[ f' ∈ (A → A → B) ]
Linear (f' x)
-- × ∀ dy → ∃[ dx ] (∥ ((f (x +ᴬ dx) -ᴮ f x) -ᴮ (f' x dx)) ∥ᴮ) < (∥ dy ∥ᴮ)
D : {f : A → B} {x : A} (d : Differentiable f x) → A → B
D {x = x} d = proj₁ d x
n-module : ∀ {a b} {S : CommutativeRing a b} → ℕ → Module S a b
n-module zero = Zero.⟨module⟩
n-module (suc n) = Prod.⟨module⟩ Unit.⟨module⟩ (n-module n)
open import Tactic.RingSolver
module Line {a b} (CR : CommutativeRing a b) where
open CommutativeRing CR
open import Relation.Binary.Reasoning.Setoid setoid
open import Tactic.RingSolver.Core.AlmostCommutativeRing
line : Carrier → Carrier → Carrier → Carrier
line m b x = m * x + b
x+y-x≈y : ∀ x y → (x + y) - x ≈ y
x+y-x≈y x y =
begin
(x + y) - x
≈⟨ +-assoc x y (- x) ⟩
x + (y - x)
≈⟨ +-cong refl (+-comm y (- x)) ⟩
x + (- x + y)
≈⟨ sym (+-assoc x (- x) y) ⟩
(x - x) + y
≈⟨ +-cong (proj₂ -‿inverse x) refl ⟩
0# + y
≈⟨ +-identityˡ y ⟩
y
∎
x+y+z≈x+z+y : ∀ x y z → x + y + z ≈ x + z + y
x+y+z≈x+z+y x y z =
begin
(x + y) + z
≈⟨ +-assoc x y z ⟩
x + (y + z)
≈⟨ +-cong refl (+-comm y z) ⟩
x + (z + y)
≈⟨ sym (+-assoc x z y) ⟩
(x + z) + y
∎
linear-diff : ∀ m b x dy → line m b (x + dy) - line m b x ≈ m * dy
linear-diff m b x dy =
begin
m * (x + dy) + b - (m * x + b)
≈⟨ +-cong (+-cong (distribˡ m x dy) refl) refl ⟩
m * x + m * dy + b - (m * x + b)
≈⟨ +-cong (x+y+z≈x+z+y (m * x) (m * dy) b) refl ⟩
m * x + b + m * dy - (m * x + b)
≈⟨ x+y-x≈y (m * x + b) (m * dy) ⟩
m * dy
∎
|
module Proof where
open import Prelude
open import Lambda
open import Subst
open import Trans
open import Reduction
import Chain
open module C = Chain _≤_ (\x -> refl-≤) (\x y z -> trans-≤)
renaming (_===_by_ to _<≤>_by_)
data SN {Γ : Ctx}{τ : Type}(t : Term Γ τ) : Set where
bound : (n : Nat) ->
({u : Term Γ τ}(r : t ⟶β* u) -> length r ≤ n) -> SN t
SNˢ : forall {Γ Δ} -> Terms Γ Δ -> Set
SNˢ ts = All² SN ts
-- Let's prove a simple lemma
lem-SN⟶β : {Γ : Ctx}{τ : Type}{t u : Term Γ τ} ->
SN t -> t ⟶β* u -> SN u
lem-SN⟶β {Γ}{τ}{t}{u}(bound n cap) r = bound n \r' ->
chain> length r'
<≤> length r + length r' by lem-≤+L (length r)
<≤> length (r ▹◃ r') by refl-≤' (lem-length▹◃ r r')
<≤> n by cap (r ▹◃ r')
qed
lem-SN-map : {Γ Δ : Ctx}{σ τ : Type}
(tm : Term Γ σ -> Term Δ τ) ->
(f : {t u : Term Γ σ} -> t ⟶β u -> tm t ⟶β tm u)
{t : Term Γ σ} -> SN (tm t) -> SN t
lem-SN-map tm f (bound n p) = bound n \r ->
chain> length r
<≤> length {R = _⟶β_} (map tm f r)
by refl-≤' (lem-length-map tm f r)
<≤> n by p (map tm f r)
qed
lem-SN•L : {Γ : Ctx}{σ τ : Type}{t : Term Γ (σ ⟶ τ)}{u : Term Γ σ} ->
SN (t • u) -> SN t
lem-SN•L {u = u} = lem-SN-map (\v -> v • u) •⟶L
lem-SN↑ : {Γ : Ctx}(Δ : Ctx){σ : Type}{t : Term Γ σ} ->
SN (t ↑ Δ) -> SN t
lem-SN↑ Δ = lem-SN-map (\v -> v ↑ Δ) (↑⟶β Δ)
lem-SN-x : {Γ Δ : Ctx}{σ : Type}(x : Var Γ (Δ ⇒ σ))
{ts : Terms Γ Δ} -> SNˢ ts -> SN (var x •ˢ ts)
lem-SN-x x ∅² = bound zero red-var
where
red-var : forall {u} -> (r : var x ⟶β* u) -> length r ≤ 0
red-var ()
lem-SN-x x (_◄²_ {x = t}{xs = ts} snts snt) = {! !}
where
sn-xts : SN (var x •ˢ ts)
sn-xts = lem-SN-x x snts
infix 30 ⟦_⟧ ∋_
⟦_⟧ ∋_ : (τ : Type){Γ : Ctx} -> Term Γ τ -> Set
⟦ ι ⟧ ∋ t = SN t
⟦ σ ⟶ τ ⟧ ∋ t = forall {Δ}(u : Term (_ ++ Δ) σ) ->
⟦ σ ⟧ ∋ u -> ⟦ τ ⟧ ∋ t ↑ Δ • u
mutual
lem-⟦⟧⊆SN : (σ : Type){Γ : Ctx}{t : Term Γ σ} ->
⟦ σ ⟧ ∋ t -> SN t
lem-⟦⟧⊆SN ι okt = okt
lem-⟦⟧⊆SN (σ ⟶ τ) {Γ}{t} okt = lem-SN↑ (ε , σ) sn-t↑
where
ih : {Δ : Ctx}{u : Term Δ τ} -> ⟦ τ ⟧ ∋ u -> SN u
ih = lem-⟦⟧⊆SN τ
sn• : (Δ : Ctx)(u : Term (Γ ++ Δ) σ) -> ⟦ σ ⟧ ∋ u -> SN (t ↑ Δ • u)
sn• Δ u h = ih (okt {Δ} u h)
sn-t↑ : SN (wk t)
sn-t↑ = lem-SN•L (sn• (ε , σ) vz (lem-⟦⟧ˣ σ vzero ∅²))
lem-⟦⟧ˣ : (σ : Type){Γ Δ : Ctx}(x : Var Γ (Δ ⇒ σ)){ts : Terms Γ Δ} ->
SNˢ ts -> ⟦ σ ⟧ ∋ var x •ˢ ts
lem-⟦⟧ˣ ι x snts = lem-SN-x x snts
lem-⟦⟧ˣ (σ ⟶ τ) {Γ}{Δ} x {ts} snts = \u oku -> {! !}
where
snts↑ : (Δ : Ctx) -> SNˢ (ts ↑ˢ Δ)
snts↑ Δ = {! !}
rem : (Δ : Ctx)(u : Term (Γ ++ Δ) σ) ->
⟦ σ ⟧ ∋ u -> ⟦ τ ⟧ ∋ var (x ↑ˣ Δ) •ˢ ts ↑ˢ Δ • u
rem Δ u oku = lem-⟦⟧ˣ τ (x ↑ˣ Δ) (snts↑ Δ ◄² lem-⟦⟧⊆SN σ oku)
lem-⟦⟧subst : {Γ Δ : Ctx}{τ : Type}(σ : Type)
{t : Term (Γ , τ) (Δ ⇒ σ)}{u : Term Γ τ}{vs : Terms Γ Δ} ->
⟦ σ ⟧ ∋ (t / [ u ]) •ˢ vs -> ⟦ σ ⟧ ∋ (ƛ t) • u •ˢ vs
lem-⟦⟧subst ι h = {!h !}
lem-⟦⟧subst (σ₁ ⟶ σ₂) h = {! !}
|
-- Andreas, 2019-07-05, during work on issue #3889
-- Test-case for with extracted from the standard library
{-# OPTIONS --cubical-compatible #-}
open import Agda.Primitive
open import Agda.Builtin.Equality
open import Agda.Builtin.List
open import Common.Equality
open import Common.Product
data Any {a}{A : Set a} {p} (P : A → Set p) : List A → Set (a ⊔ p) where
here : ∀ {x xs} (px : P x) → Any P (x ∷ xs)
there : ∀ {x xs} (pxs : Any P xs) → Any P (x ∷ xs)
_∈_ : ∀{a}{A : Set a} → A → List A → Set _
x ∈ xs = Any (x ≡_) xs
map : ∀ {a} {A : Set a} {p q} {P : A → Set p} {Q : A → Set q} → (∀{x} → P x → Q x) → ∀{xs} → Any P xs → Any Q xs
map g (here px) = here (g px)
map g (there pxs) = there (map g pxs)
map₁ : ∀ {a b c} {A : Set a} {B : Set b} {C : Set c} →
(A → B) → A × C → B × C
map₁ f (x , y)= f x , y
map₂ : ∀ {a b c} {A : Set a} {B : A → Set b} {C : A → Set c} →
(∀ {x} → B x → C x) → Σ A B → Σ A C
map₂ f (x , y) = (x , f y)
find : ∀ {a} {p} {A : Set a} {P : A → Set p} {xs} → Any P xs → Σ _ λ x → x ∈ xs × P x
find (here px) = (_ , here refl , px)
find (there pxs) = map₂ (map₁ there) (find pxs)
lose : ∀ {a} {p} {A : Set a} {P : A → Set p} {x xs} → x ∈ xs → P x → Any P xs
lose x∈xs px = map (λ eq → subst _ eq px) x∈xs
map∘find : ∀ {a p} {A : Set a} {P : A → Set p} {xs}
(p : Any P xs) → let p′ = find p in
{f : ∀{x} → proj₁ p′ ≡ x → P x} →
f refl ≡ proj₂ (proj₂ p′) →
map f (proj₁ (proj₂ p′)) ≡ p
map∘find (here p) hyp = cong here hyp
map∘find (there p) hyp = cong there (map∘find p hyp)
find∘map : ∀ {a p q} {A : Set a} {P : A → Set p} {Q : A → Set q}
{xs : List A} (p : Any P xs) (f : ∀{x} → P x → Q x) →
find (map f p) ≡ map₂ (map₂ f) (find p)
find∘map (here p) f = refl
find∘map (there p) f rewrite find∘map p f = refl
lose∘find : ∀ {a p} {A : Set a} {P : A → Set p} {xs : List A}
(p : Any P xs) →
(let (y , z) = proj₂ (find p)) →
lose y z ≡ p
lose∘find p = map∘find p refl
postulate
a b ℓ : Level
A : Set a
B : Set b
P : A → Set ℓ
Q : B → Set ℓ -- Level needed
Any-×⁺ : ∀ {xs ys} → Any P xs × Any Q ys →
Any (λ x → Any (λ y → P x × Q y) ys) xs
Any-×⁺ (p , q) = map (λ p → map (λ q → (p , q)) q) p
Any-×⁻ : ∀ {xs ys} → Any (λ x → Any (λ y → P x × Q y) ys) xs →
Any P xs × Any Q ys
Any-×⁻ pq with map₂ (map₂ find) (find pq)
... | (x , x∈xs , y , y∈ys , p , q) = (lose x∈xs p , lose y∈ys q)
module _ where
from∘to : ∀{xs ys} (pq : Any P xs × Any Q ys) → Any-×⁻ (Any-×⁺ pq) ≡ pq
-- from∘to (p , q) = {!!}
from∘to (p , q) rewrite
find∘map p (λ p → map (λ q → (p , q)) q)
| find∘map q (λ q → proj₂ (proj₂ (find p)) , q)
| lose∘find p
| lose∘find q
= refl
|
great(jj1,kk1).
great(y1,ll1).
great(c1,y1).
great(g1,ll1).
great(u1,b1).
great(u1,n1).
great(f1,o1).
great(f1,h1).
great(q1,cc1).
great(c1,ii1).
great(i1,v1).
great(c1,aa1).
great(e1,ll1).
great(e1,w1).
great(o1,n1).
great(ii1,t1).
great(r1,ee1).
great(q1,o1).
great(dd1,bb1).
great(l1,w1).
great(k1,ll1).
great(b1,p1).
great(ff1,n1).
great(bb1,ll1).
great(z1,ee1).
great(ii1,cc1).
great(y1,p1).
great(ff1,aa1).
great(f1,aa1).
great(jj1,ee1).
great(f1,dd1).
great(jj1,v1).
great(l1,dd1).
great(e1,q1).
great(ii1,g1).
great(a1,y1).
great(d1,q1).
great(f1,x1).
great(dd1,aa1).
great(d1,i1).
great(r1,x1).
great(e1,j1).
great(h1,g1).
great(c1,ee1).
great(b1,t1).
great(f1,kk1).
great(ee1,t1).
great(x1,o1).
great(l1,ll1).
great(ee1,bb1).
great(r1,dd1).
great(c1,q1).
great(aa1,t1).
great(z1,o1).
great(kk1,b1).
great(g1,n1).
great(t1,o1).
great(h1,i1).
great(dd1,ee1).
great(i1,m1).
great(h1,n1).
great(aa1,ll1).
great(v1,ll1).
great(d1,x1).
great(jj1,bb1).
great(s1,w1).
|
section \<open>Recursive inseparability\<close>
theory Recursive_Inseparability
imports "Recursion-Theory-I.RecEnSet"
begin
text \<open>Two sets $A$ and $B$ are recursively inseparable if there is no computable set that
contains $A$ and is disjoint from $B$. In particular, a set is non-computable if the set and its
complement are recursively inseparable. The terminology was introduced by Smullyan~@{cite R58}.
The underlying idea can be traced back to Rosser, who essentially showed that provable and
disprovable sentences are \emph{arithmetically} inseparable in Peano Arithmetic~@{cite R36};
see also Kleene's symmetric version of Gödel's incompleteness theorem~@{cite K52}.
Here we formalize recursive inseparability on top of the \texttt{Recursion-Theory-I} AFP
entry~@{cite RTI}. Our main result is a version of Rice's theorem that states that the index
sets of any two given recursively enumerable sets are recursively inseparable.\<close>
subsection \<open>Definition and basic facts\<close>
text \<open>Two sets $A$ and $B$ are recursively inseparable if there is no decidable set $X$ such
that $A$ is a subset of $X$ and $X$ is disjoint from $B$.\<close>
definition rec_inseparable where
"rec_inseparable A B \<equiv> \<forall>X. A \<subseteq> X \<and> B \<subseteq> - X \<longrightarrow> \<not> computable X"
lemma rec_inseparableI:
"(\<And>X. A \<subseteq> X \<Longrightarrow> B \<subseteq> - X \<Longrightarrow> computable X \<Longrightarrow> False) \<Longrightarrow> rec_inseparable A B"
unfolding rec_inseparable_def by blast
lemma rec_inseparableD:
"rec_inseparable A B \<Longrightarrow> A \<subseteq> X \<Longrightarrow> B \<subseteq> - X \<Longrightarrow> computable X \<Longrightarrow> False"
unfolding rec_inseparable_def by blast
text \<open>Recursive inseparability is symmetric and enjoys a monotonicity property.\<close>
lemma rec_inseparable_symmetric:
"rec_inseparable A B \<Longrightarrow> rec_inseparable B A"
unfolding rec_inseparable_def computable_def by (metis double_compl)
lemma rec_inseparable_mono:
"rec_inseparable A B \<Longrightarrow> A \<subseteq> A' \<Longrightarrow> B \<subseteq> B' \<Longrightarrow> rec_inseparable A' B'"
unfolding rec_inseparable_def by (meson subset_trans)
text \<open>Many-to-one reductions apply to recursive inseparability as well.\<close>
lemma rec_inseparable_many_reducible:
assumes "total_recursive f" "rec_inseparable (f -` A) (f -` B)"
shows "rec_inseparable A B"
proof (intro rec_inseparableI)
fix X assume "A \<subseteq> X" "B \<subseteq> - X" "computable X"
moreover have "many_reducible_to (f -` X) X" using assms(1)
by (auto simp: many_reducible_to_def many_reducible_to_via_def)
ultimately have "computable (f -` X)" and "(f -` A) \<subseteq> (f -` X)" and "(f -` B) \<subseteq> - (f -` X)"
by (auto dest!: m_red_to_comp)
then show "False" using assms(2) unfolding rec_inseparable_def by blast
qed
text \<open>Recursive inseparability of $A$ and $B$ holds vacuously if $A$ and $B$ are not disjoint.\<close>
lemma rec_inseparable_collapse:
"A \<inter> B \<noteq> {} \<Longrightarrow> rec_inseparable A B"
by (auto simp: rec_inseparable_def)
text \<open>Recursive inseparability is intimately connected to non-computability.\<close>
lemma rec_inseparable_non_computable:
"A \<inter> B = {} \<Longrightarrow> rec_inseparable A B \<Longrightarrow> \<not> computable A"
by (auto simp: rec_inseparable_def)
lemma computable_rec_inseparable_conv:
"computable A \<longleftrightarrow> \<not> rec_inseparable A (- A)"
by (auto simp: computable_def rec_inseparable_def)
subsection \<open>Rice's theorem\<close>
text \<open>We provide a stronger version of Rice's theorem compared to @{cite RTI}.
Unfolding the definition of recursive inseparability, it states that there is no decidable
set $X$ such that
\begin{itemize}
\item there is a r.e.\ set such that all its indices are elements of $X$; and
\item there is a r.e.\ set such that none of its indices are elements of $X$.
\end{itemize}
This is true even if $X$ is not an index set (i.e., if an index of a r.e.\ set is an element
of $X$, then $X$ contains all indices of that r.e.\ set), which is a requirement of Rice's
theorem in @{cite RTI}.\<close>
lemma Rice_rec_inseparable:
"rec_inseparable {k. nat_to_ce_set k = nat_to_ce_set n} {k. nat_to_ce_set k = nat_to_ce_set m}"
proof (intro rec_inseparableI, goal_cases)
case (1 X)
text \<open>Note that @{thm Rice_2} is not applicable because X may not be an index set.\<close>
let ?Q = "{q. s_ce q q \<in> X} \<times> nat_to_ce_set m \<union> {q. s_ce q q \<in> - X} \<times> nat_to_ce_set n"
have "?Q \<in> ce_rels"
using 1(3) ce_set_lm_5 comp2_1[OF s_ce_is_pr id1_1 id1_1] unfolding computable_def
by (intro ce_union[of "ce_rel_to_set _" "ce_rel_to_set _", folded ce_rel_lm_32 ce_rel_lm_8]
ce_rel_lm_29 nat_to_ce_set_into_ce) blast+
then obtain q where "nat_to_ce_set q = {c_pair q x |q x. (q, x) \<in> ?Q}"
unfolding ce_rel_lm_8 ce_rel_to_set_def by (metis (no_types, lifting) nat_to_ce_set_srj)
from eqset_imp_iff[OF this, of "c_pair q _"]
have "nat_to_ce_set (s_ce q q) = (if s_ce q q \<in> X then nat_to_ce_set m else nat_to_ce_set n)"
by (auto simp: s_lm c_pair_inj' nat_to_ce_set_def fn_to_set_def pr_conv_1_to_2_def)
then show ?case using 1(1,2)[THEN subsetD, of "s_ce q q"] by (auto split: if_splits)
qed
end |
theory "HoareTripleForStorage"
imports "HoareTripleForInstructions"
begin
lemma storage_inst_advance [simp] :
"program_content (cctx_program co_ctx) (vctx_pc x1) = Some (Storage m) \<Longrightarrow>
k = vctx_pc x1 \<Longrightarrow>
vctx_pc (vctx_advance_pc co_ctx x1) = vctx_pc x1 + 1"
by (simp add: vctx_next_instruction_def
vctx_advance_pc_def inst_size_def inst_code.simps)
lemma update_storage_preserves_pc [simp] :
"vctx_pc (vctx_update_storage idx new x1) = vctx_pc x1"
by (simp add: vctx_update_storage_def)
lemma update_storage_updates [simp] :
"vctx_storage (vctx_update_storage idx new x1) idx = new"
by (simp add: vctx_update_storage_def)
lemma update_storage_preserves_gas [simp] :
"vctx_gas (vctx_update_storage idx new x1) = vctx_gas x1"
by (simp add: vctx_update_storage_def)
lemma some_list_gotcha :
" rev ta ! fst x2 = snd x2 \<longrightarrow> \<not> fst x2 < length ta \<Longrightarrow>
x2 \<noteq> (Suc (length ta), idx) \<Longrightarrow>
x2 \<noteq> (length ta, new) \<Longrightarrow>
(fst x2 < length ta \<and> rev ta ! fst x2 = snd x2 \<or>
length ta \<le> fst x2 \<and> [new, idx] ! (fst x2 - length ta) = snd x2) \<and>
fst x2 < Suc (Suc (length ta)) \<Longrightarrow>
elm = StackElm x2 \<Longrightarrow> False"
apply(case_tac x2; auto)
apply(case_tac "a - length ta"; simp)
apply(rename_tac smaller)
apply(case_tac smaller; simp)
apply(case_tac "a - length ta"; simp)
apply(rename_tac smaller)
apply(case_tac smaller; simp)
done
lemma next_state_noop[simp]:
"next_state stopper c net (InstructionToEnvironment x y z) = (InstructionToEnvironment x y z)"
by (simp add: next_state_def)+
lemmas hoare_simps = stateelm_means_simps stateelm_equiv_simps
next_state_def rev_nth_simps instruction_sem_simps gas_value_simps
inst_numbers_simps instruction_failure_result_def
advance_pc_simps
method hoare_sep uses sep simp dest split =
((sep_simp simp: sep)+,
clarsimp simp: simp dest:dest split:split)
lemma sstore_gas_triple :
"triple net {OutOfGas}
(\<langle> h \<le> 1024\<rangle>
** stack_height (h + 2)
** stack (h + 1) idx
** stack h new
** program_counter k ** storage idx old
** gas_pred g ** continuing)
{(k, Storage SSTORE)}
(stack_height h
** program_counter (k + 1) ** storage idx new **
gas_pred (g - Csstore old new) ** continuing)"
apply (clarsimp simp: triple_def)
apply(rule_tac x = 1 in exI)
apply (clarsimp simp add: program_sem.simps next_state_def failed_for_reasons_def)
apply(case_tac presult ; (solves \<open>(hoare_sep sep: evm_sep simp: stateelm_means_simps dest: stateelm_dest)\<close>) ?)
apply (hoare_sep sep: evm_sep
simp: instruction_result_as_set_def sstore_def
vctx_update_storage_def hoare_simps
split:if_splits)
apply (erule_tac P=rest in back_subst)
apply(rule Set.equalityI; clarify)
apply(rename_tac elm)
apply(simp add: vctx_update_storage_def)
apply (case_tac elm; simp add: hoare_simps split:if_splits)
using some_list_gotcha gasprice_advance_pc apply blast
apply(rename_tac elm)
apply (simp add: set_diff_eq)
apply (case_tac elm; simp add: hoare_simps split:if_splits)
apply auto
done
lemma sload_gas_triple :
"triple net {OutOfGas}
(\<langle> h \<le> 1023 \<and> unat bn \<ge> 2463000 \<and> at_least_eip150 net\<rangle>
** block_number_pred bn ** stack_height (h + 1)
** stack h idx
** program_counter k ** storage idx w ** gas_pred g ** account_existence c existence ** continuing)
{(k, Storage SLOAD)}
(block_number_pred bn ** stack_height (h + 1) ** stack h w
** program_counter (k + 1) ** storage idx w ** gas_pred (g - Gsload net) ** account_existence c existence ** continuing )"
apply(clarsimp simp add: triple_def)
apply(rule_tac x = 1 in exI)
apply(clarsimp simp add: program_sem.simps next_state_def failed_for_reasons_def)
apply(case_tac presult; (solves \<open>(hoare_sep sep: evm_sep simp: set_diff_eq stateelm_means_simps dest: stateelm_dest)\<close>)?)
apply clarsimp
apply(hoare_sep sep: evm_sep
simp: instruction_result_as_set_def sstore_def
vctx_update_storage_def hoare_simps set_diff_eq
split:if_split_asm)
apply(erule_tac P=rest in back_subst)
apply(rule Set.equalityI; clarify)
apply(simp)
apply(rename_tac elm)
apply(case_tac elm; simp add: hoare_simps split:if_splits)
apply(rename_tac pair)
apply(case_tac pair; fastforce)
apply(simp)
apply(rename_tac elm)
apply(case_tac elm; simp add: hoare_simps split:if_splits)
apply(rename_tac pair)
apply(case_tac pair; fastforce)
done
end |
[STATEMENT]
lemma frechet_derivative_zero_fun: "frechet_derivative 0 (at a) = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. frechet_derivative 0 (at a) = 0
[PROOF STEP]
by (auto simp: frechet_derivative_const zero_fun_def) |
lemma Zfun_diff: "Zfun f F \<Longrightarrow> Zfun g F \<Longrightarrow> Zfun (\<lambda>x. f x - g x) F" |
! { dg-do compile }
! PR fortran/51993
! Code contributed by Sebastien Bardeau <bardeau at iram dot fr>
module mymod
type :: mytyp
character(len=3) :: a = .true. ! { dg-error "convert LOGICAL" }
end type mytyp
end module mymod
|
# Method
# 1. Argument reduction: Reduce x to an r so that |r| <= 0.5*log10(2). Given x,
# find r and integer k such that
#
# x = k*log10(2) + r, |r| <= 0.5*log10(2).
#
# 2. Approximate exp10(r) by a polynomial on the interval [-0.5*log10(2), 0.5*log10(2)]:
#
# exp10(x) = 1.0 + polynomial(x),
#
# sup norm relative error within the interval of the polynomial approximations:
# Float64 : [2.7245504724394698952e-18; 2.7245529895753476720e-18]
# Float32 : [9.6026471477842205871e-10; 9.6026560194009888672e-10]
#
# 3. Scale back: exp10(x) = 2^k * exp10(r)
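#
# For illustration only (not part of the implementation below; `x`, `k` and `r`
# are just example names), the reduction and reconstruction can be reproduced
# with Base functions:
#
#     x = 3.7
#     k = round(Int, x * log2(10))   # nearest integer, here k = 12
#     r = x - k * log10(2)           # remainder with |r| <= 0.5*log10(2)
#     2.0^k * 10.0^r                 # ≈ 10.0^x = exp10(x), up to rounding
#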
# log2(10)
const LOG2_10 = 3.321928094887362347870319429489390175864831393024580612054756395815934776608624
# log10(2)
const LOG10_2 = 3.010299956639811952137388947244930267681898814621085413104274611271081892744238e-01
# log(10)
const LN10 = 2.302585092994045684017991454684364207601101488628772976033327900967572609677367
# log10(2) into upper and lower bits
LOG10_2U(::Type{Float64}) = 3.01025390625000000000e-1
LOG10_2U(::Type{Float32}) = 3.00781250000000000000f-1
LOG10_2L(::Type{Float64}) = 4.60503898119521373889e-6
LOG10_2L(::Type{Float32}) = 2.48745663981195213739f-4
# max and min arguments
MAX_EXP10(::Type{Float64}) = 3.08254715559916743851e2 # log10 2^1023*(2-2^-52)
MAX_EXP10(::Type{Float32}) = 38.531839419103626f0 # log10 2^127 *(2-2^-23)
# one less than the min exponent since we can squeeze a bit more from the exp10 function
MIN_EXP10(::Type{Float64}) = -3.23607245338779784854769e2 # log10 2^-1075
MIN_EXP10(::Type{Float32}) = -45.15449934959718f0 # log10 2^-150
@inline exp10_kernel(x::Float64) =
@horner(x, 1.0,
2.30258509299404590109361379290930926799774169921875,
2.6509490552391992146397114993305876851081848144531,
2.03467859229323178027470930828712880611419677734375,
1.17125514891212478829629617393948137760162353515625,
0.53938292928868392106522833273629657924175262451172,
0.20699584873167015119932443667494226247072219848633,
6.8089348259156870502017966373387025669217109680176e-2,
1.9597690535095281527677713029333972372114658355713e-2,
5.015553121397981796436571499953060992993414402008e-3,
1.15474960721768829356725927226534622604958713054657e-3,
1.55440426715227567738830671828509366605430841445923e-4,
3.8731032432074128681303432086835414338565897196531e-5,
2.3804466459036747669197886523306806338950991630554e-3,
9.3881392238209649520573607528461934634833596646786e-5,
-2.64330486232183387018679354696359951049089431762695e-2)
@inline exp10_kernel(x::Float32) =
@horner(x, 1.0f0,
2.302585124969482421875f0,
2.650949001312255859375f0,
2.0346698760986328125f0,
1.17125606536865234375f0,
0.5400512218475341796875f0,
0.20749187469482421875f0,
5.2789829671382904052734375f-2)
@eval exp10_small_thres(::Type{Float64}) = $(2.0^-29)
@eval exp10_small_thres(::Type{Float32}) = $(2.0f0^-14)
"""
exp10(x)
Compute ``10^x``.
# Examples
```jldoctest
julia> exp10(2)
100.0
julia> exp10(0.2)
1.5848931924611136
```
"""
function exp10(x::T) where T<:Union{Float32,Float64}
xa = reinterpret(Unsigned, x) & ~sign_mask(T)
xsb = signbit(x)
# filter out non-finite arguments
if xa > reinterpret(Unsigned, MAX_EXP10(T))
if xa >= exponent_mask(T)
xa & significand_mask(T) != 0 && return T(NaN)
return xsb ? T(0.0) : T(Inf) # exp10(+-Inf)
end
x > MAX_EXP10(T) && return T(Inf)
x < MIN_EXP10(T) && return T(0.0)
end
# compute approximation
if xa > reinterpret(Unsigned, T(0.5)*T(LOG10_2)) # |x| > 0.5 log10(2).
# argument reduction
if xa < reinterpret(Unsigned, T(1.5)*T(LOG10_2)) # |x| <= 1.5 log10(2)
if xsb
k = -1
r = LOG10_2U(T) + x
r = LOG10_2L(T) + r
else
k = 1
r = x - LOG10_2U(T)
r = r - LOG10_2L(T)
end
else
n = round(T(LOG2_10)*x)
k = unsafe_trunc(Int,n)
r = muladd(n, -LOG10_2U(T), x)
r = muladd(n, -LOG10_2L(T), r)
end
# compute approximation on reduced argument
y = exp10_kernel(r)
# scale back
if k > -significand_bits(T)
# multiply by 2.0 first to prevent overflow, extending the range
k == exponent_max(T) && return y * T(2.0) * T(2.0)^(exponent_max(T) - 1)
twopk = reinterpret(T, rem(exponent_bias(T) + k, fpinttype(T)) << significand_bits(T))
return y*twopk
else
# add significand_bits(T) + 1 to lift the range outside the subnormals
twopk = reinterpret(T, rem(exponent_bias(T) + significand_bits(T) + 1 + k, fpinttype(T)) << significand_bits(T))
return y * twopk * T(2.0)^(-significand_bits(T) - 1)
end
elseif xa < reinterpret(Unsigned, exp10_small_thres(T)) # |x| < exp10_small_thres
# Taylor approximation for small values: exp10(x) ≈ 1.0 + log(10)*x
return muladd(x, T(LN10), T(1.0))
else
# primary range with k = 0, so compute approximation directly
return exp10_kernel(x)
end
end
|
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020 - 2022 Pionix GmbH and Contributors to EVerest
#ifndef OCPP1_6_SETCHARGINGPROFILE_HPP
#define OCPP1_6_SETCHARGINGPROFILE_HPP
#include <boost/optional.hpp>
#include <ocpp1_6/enums.hpp>
#include <ocpp1_6/ocpp_types.hpp>
#include <ocpp1_6/types.hpp>
namespace ocpp1_6 {
/// \brief Contains an OCPP 1.6 SetChargingProfile message
struct SetChargingProfileRequest : public Message {
int32_t connectorId;
ChargingProfile csChargingProfiles;
/// \brief Provides the type of this SetChargingProfile message as a human readable string
/// \returns the message type as a human readable string
std::string get_type() const;
};
/// \brief Conversion from a given SetChargingProfileRequest \p k to a given json object \p j
void to_json(json& j, const SetChargingProfileRequest& k);
/// \brief Conversion from a given json object \p j to a given SetChargingProfileRequest \p k
void from_json(const json& j, SetChargingProfileRequest& k);
/// \brief Writes the string representation of the given SetChargingProfileRequest \p k to the given output stream \p os
/// \returns an output stream with the SetChargingProfileRequest written to it
std::ostream& operator<<(std::ostream& os, const SetChargingProfileRequest& k);
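// Illustrative usage sketch (not part of this header's API; it assumes the `json`
// alias pulled in by the ocpp headers behaves like nlohmann::json and that a valid
// ChargingProfile value `profile` is available):
//
//   SetChargingProfileRequest req;
//   req.connectorId = 1;
//   req.csChargingProfiles = profile;
//   json j;
//   to_json(j, req); // serialize the request to JSON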
/// \brief Contains an OCPP 1.6 SetChargingProfileResponse message
struct SetChargingProfileResponse : public Message {
ChargingProfileStatus status;
/// \brief Provides the type of this SetChargingProfileResponse message as a human readable string
/// \returns the message type as a human readable string
std::string get_type() const;
};
/// \brief Conversion from a given SetChargingProfileResponse \p k to a given json object \p j
void to_json(json& j, const SetChargingProfileResponse& k);
/// \brief Conversion from a given json object \p j to a given SetChargingProfileResponse \p k
void from_json(const json& j, SetChargingProfileResponse& k);
/// \brief Writes the string representation of the given SetChargingProfileResponse \p k to the given output stream \p os
/// \returns an output stream with the SetChargingProfileResponse written to it
std::ostream& operator<<(std::ostream& os, const SetChargingProfileResponse& k);
} // namespace ocpp1_6
#endif // OCPP1_6_SETCHARGINGPROFILE_HPP
|
open import SOAS.Metatheory.Syntax
-- Metasubstitution operation
module SOAS.Metatheory.SecondOrder.Metasubstitution {T : Set}(Syn : Syntax {T}) where
open Syntax Syn
open import SOAS.Metatheory.FreeMonoid Syn
open import SOAS.Common
open import SOAS.Families.Core {T}
open import SOAS.Families.Build
open import SOAS.Context
open import SOAS.Variable
open import SOAS.Construction.Structure as Structure
open import SOAS.ContextMaps.Combinators
open import SOAS.ContextMaps.CategoryOfRenamings
open import SOAS.Abstract.Hom
open import SOAS.Abstract.ExpStrength
import SOAS.Abstract.Coalgebra as →□
open →□.Sorted
import SOAS.Abstract.Box as □ ; open □.Sorted
open import Categories.Monad
open import SOAS.Coalgebraic.Monoid
open import SOAS.Metatheory Syn
private
variable
Γ Δ Π : Ctx
α β τ : T
𝔛 𝔜 ℨ : Familyₛ
𝔐 𝔑 : MCtx
open Theory
-- Ground metasubstitution from the monad structure
msub₀ : (𝔛 ⇾̣ 𝕋 𝔜) → 𝕋 𝔛 ⇾̣ 𝕋 𝔜
msub₀ {𝔛}{𝔜} κ t = μ.η 𝔜 (F.₁ κ t) where open Monad ΣMon:Monad
-- Meta-algebra structure on the exponential [ 𝔛 ⊸ 𝒫 ] ⇨ ℳ
[_⊸_]⇨_ᵃ : (𝔛 {𝒫}{ℳ} : Familyₛ) → Coalg 𝒫 → ΣMon ℳ → (𝒫 ⇾̣ ℳ)
→ MetaAlg 𝔛 ([ 𝔛 ⊸ 𝒫 ] ⇨ ℳ)
[_⊸_]⇨_ᵃ 𝔛 {𝒫}{ℳ} 𝒫ᵇ Σℳᵐ ψ = record
{ 𝑎𝑙𝑔 = λ t ζ → ℳ.𝑎𝑙𝑔 (estr [ 𝔛 ⊸ 𝒫ᵇ ]ᵇ ℳ t ζ)
; 𝑣𝑎𝑟 = λ v ζ → ℳ.η v
; 𝑚𝑣𝑎𝑟 = λ 𝔪 ε ζ → ℳ.μ (ψ (ζ 𝔪)) (copair ℳ (λ x → ε x ζ) ℳ.η)
} where module ℳ = ΣMon Σℳᵐ
□[_⊸_]⇨_ᵃ : (𝔛 {𝒫}{ℳ} : Familyₛ) → Coalg 𝒫 → ΣMon ℳ → (𝒫 ⇾̣ ℳ)
→ MetaAlg 𝔛 ([ 𝔛 ⊸ 𝒫 ] ➡ ℳ)
□[ 𝔛 ⊸ 𝒫ᵇ ]⇨ Σℳᵐ ᵃ ψ = □ᵃ 𝔛 ([ 𝔛 ⊸ 𝒫ᵇ ]⇨ Σℳᵐ ᵃ ψ)
-- Derived meta-algebra instance for [ 𝔛 ⊸ 𝕋 𝔜 ] ⇨ 𝕋 𝔜
⟅_⇨_⟆ᵃ : (𝔛 𝔜 : Familyₛ) → MetaAlg 𝔛 ⟅ 𝔛 ⇨ 𝕋 𝔜 ⟆
⟅ 𝔛 ⇨ 𝔜 ⟆ᵃ = [ 𝔛 ⊸ 𝕋ᵇ 𝔜 ]⇨ Σ𝕋ᵐ 𝔜 ᵃ id
module MS {𝔛 𝔜 : Familyₛ} = Semantics 𝔛 ⟅ 𝔛 ⇨ 𝔜 ⟆ᵃ
module □MS {𝔛 𝔜 : Familyₛ} = □Traversal 𝔛 ⟅ 𝔛 ⇨ 𝔜 ⟆ᵃ
-- Metasubstitution operations
-- Base
msub : 𝕋 𝔛 ⇾̣ ⟅ 𝔛 ⇨ 𝕋 𝔜 ⟆
msub = MS.𝕤𝕖𝕞
-- Parametrised
□msub : 𝕋 𝔛 ⇾̣ ⟅ 𝔛 ➡ 𝕋 𝔜 ⟆
□msub = □MS.𝕥𝕣𝕒𝕧
-- Linear
○msub : 𝕋 𝔛 ⇾̣ ⟅ 𝔛 ⊸ 𝕋 𝔜 ⟆
○msub {𝔜 = 𝔜}{Γ = Γ} t ζ = □msub t (inl Γ) λ {_}{Π} 𝔪 → 𝕣𝕖𝕟 𝔜 (ζ 𝔪) (Π ∔∣ inr Γ)
-- Unit parametrisation
□msub-id : (t : 𝕋 𝔛 α Γ)(κ : [ 𝔛 ⊸ 𝕋 𝔜 ] Γ) → □msub t id κ ≡ msub t κ
□msub-id {𝔛}{𝔜 = 𝔜} t κ = cong (λ - → - κ) (□𝕥𝕣𝕒𝕧-id≈𝕤𝕖𝕞 𝔛 ⟅ 𝔛 ⇨ 𝔜 ⟆ᵃ)
-- Unit metasubstitution mapping
ms-unit : [ 𝔛 ⊸ 𝕋 𝔛 ] Γ
ms-unit {𝔛}{Δ = Δ} 𝔪 = 𝕞𝕧𝕒𝕣 𝔛 𝔪 (𝕧𝕒𝕣 𝔛 ∘ inl Δ)
-- | Inductive metasubstitution
-- List of terms in an extended (object variable) context mapped to every element of a metavariable context
data MSub (Γ : Ctx) : MCtx → MCtx → Set₁ where
◦ : MSub Γ ⁅⁆ 𝔑
_◃_ : (𝔑 ▷ 𝕋) α (Π ∔ Γ) → MSub Γ 𝔐 𝔑 → MSub Γ (⁅ Π ⊩ₙ α ⁆ 𝔐) 𝔑
infixr 15 _◃_ _▹_
-- Add term to the end of a metasubstitution map
_▹_ : MSub Γ 𝔐 𝔑 → (𝔑 ▷ 𝕋) τ (Π ∔ Γ) → MSub Γ (𝔐 ⁅ Π ⊩ₙ τ ⁆) 𝔑
◦ ▹ t = t ◃ ◦
(s ◃ ζ) ▹ t = s ◃ (ζ ▹ t)
-- Application of a metasubstitution to a metavariable
ix≀ : MSub Γ 𝔐 𝔑 → [ ∥ 𝔐 ∥ ⊸ 𝔑 ▷ 𝕋 ] Γ
ix≀ (t ◃ ζ) ↓ = t
ix≀ (t ◃ ζ) (↑ 𝔪) = ix≀ ζ 𝔪
-- Term corresponding to the topmost distinguished metavariable of an extended mvar context
_⊩◌ : (Π : Ctx) → (⁅ Π ⊩ₙ β ⁆ 𝔐 ▷ 𝕋) β (Π ∔ Γ)
_⊩◌ {β}{𝔐} Π = ms-unit ↓
◌ : (⁅ β ⁆ 𝔐 ▷ 𝕋) β Γ
◌ = ∅ ⊩◌
-- Weakening of metavariable context
wk≀ : (𝔐 ▷ 𝕋) α Γ → (⁅ Π ⊩ₙ τ ⁆ 𝔐 ▷ 𝕋) α Γ
wk≀ t = 𝕋₁ ↑_ t
-- Extension of the codomain of a metasubstitution
ext≀ : (Π : Ctx)(τ : T) → MSub Γ 𝔐 𝔑 → MSub Γ 𝔐 (⁅ Π ⊩ₙ τ ⁆ 𝔑)
ext≀ Π τ ◦ = ◦
ext≀ Π τ (t ◃ κ) = wk≀ t ◃ (ext≀ Π τ κ)
-- Lifting of a metasubstitution
lift≀ : (Π : Ctx)(τ : T) → MSub Γ 𝔐 𝔑 → MSub Γ (⁅ Π ⊩ₙ τ ⁆ 𝔐) (⁅ Π ⊩ₙ τ ⁆ 𝔑)
lift≀ Π τ κ = (Π ⊩◌) ◃ (ext≀ Π τ κ)
-- Identity metasubstitution
id≀ : (Γ : Ctx) → MSub Γ 𝔐 𝔐
id≀ {⁅⁆} Γ = ◦
id≀ {⁅ Π ⊩ₙ τ ⁆ 𝔐} Γ = lift≀ Π τ (id≀ Γ)
-- Left and right weakening of object context of a metasubstitution
inl≀ : MSub Γ 𝔐 𝔑 → MSub (Γ ∔ Δ) 𝔐 𝔑
inl≀ ◦ = ◦
inl≀ {𝔑 = 𝔑} (_◃_ {Π = Π} t κ) = 𝕣𝕖𝕟 ∥ 𝔑 ∥ t (Π ∔∣ inl _) ◃ (inl≀ κ)
inr≀ : (Γ : Ctx) → MSub Δ 𝔐 𝔑 → MSub (Γ ∔ Δ) 𝔐 𝔑
inr≀ _ ◦ = ◦
inr≀ {Δ}{𝔑 = 𝔑} Γ (_◃_ {Π = Π} t κ) = (𝕣𝕖𝕟 ∥ 𝔑 ∥ t (Π ∔∣ inr Γ)) ◃ (inr≀ Γ κ)
-- Application of weakened metasubstitution corresponds to centre weakening
ix-inr≀ : (κ : MSub Δ 𝔐 𝔑)(𝔪 : Π ⊩ τ ∈ 𝔐)
→ ix≀ (inr≀ Γ κ) 𝔪 ≡ (𝕣𝕖𝕟 ∥ 𝔑 ∥ (ix≀ κ 𝔪) (Π ∔∣ inr Γ))
ix-inr≀ (x ◃ κ) ↓ = refl
ix-inr≀ (x ◃ κ) (↑ 𝔪) = ix-inr≀ κ 𝔪
-- Correctness lemmas of weakening, lifting, identity
ext≀≈𝕋₁pop : (κ : MSub Γ 𝔐 𝔑)(𝔪 : Π ⊩ τ ∈ 𝔐) → ix≀ (ext≀ Δ β κ) 𝔪 ≡ wk≀ (ix≀ κ 𝔪)
ext≀≈𝕋₁pop (x ◃ κ) ↓ = refl
ext≀≈𝕋₁pop (x ◃ κ) (↑ 𝔪) = ext≀≈𝕋₁pop κ 𝔪
lift≀≈𝕋₁pop : (κ : MSub Γ 𝔐 𝔑)(𝔪 : Γ ⊩ α ∈ 𝔐) → ix≀ (lift≀ Γ α κ) (↑ 𝔪) ≡ wk≀ (ix≀ κ 𝔪)
lift≀≈𝕋₁pop (x ◃ κ) ↓ = refl
lift≀≈𝕋₁pop (x ◃ κ) (↑ 𝔪) = lift≀≈𝕋₁pop κ 𝔪
id≀≈ms-unit : (Γ : Ctx)(𝔪 : Π ⊩ τ ∈ 𝔐) → ix≀ (id≀ Γ) 𝔪 ≡ ms-unit 𝔪
id≀≈ms-unit {𝔐 = ⁅ Π ⊩ₙ τ ⁆ 𝔐} Γ ↓ = refl
id≀≈ms-unit {𝔐 = ⁅ Π ⊩ₙ τ ⁆ 𝔐} Γ (↑_ {Δ}{β}{Γ = .Π}{.τ} 𝔪) = begin
ix≀ (ext≀ Π τ (id≀ Γ)) 𝔪
≡⟨ ext≀≈𝕋₁pop (id≀ Γ) 𝔪 ⟩
wk≀ (ix≀ (id≀ Γ) 𝔪)
≡⟨ cong (wk≀) (id≀≈ms-unit Γ 𝔪) ⟩
wk≀ (ms-unit 𝔪)
≡⟨⟩
wk≀ (𝕞𝕧𝕒𝕣 ∥ 𝔐 ∥ 𝔪 (𝕧𝕒𝕣 ∥ 𝔐 ∥ ∘ ∔.i₁))
≡⟨ 𝕋₁∘𝕞𝕧𝕒𝕣[𝕧𝕒𝕣] ↑_ 𝔪 (∔.i₁) ⟩
𝕞𝕧𝕒𝕣 ∥ ⁅ Π ⊩ₙ τ ⁆ 𝔐 ∥ (↑ 𝔪) (𝕧𝕒𝕣 ∥ ⁅ Π ⊩ₙ τ ⁆ 𝔐 ∥ ∘ ∔.i₁)
∎ where open ≡-Reasoning
-- Inductive metasubstitution operations
-- Base
msub≀ : (𝔐 ▷ 𝕋) α Γ → MSub Γ 𝔐 𝔑 → (𝔑 ▷ 𝕋) α Γ
msub≀ t ζ = msub t (ix≀ ζ)
-- Parametrised
□msub≀ : (𝔐 ▷ 𝕋) α Γ → (Γ ↝ Δ) → MSub Δ 𝔐 𝔑 → (𝔑 ▷ 𝕋) α Δ
□msub≀ t ρ ζ = □msub t ρ (ix≀ ζ)
-- Linear
○msub≀ : (𝔐 ▷ 𝕋) α Γ → MSub Δ 𝔐 𝔑 → (𝔑 ▷ 𝕋) α (Γ ∔ Δ)
○msub≀ {Γ = Γ} t ζ = □msub≀ t (inl Γ) (inr≀ Γ ζ)
-- Syntactic sugar for metasubstitution application
_》 : (𝔑 ▷ 𝕋) α (Π ∔ Γ) → MSub Γ (⁅ Π ⊩ₙ α ⁆̣) 𝔑
t 》 = t ◃ ◦
_《_ : (𝔐 ▷ 𝕋) α Γ → MSub Γ 𝔐 𝔑 → (𝔑 ▷ 𝕋) α Γ
_《_ = msub≀
infixr 25 _》
infix 15 _《_
-- Instantiation of a term extended at the start of the context
instₛ : (⁅ Π ⊩ₙ α ⁆ 𝔐 ▷ 𝕋) β Γ → (𝔐 ▷ 𝕋) α (Π ∔ Γ) → (𝔐 ▷ 𝕋) β Γ
instₛ {Γ = Γ} h s = msub≀ h (s ◃ id≀ Γ)
-- Instantiation of a term extended at the end of the context
instₑ : (𝔐 ⁅ Π ⊩ₙ α ⁆ ▷ 𝕋) β Γ → (𝔐 ▷ 𝕋) α (Π ∔ Γ) → (𝔐 ▷ 𝕋) β Γ
instₑ {Γ = Γ} h s = msub≀ h ((id≀ Γ) ▹ s)
-- Instantiation of a term extended twice at the end of the context
instₑ₂ : {Π₁ Π₂ : Ctx}{α₁ α₂ : T}
→ ((𝔐 ⁅ Π₁ ⊩ₙ α₁ ⁆) ⁅ Π₂ ⊩ₙ α₂ ⁆ ▷ 𝕋) β Γ
→ (𝔐 ▷ 𝕋) α₁ (Π₁ ∔ Γ) → (𝔐 ▷ 𝕋) α₂ (Π₂ ∔ Γ) → (𝔐 ▷ 𝕋) β Γ
instₑ₂ {Γ = Γ} h s t = msub≀ h ((id≀ Γ ▹ s) ▹ t)
|
lemma tendsto_mult_left: "(f \<longlongrightarrow> l) F \<Longrightarrow> ((\<lambda>x. c * (f x)) \<longlongrightarrow> c * l) F" for c :: "'a::topological_semigroup_mult" |
[STATEMENT]
lemma cp_OclSelect: "(X->select\<^sub>S\<^sub>e\<^sub>t(a | P a)) \<tau> =
((\<lambda> _. X \<tau>)->select\<^sub>S\<^sub>e\<^sub>t(a | P a)) \<tau>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. OclSelect X P \<tau> = OclSelect (\<lambda>_. X \<tau>) P \<tau>
[PROOF STEP]
by(simp add: OclSelect_def cp_defined[symmetric]) |
Formal statement is: lemma diff: "f \<in> R \<Longrightarrow> g \<in> R \<Longrightarrow> (\<lambda>x. f x - g x) \<in> R" Informal statement is: If $f$ and $g$ are Riemann integrable, then so is $f - g$. |
{-# OPTIONS --without-K --safe #-}
-- Quicksort
module Experiment.Induction where
-- agda-stdlib
open import Level
open import Data.List
open import Data.Product
open import Data.Nat as ℕ
open import Data.Nat.Induction as Ind
open import Relation.Binary as B
open import Relation.Unary as U
import Relation.Unary.Properties as Uₚ
open import Relation.Binary.PropositionalEquality as P using (_≡_)
open import Function.Base
open import Induction.WellFounded
private
variable
a p r : Level
|
State Before: R : Type u
L : Type v
L' : Type w₂
M : Type w
M' : Type w₁
inst✝¹² : CommRing R
inst✝¹¹ : LieRing L
inst✝¹⁰ : LieAlgebra R L
inst✝⁹ : LieRing L'
inst✝⁸ : LieAlgebra R L'
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M'
inst✝² : Module R M'
inst✝¹ : LieRingModule L M'
inst✝ : LieModule R L M'
f : L →ₗ⁅R⁆ L'
I : LieIdeal R L
J : LieIdeal R L'
x : L
⊢ ↑f x ∈ idealRange f State After: R : Type u
L : Type v
L' : Type w₂
M : Type w
M' : Type w₁
inst✝¹² : CommRing R
inst✝¹¹ : LieRing L
inst✝¹⁰ : LieAlgebra R L
inst✝⁹ : LieRing L'
inst✝⁸ : LieAlgebra R L'
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M'
inst✝² : Module R M'
inst✝¹ : LieRingModule L M'
inst✝ : LieModule R L M'
f : L →ₗ⁅R⁆ L'
I : LieIdeal R L
J : LieIdeal R L'
x : L
⊢ ↑f x ∈ LieIdeal.map f ⊤ Tactic: rw [idealRange_eq_map] State Before: R : Type u
L : Type v
L' : Type w₂
M : Type w
M' : Type w₁
inst✝¹² : CommRing R
inst✝¹¹ : LieRing L
inst✝¹⁰ : LieAlgebra R L
inst✝⁹ : LieRing L'
inst✝⁸ : LieAlgebra R L'
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M'
inst✝² : Module R M'
inst✝¹ : LieRingModule L M'
inst✝ : LieModule R L M'
f : L →ₗ⁅R⁆ L'
I : LieIdeal R L
J : LieIdeal R L'
x : L
⊢ ↑f x ∈ LieIdeal.map f ⊤ State After: no goals Tactic: exact LieIdeal.mem_map (LieSubmodule.mem_top x) |
While working as an interpreter at the Italian embassy in East Berlin for ten years, Augusto Bordato documented the daily triumphs and monotonies of life in the DDR [East Germany]. Twenty-five years after the fall of the Berlin Wall in 1989, Contrasto has published a collection of his work entitled DDR, Remembering East Germany. The photographs, taken with a classic Leica film camera, focus on the last two years of the regime: parades, war ruins, beach vacations, lines at the local store, and finally the opening of the Wall. The grainy black and white shots woo us with their familiarity and their humanness, enhanced by a remarkably rich tonal range.
In reality, however, the book shows us the limit of our ability to recover the past. In “remembering” the DDR, Bordato reinvents it at the same time, adding substantial text alongside the pictures that intellectualizes and changes the past he once experienced. The text (with captions in Italian and English on each page) prevents us from experiencing the photos as fragments of reality. We see the images as Bordato sees them now, with twenty-five years of hindsight.
Which is, in some ways, a shame, since the photographs are so evocative in their own right. In one photo, statues broken by the Dresden bombings look hauntingly human, entwined in the grass like naked lovers. The tones are superb: the stone bodies are a striking translucent white surrounded by even, gray earth tones and masked in the foreground by blurred shrubs in true black. The description, however, interrupts our reverie with a photographer’s musing: “Like death masks and photographs, casts are also impressions of something real.” We enter the photographer’s perspective as a historical witness and see the broken statues as symbols of the loss and death of an entire era.
Similarly, the last pages of the book offer Bordato’s re-interpretation, rather than pure memories, of the opening of the wall. His photos are vivid, blurred nighttime shots. We are in the crowd, scrambling to get the first peek over to the West. In the middle of this section, however, we find an image that takes us away from the moment. It is the cover photo: an image of a young couple wearing Communist military uniforms in what one assumes is ironic, or perhaps even earnest nostalgia. One would think they were part of the crowd that night of the 9th of November, but in reality the picture is taken months later when uniforms have safely been rendered memorabilia.
In his curation of the past, Bordato collapses historical time. He shows us that history is constantly being rewritten, even when we have the very images before us. Almost a hundred years after Marcel Proust, that great philosopher of memory, wrote “Remembrance of things past is not necessarily the remembrance of things as they were,” Bordato’s book shows us again how remembering means changing and how looking back also means recognizing that something has been lost.
Vera Carothers is a freelance writer and educator based in Paris, France. |
[STATEMENT]
lemma rel_ipurge_aux_single_dom:
"rel_ipurge_aux P I D {u} = rel_ipurge P I D u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rel_ipurge_aux P I D {u} = rel_ipurge P I D u
[PROOF STEP]
by (simp add: rel_ipurge_def rel_ipurge_aux_def ipurge_tr_rev_aux_single_dom) |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <getopt.h>
#include <iniparser.h>
#include "parmt_utils.h"
#include "iscl/array/array.h"
#include "iscl/memory/memory.h"
#include "iscl/os/os.h"
#ifdef PARMT_USE_INTEL
#include <mkl_cblas.h>
#else
#include <cblas.h>
#endif
#define PROGRAM_NAME "mergemt"
static void printUsage(void);
static int parseArguments(int argc, char *argv[],
char iniFile[PATH_MAX]);
struct mergemtParms_struct
{
char **files;
double *wts;
char archiveFile[PATH_MAX];
int nfiles;
};
int mergemt_readIni(const char *iniFile, struct mergemtParms_struct *parms);
int main(int argc, char *argv[])
{
struct mergemtParms_struct parms;
char iniFile[PATH_MAX], programNameIn[256];
double *beta0, *beta1, *betaLoc, *dep0, *dep1, *depLoc,
*gamma0, *gamma1, *gammaLoc, *kappa0, *kappa1, *kappaLoc,
*M0, *M1, *M0loc, *phi0, *phi1,
*phiLoc, *sigma0, *sigma1, *sigmaLoc, *theta0, *theta1,
*thetaLoc, xsum;
int i, ierr, il, im, indx, jndx, nb, nb0, nb1, ncopy, ng, ng0, ng1,
nk, nk0, nk1, nlocs, nlocs0, nlocs1, nm, nm0, nm1, nmt, nmt0, nmt1,
ns, ns0, ns1, nt, nt0, nt1;
const int jm = 0;
ierr = parseArguments(argc, argv, iniFile);
if (ierr != 0)
{
if (ierr ==-2){return EXIT_SUCCESS;}
printf("%s: Error parsing arguments\n", PROGRAM_NAME);
return EXIT_FAILURE;
}
if (!os_path_isfile(iniFile))
{
printf("%s: Error ini file doesn't exist\n", PROGRAM_NAME);
return EXIT_FAILURE;
}
ierr = mergemt_readIni(iniFile, &parms);
if (ierr != 0)
{
printf("%s: Error reading ini file\n", PROGRAM_NAME);
return EXIT_FAILURE;
}
xsum = array_sum64f(parms.nfiles, parms.wts, &ierr);
if (fabs(xsum) < 1.e-14)
{
printf("%s: Division by zero - setting to 1\n", PROGRAM_NAME);
xsum = 1.0;
}
cblas_dscal(parms.nfiles, 1.0/xsum, parms.wts, 1);
// Load the files
nlocs0 =-1;
nlocs1 =-1;
nm0 =-1;
nm1 =-1;
nb0 =-1;
nb1 =-1;
ng0 =-1;
ng1 =-1;
nk0 =-1;
nk1 =-1;
ns0 =-1;
ns1 =-1;
nt0 =-1;
nt1 =-1;
nmt0 =-1;
nmt1 =-1;
dep0 = NULL;
dep1 = NULL;
M0 = NULL;
M1 = NULL;
beta0 = NULL;
beta1 = NULL;
gamma0 = NULL;
gamma1 = NULL;
kappa0 = NULL;
kappa1 = NULL;
sigma0 = NULL;
sigma1 = NULL;
theta0 = NULL;
theta1 = NULL;
phi0 = NULL;
phi1 = NULL;
for (i=0; i<parms.nfiles; i++)
{
if (!os_path_isfile(parms.files[i])){continue;}
printf("%s: Loading archive: %s\n", PROGRAM_NAME, parms.files[i]);
ierr = parmt_io_readObjfnArchive64f(parms.files[i],
programNameIn,
&nlocs, &depLoc,
&nm, &M0loc,
&nb, &betaLoc,
&ng, &gammaLoc,
&nk, &kappaLoc,
&ns, &sigmaLoc,
&nt, &thetaLoc,
&nmt, &phiLoc);
if (ierr != 0)
{
printf("%s: Error loading %s; skipping...\n",
PROGRAM_NAME, parms.files[i]);
continue;
}
if (strcasecmp("parmt", programNameIn) == 0)
{
if (nm0 ==-1)
{
nlocs0 = nlocs;
nm0 = nm;
nb0 = nb;
ng0 = ng;
nk0 = nk;
ns0 = ns;
nt0 = nt;
nmt0 = nmt;
phi0 = array_zeros64f(nmt, &ierr);
dep0 = array_copy64f(nlocs, depLoc, &ierr);
M0 = array_copy64f(nm, M0loc, &ierr);
beta0 = array_copy64f(nb, betaLoc, &ierr);
gamma0 = array_copy64f(ng, gammaLoc, &ierr);
kappa0 = array_copy64f(nk, kappaLoc, &ierr);
sigma0 = array_copy64f(ns, sigmaLoc, &ierr);
theta0 = array_copy64f(nt, thetaLoc, &ierr);
}
if (nm0 != nm || nb0 != nb || ng0 != ng || nk0 != nk ||
ns0 != ns || nt0 != nt || nmt0 != nmt || nlocs0 != nlocs)
{
printf("%s: Size mismatch in parmt\n", PROGRAM_NAME);
continue;
}
cblas_daxpy(nmt, parms.wts[i], phiLoc, 1, phi0, 1);
}
else if (strcasecmp("polarmt", programNameIn) == 0)
{
if (nm1 ==-1)
{
nlocs1 = nlocs;
nm1 = nm;
nb1 = nb;
ng1 = ng;
nk1 = nk;
ns1 = ns;
nt1 = nt;
nmt1 = nmt;
phi1 = array_zeros64f(nmt, &ierr);
dep1 = array_copy64f(nlocs, depLoc, &ierr);
M1 = array_copy64f(nm, M0loc, &ierr);
beta1 = array_copy64f(nb, betaLoc, &ierr);
gamma1 = array_copy64f(ng, gammaLoc, &ierr);
kappa1 = array_copy64f(nk, kappaLoc, &ierr);
sigma1 = array_copy64f(ns, sigmaLoc, &ierr);
theta1 = array_copy64f(nt, thetaLoc, &ierr);
}
printf("%f\n",parms.wts[i]);
cblas_daxpy(nmt, parms.wts[i], phiLoc, 1, phi1, 1);
}
else
{
printf("%s: Unkown origin: %s\n", PROGRAM_NAME, programNameIn);
goto NEXT;
}
NEXT:;
memory_free64f(&M0loc);
memory_free64f(&betaLoc);
memory_free64f(&gammaLoc);
memory_free64f(&kappaLoc);
memory_free64f(&sigmaLoc);
memory_free64f(&thetaLoc);
memory_free64f(&phiLoc);
}
if (phi0 != NULL && phi1 != NULL)
{
if (nb0 != nb1 || ng0 != ng1 || nk0 != nk1 ||
ns0 != ns1 || nt0 != nt1 || nlocs0 != nlocs)
{
printf("%s: Inconsistent grid search sizes\n", PROGRAM_NAME);
return -1;
}
printf("%f\n", array_max64f(nmt0, phi0, &ierr));
printf("%f\n", array_max64f(nmt1, phi1, &ierr));
printf("%s: Stacking polarities into waveforms...\n", PROGRAM_NAME);
/*
for (i=0; i<nm0; i++)
{
//printf("%d %d\n", nmt, nmt0);
cblas_daxpy(nmt1, 1.0, phi1, 1, &phi0[nmt1*i], 1);
}
*/
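        // phi1 (the polarity objective function) was computed for a single
        // moment magnitude (jm = 0), so its slice at each location is added
        // onto every moment-magnitude slice of the waveform objective phi0.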
for (il=0; il<nlocs0; il++)
{
for (im=0; im<nm0; im++)
{
indx = il*nm0*nb0*ng0*nk0*ns0*nt0
+ im*nb0*ng0*nk0*ns0*nt0;
jndx = il*nm1*nb1*ng1*nk1*ns1*nt1
+ jm*nb1*ng1*nk1*ns1*nt1;
ncopy = nb0*ng0*nk0*ns0*nt0;
cblas_daxpy(ncopy, 1.0, &phi1[jndx], 1, &phi0[indx], 1);
}
}
printf("%f\n", array_max64f(nmt0, phi0, &ierr));
printf("%s: Writing joint archive\n", PROGRAM_NAME);
ierr = parmt_io_createObjfnArchive64f(PROGRAM_NAME, parms.archiveFile,
1, //data.nobs, TODO - read and fix
nlocs0, dep0,
nm0, M0,
nb0, beta0,
ng0, gamma0,
nk0, kappa0,
ns0, sigma0,
nt0, theta0);
ierr = parmt_io_writeObjectiveFunction64f(parms.archiveFile,
nmt0, phi0);
}
else
{
if (phi0 != NULL)
{
printf("%s: Writing joint waveform archive\n", PROGRAM_NAME);
ierr = parmt_io_createObjfnArchive64f(PROGRAM_NAME, parms.archiveFile,
1, //data.nobs, TODO - read and fix
nlocs0, dep0,
nm0, M0,
nb0, beta0,
ng0, gamma0,
nk0, kappa0,
ns0, sigma0,
nt0, theta0);
ierr = parmt_io_writeObjectiveFunction64f(parms.archiveFile,
nmt0, phi0);
}
else if (phi1 != NULL)
{
printf("%s: Writing joint waveform archive\n", PROGRAM_NAME);
ierr = parmt_io_createObjfnArchive64f(PROGRAM_NAME, parms.archiveFile,
1, //data.nobs, TODO - read and fix
nlocs1, dep1,
nm1, M1,
nb1, beta1,
ng1, gamma1,
nk1, kappa1,
ns1, sigma1,
nt1, theta1);
}
}
memory_free64f(&dep0);
memory_free64f(&dep1);
memory_free64f(&M0);
memory_free64f(&M1);
memory_free64f(&beta0);
memory_free64f(&beta1);
memory_free64f(&gamma0);
memory_free64f(&gamma1);
memory_free64f(&kappa0);
memory_free64f(&kappa1);
memory_free64f(&sigma0);
memory_free64f(&sigma1);
memory_free64f(&theta0);
memory_free64f(&theta1);
memory_free64f(&phi0);
memory_free64f(&phi1);
return EXIT_SUCCESS;
}
//============================================================================//
int mergemt_readIni(const char *iniFile,
struct mergemtParms_struct *parms)
{
const char *fcnm = "mergemt_readIni\0";
const char *s;
char vname[256];
dictionary *ini;
int i, ierr;
memset(parms, 0, sizeof(struct mergemtParms_struct));
if (!os_path_isfile(iniFile))
{
printf("%s: Error ini file doesn't exist\n", fcnm);
return -1;
}
ini = iniparser_load(iniFile);
parms->nfiles = iniparser_getint(ini, "general:nmerge\0", 0);
if (parms->nfiles < 1)
{
printf("%s: No files to merge\n", fcnm);
return -1;
}
parms->files = (char **) calloc((size_t) parms->nfiles, sizeof(char *));
parms->wts = array_set64f(parms->nfiles, 1.0, &ierr);
for (i=0; i<parms->nfiles; i++)
{
parms->files[i] = (char *) calloc(PATH_MAX, sizeof(char));
memset(vname, 0, 256*sizeof(char));
sprintf(vname, "general:mergeFile_%d", i+1);
s = iniparser_getstring(ini, vname, NULL);
if (!os_path_isfile(s))
{
printf("%s: File %s doesn't exist\n", fcnm, s);
continue;
}
strcpy(parms->files[i], s);
memset(vname, 0, 256*sizeof(char));
sprintf(vname, "general:wtFile_%d", i+1);
parms->wts[i] = iniparser_getdouble(ini, vname, 1.0);
if (parms->wts[i] < 0.0)
{
printf("%s: Weight can't be negative: %f\n", fcnm, parms->wts[i]);
return -1;
}
}
memset(vname, 0, 256*sizeof(char));
strcpy(vname, "general:mergeArchive");
s = iniparser_getstring(ini, vname, "mergemt.h5");
strcpy(parms->archiveFile, s);
iniparser_freedict(ini);
return 0;
}
//============================================================================//
static int parseArguments(int argc, char *argv[],
char iniFile[PATH_MAX])
{
bool linFile;
int prod;
linFile = false;
memset(iniFile, 0, PATH_MAX*sizeof(char));
while (true)
{
static struct option longOptions[] =
{
{"help", no_argument, 0, '?'},
{"help", no_argument, 0, 'h'},
{"ini_file", required_argument, 0, 'i'},
{0, 0, 0, 0}
};
int c, optionIndex;
c = getopt_long(argc, argv, "?hi:m:l:",
longOptions, &optionIndex);
if (c ==-1){break;}
if (c == 'i')
{
strcpy(iniFile, (const char *) optarg);
linFile = true;
}
else if (c == 'h' || c == '?')
{
printUsage();
return -2;
}
else
{
printf("%s: Unknown options: %s\n",
PROGRAM_NAME, argv[optionIndex]);
}
}
if (!linFile)
{
printf("%s: Error must specify ini file\n\n", PROGRAM_NAME);
printUsage();
return -1;
}
return 0;
}
//============================================================================//
static void printUsage(void)
{
printf("Usage:\n mergemt -i input_file\n\n");
printf("Required arguments:\n");
printf(" -i input_file specifies the initialization file\n");
printf("\n");
printf("Optional arguments:\n");
printf(" -h displays this message\n");
return;
}
|
(* This file is an automatic translation, the licence of the source can be found here: *)
(* https://github.com/herd/herdtools7/blob/master/LICENSE.txt *)
(* Translation of model Really minimal *)
From Coq Require Import Relations Ensembles String.
From RelationAlgebra Require Import lattice prop monoid rel kat.
From Catincoq.lib Require Import Cat proprel.
Section Model.
Variable c : candidate.
Definition events := events c.
Definition R := R c.
Definition W := W c.
Definition IW := IW c.
Definition FW := FW c.
Definition B := B c.
Definition RMW := RMW c.
Definition F := F c.
Definition rf := rf c.
Definition po := po c.
Definition int := int c.
Definition ext := ext c.
Definition loc := loc c.
Definition addr := addr c.
Definition data := data c.
Definition ctrl := ctrl c.
Definition amo := amo c.
Definition rmw := rmw c.
Definition unknown_set := unknown_set c.
Definition unknown_relation := unknown_relation c.
Definition M := R ⊔ W.
Definition emptyset : set events := empty.
Definition classes_loc : set events -> Ensemble (Ensemble events) := partition loc.
Definition tag2events := unknown_relation "tag2events".
Definition emptyset_0 : set events := domain 0.
Definition partition := classes_loc.
Definition tag2instrs := tag2events.
Definition po_loc := po ⊓ loc.
Definition rfe := rf ⊓ ext.
Definition rfi := rf ⊓ int.
Definition co0 := loc ⊓ ([IW] ⋅ top ⋅ [(W ⊓ !IW)] ⊔ [(W ⊓ !FW)] ⋅ top ⋅ [FW]).
Definition toid (s : set events) : relation events := [s].
Definition fencerel (B : set events) := (po ⊓ [top] ⋅ top ⋅ [B]) ⋅ po.
Definition ctrlcfence (CFENCE : set events) := (ctrl ⊓ [top] ⋅ top ⋅ [CFENCE]) ⋅ po.
Definition imply (A : relation events) (B : relation events) := !A ⊔ B.
Definition nodetour (R1 : relation events) (R2 : relation events) (R3 : relation events) := R1 ⊓ !(R2 ⋅ R3).
Definition singlestep (R : relation events) := nodetour R R R.
(* Definition of map already included in the prelude *)
Definition LKW := (*failed: try LKW with emptyset_0*) emptyset_0.
(* Definition of co_locs already included in the prelude *)
(* Definition of cross already included in the prelude *)
Definition generate_orders s pco := cross (co_locs pco (partition s)).
Definition generate_cos pco := generate_orders W pco.
Variable co : relation events.
Definition witness_conditions := generate_cos co0 co.
Definition model_conditions := True.
End Model.
Hint Unfold events R W IW FW B RMW F rf po int ext loc addr data ctrl amo rmw unknown_set unknown_relation M emptyset classes_loc tag2events emptyset_0 partition tag2instrs po_loc rfe rfi co0 toid fencerel ctrlcfence imply nodetour singlestep LKW generate_orders generate_cos witness_conditions model_conditions : cat.
Definition valid (c : candidate) :=
exists co : relation (events c),
witness_conditions c co /\
True.
(* End of translation of model Really minimal *)
|
!> Implementation of an installer object.
!>
!> The installer provides a way to install objects to their respective directories
!> in the installation prefix; a generic install command allows installing
!> to any directory within the prefix.
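!>
!> A minimal usage sketch (illustrative only; the prefix path is just an example
!> and error handling is left to the caller):
!>
!>```fortran
!> type(installer_t) :: installer
!> type(error_t), allocatable :: error
!> call new_installer(installer, prefix="/opt/fpm", verbosity=2)
!> call installer%install_executable("build/app/fpm", error)
!>```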
module fpm_installer
use, intrinsic :: iso_fortran_env, only : output_unit
use fpm_environment, only : get_os_type, os_is_unix
use fpm_error, only : error_t, fatal_error
use fpm_filesystem, only : join_path, mkdir, exists, unix_path, windows_path, &
env_variable
implicit none
private
public :: installer_t, new_installer
!> Declaration of the installer type
type :: installer_t
!> Path to installation directory
character(len=:), allocatable :: prefix
!> Binary dir relative to the installation prefix
character(len=:), allocatable :: bindir
!> Library directory relative to the installation prefix
character(len=:), allocatable :: libdir
!> Include directory relative to the installation prefix
character(len=:), allocatable :: includedir
!> Output unit for informative printout
integer :: unit = output_unit
!> Verbosity of the installer
integer :: verbosity = 1
!> Command to copy objects into the installation prefix
character(len=:), allocatable :: copy
!> Cached operating system
integer :: os
contains
!> Install an executable in its correct subdirectory
procedure :: install_executable
!> Install a library in its correct subdirectory
procedure :: install_library
!> Install a header/module in its correct subdirectory
procedure :: install_header
!> Install a generic file into a subdirectory in the installation prefix
procedure :: install
!> Run an installation command, type-bound for unit testing purposes
procedure :: run
!> Create a new directory in the prefix, type-bound for unit testing purposes
procedure :: make_dir
end type installer_t
!> Default name of the binary subdirectory
character(len=*), parameter :: default_bindir = "bin"
!> Default name of the library subdirectory
character(len=*), parameter :: default_libdir = "lib"
!> Default name of the include subdirectory
character(len=*), parameter :: default_includedir = "include"
!> Default name of the installation prefix on Unix platforms
character(len=*), parameter :: default_prefix_unix = "/usr/local"
!> Default name of the installation prefix on Windows platforms
character(len=*), parameter :: default_prefix_win = "C:\"
!> Copy command on Unix platforms
character(len=*), parameter :: default_copy_unix = "cp"
!> Copy command on Windows platforms
character(len=*), parameter :: default_copy_win = "copy"
contains
!> Create a new instance of an installer
subroutine new_installer(self, prefix, bindir, libdir, includedir, verbosity, &
copy)
!> Instance of the installer
type(installer_t), intent(out) :: self
!> Path to installation directory
character(len=*), intent(in), optional :: prefix
!> Binary dir relative to the installation prefix
character(len=*), intent(in), optional :: bindir
!> Library directory relative to the installation prefix
character(len=*), intent(in), optional :: libdir
!> Include directory relative to the installation prefix
character(len=*), intent(in), optional :: includedir
!> Verbosity of the installer
integer, intent(in), optional :: verbosity
!> Copy command
character(len=*), intent(in), optional :: copy
self%os = get_os_type()
if (present(copy)) then
self%copy = copy
else
if (os_is_unix(self%os)) then
self%copy = default_copy_unix
else
self%copy = default_copy_win
end if
end if
if (present(includedir)) then
self%includedir = includedir
else
self%includedir = default_includedir
end if
if (present(prefix)) then
self%prefix = prefix
else
call set_default_prefix(self%prefix, self%os)
end if
if (present(bindir)) then
self%bindir = bindir
else
self%bindir = default_bindir
end if
if (present(libdir)) then
self%libdir = libdir
else
self%libdir = default_libdir
end if
if (present(verbosity)) then
self%verbosity = verbosity
else
self%verbosity = 1
end if
end subroutine new_installer
!> Set the default prefix for the installation
subroutine set_default_prefix(prefix, os)
!> Installation prefix
character(len=:), allocatable :: prefix
!> Platform identifier
integer, intent(in), optional :: os
character(len=:), allocatable :: home
if (os_is_unix(os)) then
call env_variable(home, "HOME")
if (allocated(home)) then
prefix = join_path(home, ".local")
else
prefix = default_prefix_unix
end if
else
call env_variable(home, "APPDATA")
if (allocated(home)) then
prefix = join_path(home, "local")
else
prefix = default_prefix_win
end if
end if
end subroutine set_default_prefix
!> Install an executable in its correct subdirectory
subroutine install_executable(self, executable, error)
!> Instance of the installer
class(installer_t), intent(inout) :: self
!> Path to the executable
character(len=*), intent(in) :: executable
!> Error handling
type(error_t), allocatable, intent(out) :: error
integer :: ll
if (.not.os_is_unix(self%os)) then
ll = len(executable)
if (executable(max(1, ll-3):ll) /= ".exe") then
call self%install(executable//".exe", self%bindir, error)
return
end if
end if
call self%install(executable, self%bindir, error)
end subroutine install_executable
!> Install a library in its correct subdirectory
subroutine install_library(self, library, error)
!> Instance of the installer
class(installer_t), intent(inout) :: self
!> Path to the library
character(len=*), intent(in) :: library
!> Error handling
type(error_t), allocatable, intent(out) :: error
call self%install(library, self%libdir, error)
end subroutine install_library
!> Install a header/module in its correct subdirectory
subroutine install_header(self, header, error)
!> Instance of the installer
class(installer_t), intent(inout) :: self
!> Path to the header
character(len=*), intent(in) :: header
!> Error handling
type(error_t), allocatable, intent(out) :: error
call self%install(header, self%includedir, error)
end subroutine install_header
!> Install a generic file into a subdirectory in the installation prefix
subroutine install(self, source, destination, error)
!> Instance of the installer
class(installer_t), intent(inout) :: self
!> Path to the original file
character(len=*), intent(in) :: source
!> Path to the destination inside the prefix
character(len=*), intent(in) :: destination
!> Error handling
type(error_t), allocatable, intent(out) :: error
character(len=:), allocatable :: install_dest
install_dest = join_path(self%prefix, destination)
if (os_is_unix(self%os)) then
install_dest = unix_path(install_dest)
else
install_dest = windows_path(install_dest)
end if
call self%make_dir(install_dest, error)
if (allocated(error)) return
if (self%verbosity > 0) then
if (exists(install_dest)) then
write(self%unit, '("# Update:", 1x, a, 1x, "->", 1x, a)') &
source, install_dest
else
write(self%unit, '("# Install:", 1x, a, 1x, "->", 1x, a)') &
source, install_dest
end if
end if
call self%run(self%copy//' "'//source//'" "'//install_dest//'"', error)
if (allocated(error)) return
end subroutine install
!> Create a new directory in the prefix
subroutine make_dir(self, dir, error)
!> Instance of the installer
class(installer_t), intent(inout) :: self
!> Directory to be created
character(len=*), intent(in) :: dir
!> Error handling
type(error_t), allocatable, intent(out) :: error
if (.not.exists(dir)) then
if (self%verbosity > 1) then
write(self%unit, '("# Dir:", 1x, a)') dir
end if
call mkdir(dir)
end if
end subroutine make_dir
!> Run an installation command
subroutine run(self, command, error)
!> Instance of the installer
class(installer_t), intent(inout) :: self
!> Command to be launched
character(len=*), intent(in) :: command
!> Error handling
type(error_t), allocatable, intent(out) :: error
integer :: stat
if (self%verbosity > 1) then
write(self%unit, '("# Run:", 1x, a)') command
end if
call execute_command_line(command, exitstat=stat)
if (stat /= 0) then
call fatal_error(error, "Failed in command: '"//command//"'")
return
end if
end subroutine run
end module fpm_installer
|
[STATEMENT]
lemma length_mirror2_aux: "length ps = length (mirror2_aux n ps)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length ps = length (mirror2_aux n ps)
[PROOF STEP]
by (induction ps) auto |
(* Title: HOL/Auth/n_germanSimp_lemma_on_inv__23.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSimp Protocol Case Study*}
theory n_germanSimp_lemma_on_inv__23 imports n_germanSimp_base
begin
section{*All lemmas on causal relation between inv__23 and some rule r*}
lemma n_SendInvAckVsinv__23:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__23:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__23:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const GntS)) (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv4) ''Cmd'')) (Const InvAck))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__23:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv4) ''Cmd'')) (Const InvAck)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const GntE))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__23:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendGntSVsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendGntS i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqE__part__0Vsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__0Vsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInv__part__1Vsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendGntEVsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendGntE N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqE__part__1Vsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqSVsinv__23:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
\chapter{Revision History}
\begin{longtable}[]{@{}lcl@{}}
\toprule
Date & Rev. & Comments\tabularnewline
\midrule
\endhead
13-Oct-2017 & 1.0 & Initial Release\tabularnewline
& & \tabularnewline
& & \tabularnewline
& & \tabularnewline
\bottomrule
\caption{Revision History}
\end{longtable} |
[STATEMENT]
lemma order_irr: "Coset.order (mult_of R) = CARD('a)^degree f - 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. order (mult_of R) = CARD('a) ^ degree f - 1
[PROOF STEP]
by (simp add: card_Diff_singleton Coset.order_def carrier_mult_of R_def) |
array[0]: 400 ptr2[0]: 400
array[1]: 800 ptr2[1]: 800
array[2]: 1200 ptr2[2]: 1200
|
Formal statement is: lemma filtermap_nhds_shift: "filtermap (\<lambda>x. x - d) (nhds a) = nhds (a - d)" for a d :: "'a::real_normed_vector" Informal statement is: The filter of neighborhoods of $a$ is the same as the filter of neighborhoods of $a - d$. |
/-
Copyright (c) 2020 Johan Commelin, Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin, Damiano Testa, Yaël Dillies
! This file was ported from Lean 3 source module order.synonym
! leanprover-community/mathlib commit 448144f7ae193a8990cb7473c9e9a01990f64ac7
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Logic.Equiv.Defs
import Mathbin.Logic.Nontrivial
import Mathbin.Order.Basic
/-!
# Type synonyms
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file provides two type synonyms for order theory:
* `order_dual α`: Type synonym of `α` to equip it with the dual order (`a ≤ b` becomes `b ≤ a`).
* `lex α`: Type synonym of `α` to equip it with its lexicographic order. The precise meaning depends
on the type we take the lex of. Examples include `prod`, `sigma`, `list`, `finset`.
## Notation
`αᵒᵈ` is notation for `order_dual α`.
The general rule for notation of `lex` types is to append `ₗ` to the usual notation.
## Implementation notes
One should not abuse definitional equality between `α` and `αᵒᵈ`/`lex α`. Instead, explicit
coercions should be inserted:
* `order_dual`: `order_dual.to_dual : α → αᵒᵈ` and `order_dual.of_dual : αᵒᵈ → α`
* `lex`: `to_lex : α → lex α` and `of_lex : lex α → α`.
In fact, those are bundled as `equiv`s to put goals in the right syntactic form for rewriting with
the `equiv` API (`⇑to_lex a` where `⇑` is `coe_fn : (α ≃ lex α) → α → lex α`, instead of a bare
`to_lex a`).
## See also
This file is similar to `algebra.group.type_tags`.
-/
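/- Illustrative example (not part of the upstream file): the coercions make the dual
order explicit, e.g. `toDual_le_toDual` rewrites `toDual 5 ≤ toDual 3` to `3 ≤ 5`,
which holds for natural numbers. -/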
variable {α β γ : Type _}
/-! ### Order dual -/
namespace OrderDual
instance [h : Nontrivial α] : Nontrivial αᵒᵈ :=
h
#print OrderDual.toDual /-
/-- `to_dual` is the identity function to the `order_dual` of a linear order. -/
def toDual : α ≃ αᵒᵈ :=
Equiv.refl _
#align order_dual.to_dual OrderDual.toDual
-/
#print OrderDual.ofDual /-
/-- `of_dual` is the identity function from the `order_dual` of a linear order. -/
def ofDual : αᵒᵈ ≃ α :=
Equiv.refl _
#align order_dual.of_dual OrderDual.ofDual
-/
#print OrderDual.toDual_symm_eq /-
@[simp]
theorem toDual_symm_eq : (@toDual α).symm = ofDual :=
rfl
#align order_dual.to_dual_symm_eq OrderDual.toDual_symm_eq
-/
#print OrderDual.ofDual_symm_eq /-
@[simp]
theorem ofDual_symm_eq : (@ofDual α).symm = toDual :=
rfl
#align order_dual.of_dual_symm_eq OrderDual.ofDual_symm_eq
-/
#print OrderDual.toDual_ofDual /-
@[simp]
theorem toDual_ofDual (a : αᵒᵈ) : toDual (ofDual a) = a :=
rfl
#align order_dual.to_dual_of_dual OrderDual.toDual_ofDual
-/
#print OrderDual.ofDual_toDual /-
@[simp]
theorem ofDual_toDual (a : α) : ofDual (toDual a) = a :=
rfl
#align order_dual.of_dual_to_dual OrderDual.ofDual_toDual
-/
#print OrderDual.toDual_inj /-
@[simp]
theorem toDual_inj {a b : α} : toDual a = toDual b ↔ a = b :=
Iff.rfl
#align order_dual.to_dual_inj OrderDual.toDual_inj
-/
#print OrderDual.ofDual_inj /-
@[simp]
theorem ofDual_inj {a b : αᵒᵈ} : ofDual a = ofDual b ↔ a = b :=
Iff.rfl
#align order_dual.of_dual_inj OrderDual.ofDual_inj
-/
#print OrderDual.toDual_le_toDual /-
@[simp]
theorem toDual_le_toDual [LE α] {a b : α} : toDual a ≤ toDual b ↔ b ≤ a :=
Iff.rfl
#align order_dual.to_dual_le_to_dual OrderDual.toDual_le_toDual
-/
#print OrderDual.toDual_lt_toDual /-
@[simp]
theorem toDual_lt_toDual [LT α] {a b : α} : toDual a < toDual b ↔ b < a :=
Iff.rfl
#align order_dual.to_dual_lt_to_dual OrderDual.toDual_lt_toDual
-/
#print OrderDual.ofDual_le_ofDual /-
@[simp]
theorem ofDual_le_ofDual [LE α] {a b : αᵒᵈ} : ofDual a ≤ ofDual b ↔ b ≤ a :=
Iff.rfl
#align order_dual.of_dual_le_of_dual OrderDual.ofDual_le_ofDual
-/
#print OrderDual.ofDual_lt_ofDual /-
@[simp]
theorem ofDual_lt_ofDual [LT α] {a b : αᵒᵈ} : ofDual a < ofDual b ↔ b < a :=
Iff.rfl
#align order_dual.of_dual_lt_of_dual OrderDual.ofDual_lt_ofDual
-/
#print OrderDual.le_toDual /-
theorem le_toDual [LE α] {a : αᵒᵈ} {b : α} : a ≤ toDual b ↔ b ≤ ofDual a :=
Iff.rfl
#align order_dual.le_to_dual OrderDual.le_toDual
-/
#print OrderDual.lt_toDual /-
theorem lt_toDual [LT α] {a : αᵒᵈ} {b : α} : a < toDual b ↔ b < ofDual a :=
Iff.rfl
#align order_dual.lt_to_dual OrderDual.lt_toDual
-/
#print OrderDual.toDual_le /-
theorem toDual_le [LE α] {a : α} {b : αᵒᵈ} : toDual a ≤ b ↔ ofDual b ≤ a :=
Iff.rfl
#align order_dual.to_dual_le OrderDual.toDual_le
-/
#print OrderDual.toDual_lt /-
theorem toDual_lt [LT α] {a : α} {b : αᵒᵈ} : toDual a < b ↔ ofDual b < a :=
Iff.rfl
#align order_dual.to_dual_lt OrderDual.toDual_lt
-/
#print OrderDual.rec /-
/-- Recursor for `αᵒᵈ`. -/
@[elab_as_elim]
protected def rec {C : αᵒᵈ → Sort _} (h₂ : ∀ a : α, C (toDual a)) : ∀ a : αᵒᵈ, C a :=
h₂
#align order_dual.rec OrderDual.rec
-/
#print OrderDual.forall /-
@[simp]
protected theorem forall {p : αᵒᵈ → Prop} : (∀ a, p a) ↔ ∀ a, p (toDual a) :=
Iff.rfl
#align order_dual.forall OrderDual.forall
-/
#print OrderDual.exists /-
@[simp]
protected theorem exists {p : αᵒᵈ → Prop} : (∃ a, p a) ↔ ∃ a, p (toDual a) :=
Iff.rfl
#align order_dual.exists OrderDual.exists
-/
alias to_dual_le_to_dual ↔ _ _root_.has_le.le.dual
#align has_le.le.dual LE.le.dual
alias to_dual_lt_to_dual ↔ _ _root_.has_lt.lt.dual
#align has_lt.lt.dual LT.lt.dual
alias of_dual_le_of_dual ↔ _ _root_.has_le.le.of_dual
#align has_le.le.of_dual LE.le.ofDual
alias of_dual_lt_of_dual ↔ _ _root_.has_lt.lt.of_dual
#align has_lt.lt.of_dual LT.lt.ofDual
end OrderDual
/-! ### Lexicographic order -/
#print Lex /-
/-- A type synonym to equip a type with its lexicographic order. -/
def Lex (α : Type _) :=
α
#align lex Lex
-/
#print toLex /-
/-- `to_lex` is the identity function to the `lex` of a type. -/
@[match_pattern]
def toLex : α ≃ Lex α :=
Equiv.refl _
#align to_lex toLex
-/
#print ofLex /-
/-- `of_lex` is the identity function from the `lex` of a type. -/
@[match_pattern]
def ofLex : Lex α ≃ α :=
Equiv.refl _
#align of_lex ofLex
-/
#print toLex_symm_eq /-
@[simp]
theorem toLex_symm_eq : (@toLex α).symm = ofLex :=
rfl
#align to_lex_symm_eq toLex_symm_eq
-/
#print ofLex_symm_eq /-
@[simp]
theorem ofLex_symm_eq : (@ofLex α).symm = toLex :=
rfl
#align of_lex_symm_eq ofLex_symm_eq
-/
#print toLex_ofLex /-
@[simp]
theorem toLex_ofLex (a : Lex α) : toLex (ofLex a) = a :=
rfl
#align to_lex_of_lex toLex_ofLex
-/
#print ofLex_toLex /-
@[simp]
theorem ofLex_toLex (a : α) : ofLex (toLex a) = a :=
rfl
#align of_lex_to_lex ofLex_toLex
-/
#print toLex_inj /-
@[simp]
theorem toLex_inj {a b : α} : toLex a = toLex b ↔ a = b :=
Iff.rfl
#align to_lex_inj toLex_inj
-/
#print ofLex_inj /-
@[simp]
theorem ofLex_inj {a b : Lex α} : ofLex a = ofLex b ↔ a = b :=
Iff.rfl
#align of_lex_inj ofLex_inj
-/
#print Lex.rec /-
/-- A recursor for `lex`. Use as `induction x using lex.rec`. -/
protected def Lex.rec {β : Lex α → Sort _} (h : ∀ a, β (toLex a)) : ∀ a, β a := fun a => h (ofLex a)
#align lex.rec Lex.rec
-/
|
from sympy import sieve
primelist = list(sieve.primerange(2,1000000))
listlen = len(primelist)
# ascending
pindex = 1
old_diff = -1
curr_list=[primelist[0]]
longest_list=[]
while pindex < listlen:
diff = primelist[pindex] - primelist[pindex-1]
if diff > old_diff:
curr_list.append(primelist[pindex])
if len(curr_list) > len(longest_list):
longest_list = curr_list
else:
curr_list = [primelist[pindex-1],primelist[pindex]]
old_diff = diff
pindex += 1
print(longest_list)
# descending
pindex = 1
old_diff = -1
curr_list=[primelist[0]]
longest_list=[]
while pindex < listlen:
diff = primelist[pindex] - primelist[pindex-1]
if diff < old_diff:
curr_list.append(primelist[pindex])
if len(curr_list) > len(longest_list):
longest_list = curr_list
else:
curr_list = [primelist[pindex-1],primelist[pindex]]
old_diff = diff
pindex += 1
print(longest_list) |
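The ascending and descending passes above are identical except for the direction of the gap comparison. As an illustrative sketch only (not part of the original script), the same search can be written once with the comparison passed in as a parameter; the helper name longest_monotone_gap_run is invented here, and operator.gt/operator.lt stand in for the two comparisons.

from sympy import sieve
import operator

def longest_monotone_gap_run(primes, cmp=operator.gt):
    # Longest run of consecutive primes whose successive gaps are strictly
    # monotone under cmp (operator.gt = increasing gaps, operator.lt = decreasing).
    longest, current, old_diff = [], primes[:1], None
    for prev, curr in zip(primes, primes[1:]):
        diff = curr - prev
        if old_diff is not None and cmp(diff, old_diff):
            current.append(curr)
        else:
            current = [prev, curr]   # start a new candidate run
        if len(current) > len(longest):
            longest = current
        old_diff = diff
    return longest

primes = list(sieve.primerange(2, 1000000))
print(longest_monotone_gap_run(primes, operator.gt))   # ascending gaps
print(longest_monotone_gap_run(primes, operator.lt))   # descending gaps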
module LLG
# using DifferentialEquations
using StaticArrays:reshape
using Base:Float64
using ModelingToolkit
using Random, Distributions
import LinearAlgebra:cross,similar,det,⋅
import Symbolics:build_function
using GLMakie
## diffeq stuff:
import Symbolics:istree,operation,arguments
export LatticeDescription, generateLatticeShape, Atom, positionsLattice,mappingAtoms, coupleNN, coupleInternal, isless
export plot_atoms,plot_atoms!, plot_connections, plot_connections!, absPositionsParticle
export dmi_interaction_honeycomb, nn_coupling_honeycomb, nnn_coupling_honeycomb
export initBoltzmann
# export custom alg
# export LLGProblem, SIB
# export exchangeCoupling, DMICoupling, setupEOM
include("lattice.jl")
# include("diffeq.jl")
include("plotting.jl")
include("graphene.jl")
include("diffeq_toolkit.jl")
# include("custom_cache.jl")
# include("custom_alg.jl")
# Write your package code here.
end
|
State Before:
α : Type u_1
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : GCDMonoid α
k m n : α
⊢ k ∣ m * n ↔ ∃ d₁ d₂, d₁ ∣ m ∧ d₂ ∣ n ∧ k = d₁ * d₂

State After:
α : Type u_1
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : GCDMonoid α
k m n : α
⊢ (∃ d₁ d₂, d₁ ∣ m ∧ d₂ ∣ n ∧ k = d₁ * d₂) → k ∣ m * n

Tactic: refine' ⟨exists_dvd_and_dvd_of_dvd_mul, _⟩

State Before:
α : Type u_1
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : GCDMonoid α
k m n : α
⊢ (∃ d₁ d₂, d₁ ∣ m ∧ d₂ ∣ n ∧ k = d₁ * d₂) → k ∣ m * n

State After: case intro.intro.intro.intro
α : Type u_1
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : GCDMonoid α
m n d₁ d₂ : α
hy : d₁ ∣ m
hz : d₂ ∣ n
⊢ d₁ * d₂ ∣ m * n

Tactic: rintro ⟨d₁, d₂, hy, hz, rfl⟩

State Before: case intro.intro.intro.intro
α : Type u_1
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : GCDMonoid α
m n d₁ d₂ : α
hy : d₁ ∣ m
hz : d₂ ∣ n
⊢ d₁ * d₂ ∣ m * n

State After: no goals

Tactic: exact mul_dvd_mul hy hz |
theory Number_Theory_Aux imports
"HOL-Number_Theory.Cong"
"HOL-Number_Theory.Residues"
begin
abbreviation inverse where "inverse x q \<equiv> (fst (bezw x q))"
lemma inverse: assumes "gcd x q = 1"
shows "[x * inverse x q = 1] (mod q)"
proof-
have 2: "fst (bezw x q) * x + snd (bezw x q) * int q = 1"
using bezw_aux assms int_minus
by (metis Num.of_nat_simps(2))
hence 3: "(fst (bezw x q) * x + snd (bezw x q) * int q) mod q = 1 mod q"
by (metis assms bezw_aux of_nat_mod)
hence 4: "(fst (bezw x q) * x) mod q = 1 mod q"
by simp
hence 5: "[(fst (bezw x q)) * x = 1] (mod q)"
using 2 3 cong_def by force
then show ?thesis by(simp add: mult.commute)
qed
lemma prod_not_prime:
assumes "prime (x::nat)"
and "prime y"
and "x > 2"
and "y > 2"
shows "\<not> prime ((x-1)*(y-1))"
by (metis assms One_nat_def Suc_diff_1 nat_neq_iff numeral_2_eq_2 prime_gt_0_nat prime_product)
lemma ex_inverse:
assumes coprime: "coprime (e :: nat) ((P-1)*(Q-1))"
and "prime P"
and "prime Q"
and "P \<noteq> Q"
shows "\<exists> d. [e*d = 1] (mod (P-1)) \<and> d \<noteq> 0"
proof-
have "coprime e (P-1)"
using assms(1) by simp
then obtain d where d: "[e*d = 1] (mod (P-1))"
using cong_solve_coprime_nat by auto
then show ?thesis by (metis cong_0_1_nat cong_1 mult_0_right zero_neq_one)
qed
lemma ex_k1_k2:
assumes coprime: "coprime (e :: nat) ((P-1)*(Q-1))"
and " [e*d = 1] (mod (P-1))"
shows "\<exists> k1 k2. e*d + k1*(P-1) = 1 + k2*(P-1)"
by (metis assms(2) cong_iff_lin_nat)
lemma "a > b \<Longrightarrow>int a - int b = int (a - b)"
by simp
lemma ex_k_mod:
assumes coprime: "coprime (e :: nat) ((P-1)*(Q-1))"
and "P \<noteq> Q"
and "prime P"
and "prime Q"
and "d \<noteq> 0"
and " [e*d = 1] (mod (P-1))"
shows "\<exists> k. e*d = 1 + k*(P-1)"
proof-
have "e > 0"
using assms(1) assms(2) prime_gt_0_nat by fastforce
then have "e*d \<ge> 1" using assms by simp
then obtain k where k: "e*d = 1 + k*(P-1)"
using assms(6) cong_to_1'_nat by auto
then show ?thesis
by simp
qed
lemma fermat_little_theorem:
assumes "prime (P :: nat)"
shows "[x^P = x] (mod P)"
proof(cases "P dvd x")
case True
hence "x mod P = 0" by simp
moreover have "x ^ P mod P = 0"
by (simp add: True assms prime_dvd_power_nat_iff prime_gt_0_nat)
ultimately show ?thesis
by (simp add: cong_def)
next
case False
hence "[x ^ (P - 1) = 1] (mod P)" using fermat_theorem assms by blast
then show ?thesis
by (metis Suc_diff_1 assms cong_scalar_left nat_mult_1_right not_gr_zero not_prime_0 power_Suc)
qed
lemma prime_field:
assumes "prime (q::nat)"
and "a < q"
and "a \<noteq> 0"
shows "coprime a q"
by (meson assms coprime_commute dvd_imp_le linorder_not_le neq0_conv prime_imp_coprime)
end |
If $f$ is a continuous function on an open set $S$ and $f$ is holomorphic on $S - K$, where $K$ is a finite set, then $f$ is holomorphic on $S$. |
!
!@(#) using non-square viewports, the associated distortion -- and how to fix it
!(LICENSE:PD)
!
program fdistrt
use M_draw
integer BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE
parameter(BLACK = 0)
parameter(RED = 1)
parameter(GREEN = 2)
parameter(YELLOW = 3)
parameter(BLUE = 4)
parameter(MAGENTA = 5)
parameter(CYAN = 6)
parameter(WHITE = 7)
character(len=50) :: device
character(len=120) :: buf
real xfact, yfact
print*,'Enter output device:'
read(*,'(a)') device
call vinit(device)
call color(BLACK)
call clear
!
! Make the viewport the same size as the screen/window.
!
call getfactors(xfact, yfact)
call viewport(-1.0, xfact, -1.0, yfact)
!
! Draw a square. (Looks like a rectangle, if the viewport
! wasn't "accidentally" square)
!
call color(1)
call rect(-0.5, -0.5, 0.5, 0.5)
!
! Tell them what it is.
!
call move2(-1.0, 0.9)
write(buf,'(''Distorted square (viewport(-1, '', F7.3, '', -1, '', F7.3, ''))'')') xfact, yfact
call drawstr(buf)
idum=getkey()
!
! Fix up the distortion (The actual formula to fix
! the distortion is (viewport.xmax * (1 + xfact) / 2.0),
! and similar for the y axis.)
!
call ortho2(-1.0, xfact, -1.0, yfact)
!
! Draw another square (Really is square this time)
!
call color(3)
call rect(-0.5, -0.5, 0.5, 0.5)
!
! Tell them what it is.
!
call move2(-1.0, -0.9)
write(buf,'(''Fixed up square with ortho2(-1, '', F7.3, '', -1, '', F7.3, '')'')') xfact, yfact
call drawstr(buf)
idum=getkey()
!
! Do it with world coords going from 0 - 5, 0 - 5.
! Reset square viewport.
!
call color(0)
call clear
call viewport(-1.0, 1.0, -1.0, 1.0)
call ortho2(0.0, 5.0, 0.0, 5.0)
call textsize(0.1, 0.1)
!
! Square from 1 to 3. (Really is square)
!
call color(2)
call rect(1.0, 1.0, 3.0, 3.0)
call move2(0.0, 4.5)
call drawstr('Square from 0 - 3, 0 - 3')
idum=getkey()
!
! Distort it with a non-square viewport.
!
call viewport(-1.0, xfact, -1.0, yfact)
call color(4)
call rect(1.0, 1.0, 3.0, 3.0)
call move2(0.0, 0.5)
call drawstr('Distorted square from 0 - 3, 0 - 3')
idum=getkey()
!
! Fix the distortion.
!
call ortho2(0.0, 5.0 * (1.0 + xfact) / 2.0, 0.0, 5.0 * (1.0 + yfact) / 2.0)
call color(5)
call rect(1.0, 1.0, 3.0, 3.0)
call move2(0.0, 2.5)
call drawstr('Fixed up square from 0 - 3, 0 - 3')
idum=getkey()
call vexit
end
|
Suppose $f$ is a continuous function defined on a convex set $S$ and $f$ is holomorphic on the interior of $S$ except for a finite set of points $k$. Suppose $\gamma$ is a closed path in $S$ that does not pass through any of the points in $k$. Then the integral of $f(w)/(w-z)$ along $\gamma$ is $2\pi i$ times the winding number of $\gamma$ around $z$ times $f(z)$. |
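Writing $n(\gamma, z)$ for the winding number of $\gamma$ around $z$, the conclusion above is the familiar Cauchy-integral-type identity; the display below is only a restatement of the sentence, not an additional claim.

$$\int_\gamma \frac{f(w)}{w - z}\, dw \;=\; 2\pi i \, n(\gamma, z)\, f(z).$$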
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj20synthconj5_hyp: forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (plus lv0 lv1) (mult lv1 (Succ lv2))) -> (@eq natural (Succ (plus lv0 (plus lv1 lv2))) (plus (mult lv1 (plus Zero (Succ lv2))) (plus Zero (Succ lv2)))).
Admitted.
QuickChick conj20synthconj5_hyp.
|
section \<open>\<open>Cblinfun_Code\<close> -- Support for code generation\<close>
text \<open>This theory provides support for code generation involving complex vector spaces and
bounded operators (e.g., types \<open>cblinfun\<close> and \<open>ell2\<close>).
To fully support code generation, in addition to importing this theory,
one needs to activate support for code generation (import theory \<open>Jordan_Normal_Form.Matrix_Impl\<close>)
and for real and complex numbers (import theory \<open>Real_Impl.Real_Impl\<close> for support of reals of the
form \<open>a + b * sqrt c\<close> or \<open>Algebraic_Numbers.Real_Factorization\<close> (much slower) for support of algebraic reals;
support of complex numbers comes "for free").
The builtin support for real and complex numbers (in \<open>Complex_Main\<close>) is not sufficient because it
does not support the computation of square-roots which are used in the setup below.
It is also recommended to import \<open>HOL-Library.Code_Target_Numeral\<close> for faster support of nats
and integers.\<close>
theory Cblinfun_Code
imports
Cblinfun_Matrix Containers.Set_Impl Jordan_Normal_Form.Matrix_Kernel
begin
no_notation "Lattice.meet" (infixl "\<sqinter>\<index>" 70)
no_notation "Lattice.join" (infixl "\<squnion>\<index>" 65)
hide_const (open) Coset.kernel
hide_const (open) Matrix_Kernel.kernel
hide_const (open) Order.bottom Order.top
unbundle jnf_notation
unbundle cblinfun_notation
subsection \<open>Code equations for cblinfun operators\<close>
text \<open>In this subsection, we define the code for all operations involving only
operators (no combinations of operators/vectors/subspaces)\<close>
text \<open>The following lemma registers cblinfun as an abstract datatype with
constructor \<^const>\<open>cblinfun_of_mat\<close>.
That means that in generated code, all cblinfun operators will be represented
as \<^term>\<open>cblinfun_of_mat X\<close> where X is a matrix.
In code equations for operations involving operators (e.g., +), we
can then write the equation directly in terms of matrices
by writing, e.g., \<^term>\<open>mat_of_cblinfun (A+B)\<close> in the lhs,
and in the rhs we define the matrix that corresponds to the sum of A,B.
In the rhs, we can access the matrices corresponding to A,B by
writing \<^term>\<open>mat_of_cblinfun B\<close>.
(See, e.g., lemma \<open>cblinfun_of_mat_plusOp\<close> below).
See @{cite "code-generation-tutorial"} for more information on
@{theory_text \<open>[code abstype]\<close>}.\<close>
declare mat_of_cblinfun_inverse [code abstype]
text \<open>This lemma defines addition. By writing \<^term>\<open>mat_of_cblinfun (M + N)\<close>
on the left hand side, we get access to the matrices of \<open>M\<close> and \<open>N\<close> on the right hand side
(as \<^term>\<open>mat_of_cblinfun M\<close> and \<^term>\<open>mat_of_cblinfun N\<close>).\<close>
declare mat_of_cblinfun_plus[code]
\<comment> \<open>Code equation for addition of cblinfuns\<close>
declare mat_of_cblinfun_id[code]
\<comment> \<open>Code equation for computing the identity operator\<close>
declare mat_of_cblinfun_1[code]
\<comment> \<open>Code equation for computing the one-dimensional identity\<close>
declare mat_of_cblinfun_zero[code]
\<comment> \<open>Code equation for computing the zero operator\<close>
declare mat_of_cblinfun_uminus[code]
\<comment> \<open>Code equation for computing the unary minus on cblinfun's\<close>
declare mat_of_cblinfun_minus[code]
\<comment> \<open>Code equation for computing the difference of cblinfun's\<close>
declare mat_of_cblinfun_classical_operator[code]
\<comment> \<open>Code equation for computing the "classical operator"\<close>
declare mat_of_cblinfun_compose[code]
\<comment> \<open>Code equation for computing the composition/product of cblinfun's\<close>
declare mat_of_cblinfun_scaleC[code]
\<comment> \<open>Code equation for multiplication with complex scalar\<close>
declare mat_of_cblinfun_scaleR[code]
\<comment> \<open>Code equation for multiplication with real scalar\<close>
declare mat_of_cblinfun_adj[code]
\<comment> \<open>Code equation for computing the adj\<close>
text \<open>This instantiation defines a code equation for equality tests for cblinfun.\<close>
instantiation cblinfun :: (onb_enum,onb_enum) equal begin
definition [code]: "equal_cblinfun M N \<longleftrightarrow> mat_of_cblinfun M = mat_of_cblinfun N"
for M N :: "'a \<Rightarrow>\<^sub>C\<^sub>L 'b"
instance
apply intro_classes
unfolding equal_cblinfun_def
using mat_of_cblinfun_inj injD by fastforce
end
subsection \<open>Vectors\<close>
text \<open>In this section, we define code for operations on vectors. As with operators above,
we do this by using an isomorphism between finite vectors
(i.e., types T of sort \<open>complex_vector\<close>) and the type \<^typ>\<open>complex vec\<close> from
\<^session>\<open>Jordan_Normal_Form\<close>. We have developed such an isomorphism in
theory \<open>Cblinfun_Matrix\<close> for
any type T of sort \<open>onb_enum\<close> (i.e., any type with a finite canonical orthonormal basis)
as was done above for bounded operators.
Unfortunately, we cannot declare code equations for a type class,
code equations must be related to a specific type constructor.
So we give code definitions only for vectors of type \<^typ>\<open>'a ell2\<close> (where \<^typ>\<open>'a\<close>
must be of sort \<open>enum\<close> to make sure that \<^typ>\<open>'a ell2\<close> is finite dimensional).
The isomorphism between \<^typ>\<open>'a ell2\<close> is given by the constants \<open>ell2_of_vec\<close>
and \<open>vec_of_ell2\<close> which are copies of the more general \<^const>\<open>basis_enum_of_vec\<close>
and \<^const>\<open>vec_of_basis_enum\<close> but with a more restricted type to be usable in our code equations.
\<close>
definition ell2_of_vec :: "complex vec \<Rightarrow> 'a::enum ell2" where "ell2_of_vec = basis_enum_of_vec"
definition vec_of_ell2 :: "'a::enum ell2 \<Rightarrow> complex vec" where "vec_of_ell2 = vec_of_basis_enum"
text \<open>The following theorem registers the isomorphism \<open>ell2_of_vec\<close>/\<open>vec_of_ell2\<close>
for code generation. From now on,
code for operations on \<^typ>\<open>_ ell2\<close> can be expressed by declarations such
as \<^term>\<open>vec_of_ell2 (f a b) = g (vec_of_ell2 a) (vec_of_ell2 b)\<close>
if the operation f on \<^typ>\<open>_ ell2\<close> corresponds to the operation g on
\<^typ>\<open>complex vec\<close>.\<close>
lemma vec_of_ell2_inverse [code abstype]:
"ell2_of_vec (vec_of_ell2 B) = B"
unfolding ell2_of_vec_def vec_of_ell2_def
by (rule vec_of_basis_enum_inverse)
text \<open>This instantiation defines a code equation for equality tests for ell2.\<close>
instantiation ell2 :: (enum) equal begin
definition [code]: "equal_ell2 M N \<longleftrightarrow> vec_of_ell2 M = vec_of_ell2 N"
for M N :: "'a::enum ell2"
instance
apply intro_classes
unfolding equal_ell2_def
by (metis vec_of_ell2_inverse)
end
lemma vec_of_ell2_zero[code]:
\<comment> \<open>Code equation for computing the zero vector\<close>
"vec_of_ell2 (0::'a::enum ell2) = zero_vec (CARD('a))"
by (simp add: vec_of_ell2_def vec_of_basis_enum_zero)
lemma vec_of_ell2_ket[code]:
\<comment> \<open>Code equation for computing a standard basis vector\<close>
"vec_of_ell2 (ket i) = unit_vec (CARD('a)) (enum_idx i)"
for i::"'a::enum"
using vec_of_ell2_def vec_of_basis_enum_ket by metis
lemma vec_of_ell2_timesScalarVec[code]:
\<comment> \<open>Code equation for multiplying a vector with a complex scalar\<close>
"vec_of_ell2 (scaleC a \<psi>) = smult_vec a (vec_of_ell2 \<psi>)"
for \<psi> :: "'a::enum ell2"
by (simp add: vec_of_ell2_def vec_of_basis_enum_scaleC)
lemma vec_of_ell2_scaleR[code]:
\<comment> \<open>Code equation for multiplying a vector with a real scalar\<close>
"vec_of_ell2 (scaleR a \<psi>) = smult_vec (complex_of_real a) (vec_of_ell2 \<psi>)"
for \<psi> :: "'a::enum ell2"
by (simp add: vec_of_ell2_def vec_of_basis_enum_scaleR)
lemma ell2_of_vec_plus[code]:
\<comment> \<open>Code equation for adding vectors\<close>
"vec_of_ell2 (x + y) = (vec_of_ell2 x) + (vec_of_ell2 y)" for x y :: "'a::enum ell2"
by (simp add: vec_of_ell2_def vec_of_basis_enum_add)
lemma ell2_of_vec_minus[code]:
\<comment> \<open>Code equation for subtracting vectors\<close>
"vec_of_ell2 (x - y) = (vec_of_ell2 x) - (vec_of_ell2 y)" for x y :: "'a::enum ell2"
by (simp add: vec_of_ell2_def vec_of_basis_enum_minus)
lemma ell2_of_vec_uminus[code]:
\<comment> \<open>Code equation for negating a vector\<close>
"vec_of_ell2 (- y) = - (vec_of_ell2 y)" for y :: "'a::enum ell2"
by (simp add: vec_of_ell2_def vec_of_basis_enum_uminus)
lemma cinner_ell2_code' [code]: "cinner \<psi> \<phi> = cscalar_prod (vec_of_ell2 \<phi>) (vec_of_ell2 \<psi>)"
\<comment> \<open>Code equation for the inner product of vectors\<close>
by (simp add: cscalar_prod_vec_of_basis_enum vec_of_ell2_def)
lemma norm_ell2_code [code]:
\<comment> \<open>Code equation for the norm of a vector\<close>
"norm \<psi> = (let \<psi>' = vec_of_ell2 \<psi> in
sqrt (\<Sum> i \<in> {0 ..< dim_vec \<psi>'}. let z = vec_index \<psi>' i in (Re z)\<^sup>2 + (Im z)\<^sup>2))"
by (simp add: norm_ell2_vec_of_basis_enum vec_of_ell2_def)
lemma times_ell2_code'[code]:
\<comment> \<open>Code equation for the product in the algebra of one-dimensional vectors\<close>
fixes \<psi> \<phi> :: "'a::{CARD_1,enum} ell2"
shows "vec_of_ell2 (\<psi> * \<phi>)
= vec_of_list [vec_index (vec_of_ell2 \<psi>) 0 * vec_index (vec_of_ell2 \<phi>) 0]"
by (simp add: vec_of_ell2_def vec_of_basis_enum_times)
lemma divide_ell2_code'[code]:
\<comment> \<open>Code equation for the product in the algebra of one-dimensional vectors\<close>
fixes \<psi> \<phi> :: "'a::{CARD_1,enum} ell2"
shows "vec_of_ell2 (\<psi> / \<phi>)
= vec_of_list [vec_index (vec_of_ell2 \<psi>) 0 / vec_index (vec_of_ell2 \<phi>) 0]"
by (simp add: vec_of_ell2_def vec_of_basis_enum_divide)
lemma inverse_ell2_code'[code]:
\<comment> \<open>Code equation for the product in the algebra of one-dimensional vectors\<close>
fixes \<psi> :: "'a::{CARD_1,enum} ell2"
shows "vec_of_ell2 (inverse \<psi>)
= vec_of_list [inverse (vec_index (vec_of_ell2 \<psi>) 0)]"
by (simp add: vec_of_ell2_def vec_of_basis_enum_to_inverse)
lemma one_ell2_code'[code]:
\<comment> \<open>Code equation for the unit in the algebra of one-dimensional vectors\<close>
"vec_of_ell2 (1 :: 'a::{CARD_1,enum} ell2) = vec_of_list [1]"
by (simp add: vec_of_ell2_def vec_of_basis_enum_1)
subsection \<open>Vector/Matrix\<close>
text \<open>We proceed to give code equations for operations involving both
operators (cblinfun) and vectors. As explained above, we have to restrict
the equations to vectors of type \<^typ>\<open>'a ell2\<close> even though the theory is available
for any type of class \<^class>\<open>onb_enum\<close>. As a consequence, we run into an
additional technicality now. For example, to define a code equation for applying
an operator to a vector, we might try to give the following lemma:
\<^theory_text>\<open>lemma cblinfun_apply_code[code]:
"vec_of_ell2 (M *\<^sub>V x) = (mult_mat_vec (mat_of_cblinfun M) (vec_of_ell2 x))"
by (simp add: mat_of_cblinfun_cblinfun_apply vec_of_ell2_def)\<close>
Unfortunately, this does not work, Isabelle produces the warning
"Projection as head in equation", most likely due to the fact that
the type of \<^term>\<open>(*\<^sub>V)\<close> as it occurs in the equation is less general than the declared type of
the constant \<^term>\<open>(*\<^sub>V)\<close> (in the equation it is restricted to @{type ell2}). We overcome this problem
by defining a constant \<open>cblinfun_apply_code\<close> which is equal to \<^term>\<open>(*\<^sub>V)\<close>
but has a more restricted type. We then instruct the code generation
to replace occurrences of \<^term>\<open>(*\<^sub>V)\<close> by \<open>cblinfun_apply_code\<close> (where possible),
and we add code generation for \<open>cblinfun_apply_code\<close> instead of \<^term>\<open>(*\<^sub>V)\<close>.
\<close>
definition cblinfun_apply_code :: "'a ell2 \<Rightarrow>\<^sub>C\<^sub>L 'b ell2 \<Rightarrow> 'a ell2 \<Rightarrow> 'b ell2"
where [code del, code_abbrev]: "cblinfun_apply_code = (*\<^sub>V)"
\<comment> \<open>@{attribute code_abbrev} instructs the code generation to replace the
rhs \<^term>\<open>(*\<^sub>V)\<close> by the lhs \<^term>\<open>cblinfun_apply_code\<close> before starting
the actual code generation.\<close>
lemma cblinfun_apply_code[code]:
\<comment> \<open>Code equation for \<^term>\<open>cblinfun_apply_code\<close>, i.e., for applying an operator
to an \<^type>\<open>ell2\<close> vector\<close>
"vec_of_ell2 (cblinfun_apply_code M x) = (mult_mat_vec (mat_of_cblinfun M) (vec_of_ell2 x))"
by (simp add: cblinfun_apply_code_def mat_of_cblinfun_cblinfun_apply vec_of_ell2_def)
text \<open>For the constant \<^term>\<open>vector_to_cblinfun\<close> (canonical isomorphism from
vectors to operators), we have the same problem and define a constant
\<open>vector_to_cblinfun_code\<close> with more restricted type\<close>
definition vector_to_cblinfun_code :: "'a ell2 \<Rightarrow> 'b::one_dim \<Rightarrow>\<^sub>C\<^sub>L 'a ell2" where
[code del,code_abbrev]: "vector_to_cblinfun_code = vector_to_cblinfun"
\<comment> \<open>@{attribute code_abbrev} instructs the code generation to replace the
rhs \<^term>\<open>vector_to_cblinfun\<close> by the lhs \<^term>\<open>vector_to_cblinfun_code\<close>
before starting the actual code generation.\<close>
lemma vector_to_cblinfun_code[code]:
\<comment> \<open>Code equation for translating a vector into an operation (single-column matrix)\<close>
"mat_of_cblinfun (vector_to_cblinfun_code \<psi>) = mat_of_cols (CARD('a)) [vec_of_ell2 \<psi>]"
for \<psi>::"'a::enum ell2"
by (simp add: mat_of_cblinfun_vector_to_cblinfun vec_of_ell2_def vector_to_cblinfun_code_def)
subsection \<open>Subspaces\<close>
text \<open>In this section, we define code equations for handling subspaces, i.e.,
values of type \<^typ>\<open>'a ccsubspace\<close>. We choose to computationally represent
a subspace by a list of vectors that span the subspace. That is,
if \<^term>\<open>vecs\<close> are vectors (type \<^typ>\<open>complex vec\<close>), \<open>SPAN vecs\<close> is defined to be their
span. Then the code generation can simply represent all subspaces in this form, and
we need to define the operations on subspaces in terms of list of vectors
(e.g., the closed union of two subspaces would be computed as the concatenation
of the two lists, to give one of the simplest examples).
To support this, \<open>SPAN\<close> is declared as a "\<open>code_datatype\<close>".
(Not as an abstract datatype like \<^term>\<open>cblinfun_of_mat\<close>/\<^term>\<open>mat_of_cblinfun\<close>
because that would require \<open>SPAN\<close> to be injective.)
Then all code equations for different operations need to be formulated as
functions of values of the form \<open>SPAN x\<close>. (E.g., \<open>SPAN x + SPAN y = SPAN (\<dots>)\<close>.)\<close>
definition [code del]: "SPAN x = (let n = length (canonical_basis :: 'a::onb_enum list) in
ccspan (basis_enum_of_vec ` Set.filter (\<lambda>v. dim_vec v = n) (set x)) :: 'a ccsubspace)"
\<comment> \<open>The SPAN of vectors x, as a \<^type>\<open>ccsubspace\<close>.
We filter out vectors of the wrong dimension because \<open>SPAN\<close> needs to have
well-defined behavior even in cases that would not actually occur in an execution.\<close>
code_datatype SPAN
text \<open>We first declare code equations for \<^term>\<open>Proj\<close>, i.e., for
turning a subspace into a projector. This means, we would need a code equation
of the form \<open>mat_of_cblinfun (Proj (SPAN S)) = \<dots>\<close>. However, this equation is
not accepted by the code generation for reasons we do not understand. But
if we define an auxiliary constant \<open>mat_of_cblinfun_Proj_code\<close> that stands for
\<open>mat_of_cblinfun (Proj _)\<close>, define a code equation for \<open>mat_of_cblinfun_Proj_code\<close>,
and then define a code equation for \<open>mat_of_cblinfun (Proj S)\<close> in terms of
\<open>mat_of_cblinfun_Proj_code\<close>, Isabelle accepts the code equations.\<close>
definition "mat_of_cblinfun_Proj_code S = mat_of_cblinfun (Proj S)"
declare mat_of_cblinfun_Proj_code_def[symmetric, code]
lemma mat_of_cblinfun_Proj_code_code[code]:
\<comment> \<open>Code equation for computing a projector onto a set S of vectors.
We first make the vectors S into an orthonormal basis using
the Gram-Schmidt procedure and then compute the projector
as the sum of the "butterflies" \<open>x * x*\<close> of the vectors \<open>x\<in>S\<close>
(done by \<^term>\<open>mk_projector_orthog\<close>).\<close>
"mat_of_cblinfun_Proj_code (SPAN S :: 'a::onb_enum ccsubspace) =
(let d = length (canonical_basis :: 'a list) in mk_projector_orthog d
(gram_schmidt0 d (filter (\<lambda>v. dim_vec v = d) S)))"
proof -
have *: "map_option vec_of_basis_enum (if dim_vec x = length (canonical_basis :: 'a list) then Some (basis_enum_of_vec x :: 'a) else None)
= (if dim_vec x = length (canonical_basis :: 'a list) then Some x else None)" for x
by auto
show ?thesis
unfolding SPAN_def mat_of_cblinfun_Proj_code_def
using mat_of_cblinfun_Proj_ccspan[where S =
"map basis_enum_of_vec (filter (\<lambda>v. dim_vec v = (length (canonical_basis :: 'a list))) S) :: 'a list"]
apply (simp only: Let_def map_filter_map_filter filter_set image_set map_map_filter o_def)
unfolding *
by (simp add: map_filter_map_filter[symmetric])
qed
lemma top_ccsubspace_code[code]:
\<comment> \<open>Code equation for \<^term>\<open>top\<close>, the subspace containing everything.
Top is represented as the span of the standard basis vectors.\<close>
"(top::'a ccsubspace) =
(let n = length (canonical_basis :: 'a::onb_enum list) in SPAN (unit_vecs n))"
unfolding SPAN_def
apply (simp only: index_unit_vec Let_def map_filter_map_filter filter_set image_set map_map_filter
map_filter_map o_def unit_vecs_def)
apply (simp add: basis_enum_of_vec_unit_vec)
apply (subst nth_image)
by (auto simp: )
lemma bot_as_span[code]:
\<comment> \<open>Code equation for \<^term>\<open>bot\<close>, the subspace containing only the zero vector.
Bot is represented as the span of the empty list of vectors.\<close>
"(bot::'a::onb_enum ccsubspace) = SPAN []"
unfolding SPAN_def by (auto simp: Set.filter_def)
lemma sup_spans[code]:
\<comment> \<open>Code equation for the join (lub) of two subspaces (union of the generating lists)\<close>
"SPAN A \<squnion> SPAN B = SPAN (A @ B)"
unfolding SPAN_def
by (auto simp: ccspan_union image_Un filter_Un Let_def)
text \<open>We do not need an equation for \<^term>\<open>(+)\<close> because \<^term>\<open>(+)\<close>
is defined in terms of \<^term>\<open>(\<squnion>)\<close> (for \<^type>\<open>ccsubspace\<close>), thus the code generation automatically
computes \<^term>\<open>(+)\<close> in terms of the code for \<^term>\<open>(\<squnion>)\<close>\<close>
definition [code del,code_abbrev]: "Span_code (S::'a::enum ell2 set) = (ccspan S)"
\<comment> \<open>A copy of \<^term>\<open>ccspan\<close> with restricted type. For analogous reasons as
\<^term>\<open>cblinfun_apply_code\<close>, see there for explanations\<close>
lemma span_Set_Monad[code]: "Span_code (Set_Monad l) = (SPAN (map vec_of_ell2 l))"
\<comment> \<open>Code equation for the span of a finite set. (\<^term>\<open>Set_Monad\<close> is a datatype
constructor that represents sets as lists in the computation.)\<close>
apply (simp add: Span_code_def SPAN_def Let_def)
apply (subst Set_filter_unchanged)
apply (auto simp add: vec_of_ell2_def)[1]
by (metis (no_types, lifting) ell2_of_vec_def image_image map_idI set_map vec_of_ell2_inverse)
text \<open>This instantiation defines a code equation for equality tests for \<^type>\<open>ccsubspace\<close>.
The actual code for equality tests is given below (lemma \<open>equal_ccsubspace_code\<close>).\<close>
instantiation ccsubspace :: (onb_enum) equal begin
definition [code del]: "equal_ccsubspace (A::'a ccsubspace) B = (A=B)"
instance apply intro_classes unfolding equal_ccsubspace_def by simp
end
lemma leq_ccsubspace_code[code]:
\<comment> \<open>Code equation for deciding inclusion of one space in another.
Uses the constant \<^term>\<open>is_subspace_of_vec_list\<close> which implements the actual
computation by checking for each generator of A whether it is in the
span of B (by orthogonal projection onto an orthonormal basis of B
which is computed using Gram-Schmidt).\<close>
"SPAN A \<le> (SPAN B :: 'a::onb_enum ccsubspace)
\<longleftrightarrow> (let d = length (canonical_basis :: 'a list) in
is_subspace_of_vec_list d
(filter (\<lambda>v. dim_vec v = d) A)
(filter (\<lambda>v. dim_vec v = d) B))"
proof -
define d A' B' where "d = length (canonical_basis :: 'a list)"
and "A' = filter (\<lambda>v. dim_vec v = d) A"
and "B' = filter (\<lambda>v. dim_vec v = d) B"
show ?thesis
unfolding SPAN_def d_def[symmetric] filter_set Let_def
A'_def[symmetric] B'_def[symmetric] image_set
apply (subst ccspan_leq_using_vec)
unfolding d_def[symmetric] map_map o_def
apply (subst map_cong[where xs=A', OF refl])
apply (rule basis_enum_of_vec_inverse)
apply (simp add: A'_def d_def)
apply (subst map_cong[where xs=B', OF refl])
apply (rule basis_enum_of_vec_inverse)
by (simp_all add: B'_def d_def)
qed
lemma equal_ccsubspace_code[code]:
\<comment> \<open>Code equation for equality test. By checking mutual inclusion
(for which we have code by the preceding code equation).\<close>
"HOL.equal (A::_ ccsubspace) B = (A\<le>B \<and> B\<le>A)"
unfolding equal_ccsubspace_def by auto
lemma apply_cblinfun_code[code]:
\<comment> \<open>Code equation for applying an operator \<^term>\<open>A\<close> to a subspace.
Simply by multiplying each generator with \<^term>\<open>A\<close>\<close>
"A *\<^sub>S SPAN S = (let d = length (canonical_basis :: 'a list) in
SPAN (map (mult_mat_vec (mat_of_cblinfun A))
(filter (\<lambda>v. dim_vec v = d) S)))"
for A::"'a::onb_enum \<Rightarrow>\<^sub>C\<^sub>L'b::onb_enum"
proof -
define dA dB S'
where "dA = length (canonical_basis :: 'a list)"
and "dB = length (canonical_basis :: 'b list)"
and "S' = filter (\<lambda>v. dim_vec v = dA) S"
have "cblinfun_image A (SPAN S) = A *\<^sub>S ccspan (set (map basis_enum_of_vec S'))"
unfolding SPAN_def dA_def[symmetric] Let_def S'_def filter_set
by simp
also have "\<dots> = ccspan ((\<lambda>x. basis_enum_of_vec
(mat_of_cblinfun A *\<^sub>v vec_of_basis_enum (basis_enum_of_vec x :: 'a))) ` set S')"
apply (subst cblinfun_apply_ccspan_using_vec)
by (simp add: image_image)
also have "\<dots> = ccspan ((\<lambda>x. basis_enum_of_vec (mat_of_cblinfun A *\<^sub>v x)) ` set S')"
apply (subst image_cong[OF refl])
apply (subst basis_enum_of_vec_inverse)
by (auto simp add: S'_def dA_def)
also have "\<dots> = SPAN (map (mult_mat_vec (mat_of_cblinfun A)) S')"
unfolding SPAN_def dB_def[symmetric] Let_def filter_set
apply (subst filter_True)
by (simp_all add: dB_def mat_of_cblinfun_def image_image)
finally show ?thesis
unfolding dA_def[symmetric] S'_def[symmetric] Let_def
by simp
qed
definition [code del, code_abbrev]: "range_cblinfun_code A = A *\<^sub>S top"
\<comment> \<open>A new constant for the special case of applying an operator to the subspace \<^term>\<open>top\<close>
(i.e., for computing the range of the operator). We do this to be able to give
more specialized code for this specific situation. (The generic code for
\<^term>\<open>(*\<^sub>S)\<close> would work but is less efficient because it involves repeated matrix
multiplications. @{attribute code_abbrev} makes sure occurrences of \<^term>\<open>A *\<^sub>S top\<close>
are replaced before starting the actual code generation.\<close>
lemma range_cblinfun_code[code]:
\<comment> \<open>Code equation for computing the range of an operator \<^term>\<open>A\<close>.
Returns the columns of the matrix representation of \<^term>\<open>A\<close>.\<close>
fixes A :: "'a::onb_enum \<Rightarrow>\<^sub>C\<^sub>L 'b::onb_enum"
shows "range_cblinfun_code A = SPAN (cols (mat_of_cblinfun A))"
proof -
define dA dB
where "dA = length (canonical_basis :: 'a list)"
and "dB = length (canonical_basis :: 'b list)"
have carrier_A: "mat_of_cblinfun A \<in> carrier_mat dB dA"
unfolding mat_of_cblinfun_def dA_def dB_def by simp
have "range_cblinfun_code A = A *\<^sub>S SPAN (unit_vecs dA)"
unfolding range_cblinfun_code_def
by (metis dA_def top_ccsubspace_code)
also have "\<dots> = SPAN (map (\<lambda>i. mat_of_cblinfun A *\<^sub>v unit_vec dA i) [0..<dA])"
unfolding apply_cblinfun_code dA_def[symmetric] Let_def
apply (subst filter_True)
apply (meson carrier_vecD subset_code(1) unit_vecs_carrier)
by (simp add: unit_vecs_def o_def)
also have "\<dots> = SPAN (map (\<lambda>x. mat_of_cblinfun A *\<^sub>v col (1\<^sub>m dA) x) [0..<dA])"
apply (subst map_cong[OF refl])
by auto
also have "\<dots> = SPAN (map (col (mat_of_cblinfun A * 1\<^sub>m dA)) [0..<dA])"
apply (subst map_cong[OF refl])
apply (subst col_mult2[symmetric])
apply (rule carrier_A)
by auto
also have "\<dots> = SPAN (cols (mat_of_cblinfun A))"
unfolding cols_def dA_def[symmetric]
apply (subst right_mult_one_mat[OF carrier_A])
using carrier_A by blast
finally show ?thesis
by -
qed
lemma uminus_Span_code[code]: "- X = range_cblinfun_code (id_cblinfun - Proj X)"
\<comment> \<open>Code equation for the orthogonal complement of a subspace \<^term>\<open>X\<close>.
Computed as the range of one minus the projector on \<^term>\<open>X\<close>\<close>
unfolding range_cblinfun_code_def
by (metis Proj_ortho_compl Proj_range)
lemma kernel_code[code]:
\<comment> \<open>Computes the kernel of an operator \<^term>\<open>A\<close>.
This is implemented using the existing functions
for transforming a matrix into row echelon form (\<^term>\<open>gauss_jordan_single\<close>)
and for computing a basis of the kernel of such a matrix
(\<^term>\<open>find_base_vectors\<close>)\<close>
"kernel A = SPAN (find_base_vectors (gauss_jordan_single (mat_of_cblinfun A)))"
for A::"('a::onb_enum,'b::onb_enum) cblinfun"
proof -
define dA dB Am Ag base
where "dA = length (canonical_basis :: 'a list)"
and "dB = length (canonical_basis :: 'b list)"
and "Am = mat_of_cblinfun A"
and "Ag = gauss_jordan_single Am"
and "base = find_base_vectors Ag"
interpret complex_vec_space dA.
have Am_carrier: "Am \<in> carrier_mat dB dA"
unfolding Am_def mat_of_cblinfun_def dA_def dB_def by simp
have row_echelon: "row_echelon_form Ag"
unfolding Ag_def
using Am_carrier refl by (rule gauss_jordan_single)
have Ag_carrier: "Ag \<in> carrier_mat dB dA"
unfolding Ag_def
using Am_carrier refl by (rule gauss_jordan_single(2))
have base_carrier: "set base \<subseteq> carrier_vec dA"
unfolding base_def
using find_base_vectors(1)[OF row_echelon Ag_carrier]
using Ag_carrier mat_kernel_def by blast
interpret k: kernel dB dA Ag
apply standard using Ag_carrier by simp
have basis_base: "kernel.basis dA Ag (set base)"
using row_echelon Ag_carrier unfolding base_def
by (rule find_base_vectors(3))
have "space_as_set (SPAN base)
= space_as_set (ccspan (basis_enum_of_vec ` set base :: 'a set))"
unfolding SPAN_def dA_def[symmetric] Let_def filter_set
apply (subst filter_True)
using base_carrier by auto
also have "\<dots> = cspan (basis_enum_of_vec ` set base)"
apply transfer apply (subst closure_finite_cspan)
by simp_all
also have "\<dots> = basis_enum_of_vec ` span (set base)"
apply (subst basis_enum_of_vec_span)
using base_carrier dA_def by auto
also have "\<dots> = basis_enum_of_vec ` mat_kernel Ag"
using basis_base k.Ker.basis_def k.span_same by auto
also have "\<dots> = basis_enum_of_vec ` {v \<in> carrier_vec dA. Ag *\<^sub>v v = 0\<^sub>v dB}"
apply (rule arg_cong[where f="\<lambda>x. basis_enum_of_vec ` x"])
unfolding mat_kernel_def using Ag_carrier
by simp
also have "\<dots> = basis_enum_of_vec ` {v \<in> carrier_vec dA. Am *\<^sub>v v = 0\<^sub>v dB}"
using gauss_jordan_single(1)[OF Am_carrier Ag_def[symmetric]]
by auto
also have "\<dots> = {w. A *\<^sub>V w = 0}"
proof -
have "basis_enum_of_vec ` {v \<in> carrier_vec dA. Am *\<^sub>v v = 0\<^sub>v dB}
= basis_enum_of_vec ` {v \<in> carrier_vec dA. A *\<^sub>V basis_enum_of_vec v = 0}"
apply (rule arg_cong[where f="\<lambda>t. basis_enum_of_vec ` t"])
apply (rule Collect_cong)
apply (simp add: Am_def)
by (metis Am_carrier Am_def carrier_matD(2) carrier_vecD dB_def mat_carrier
mat_of_cblinfun_def mat_of_cblinfun_cblinfun_apply vec_of_basis_enum_inverse
basis_enum_of_vec_inverse vec_of_basis_enum_zero)
also have "\<dots> = {w \<in> basis_enum_of_vec ` carrier_vec dA. A *\<^sub>V w = 0}"
apply (subst Compr_image_eq[symmetric])
by simp
also have "\<dots> = {w. A *\<^sub>V w = 0}"
apply auto
by (metis (no_types, lifting) Am_carrier Am_def carrier_matD(2) carrier_vec_dim_vec dim_vec_of_basis_enum' image_iff mat_carrier mat_of_cblinfun_def vec_of_basis_enum_inverse)
finally show ?thesis
by -
qed
also have "\<dots> = space_as_set (kernel A)"
apply transfer by auto
finally have "SPAN base = kernel A"
by (simp add: space_as_set_inject)
then show ?thesis
by (simp add: base_def Ag_def Am_def)
qed
lemma inf_ccsubspace_code[code]:
\<comment> \<open>Code equation for intersection of subspaces.
Reduced to orthogonal complement and sum of subspaces
for which we already have code equations.\<close>
"(A::'a::onb_enum ccsubspace) \<sqinter> B = - (- A \<squnion> - B)"
by (subst ortho_involution[symmetric], subst compl_inf, simp)
lemma Sup_ccsubspace_code[code]:
\<comment> \<open>Supremum (sum) of a set of subspaces. Implemented
by repeated pairwise sum.\<close>
"Sup (Set_Monad l :: 'a::onb_enum ccsubspace set) = fold sup l bot"
unfolding Set_Monad_def
by (simp add: Sup_set_fold)
lemma Inf_ccsubspace_code[code]:
\<comment> \<open>Infimum (intersection) of a set of subspaces.
Implemented by the orthogonal complement of the supremum.\<close>
"Inf (Set_Monad l :: 'a::onb_enum ccsubspace set)
= - Sup (Set_Monad (map uminus l))"
unfolding Set_Monad_def
apply (induction l)
by auto
subsection \<open>Miscellanea\<close>
text \<open>This is a hack to circumvent a bug in the code generation. The automatically
generated code for the class \<^class>\<open>uniformity\<close> has a type that is different from
what the generated code later assumes, leading to compilation errors (in ML at least)
in any expression involving \<^typ>\<open>_ ell2\<close> (even if the constant \<^const>\<open>uniformity\<close> is
not actually used).
The fragment below circumvents this by forcing Isabelle to use the right type.
(The logically useless fragment "\<open>let x = ((=)::'a\<Rightarrow>_\<Rightarrow>_)\<close>" achieves this.)\<close>
lemma uniformity_ell2_code[code]: "(uniformity :: ('a ell2 * _) filter) = Filter.abstract_filter (%_.
Code.abort STR ''no uniformity'' (%_.
let x = ((=)::'a\<Rightarrow>_\<Rightarrow>_) in uniformity))"
by simp
text \<open>Code equation for \<^term>\<open>UNIV\<close>.
It is now implemented via type class \<^class>\<open>enum\<close>
(which provides a list of all values).\<close>
declare [[code drop: UNIV]]
declare enum_class.UNIV_enum[code]
text \<open>Setup for code generation involving sets of \<^type>\<open>ell2\<close>/\<^type>\<open>ccsubspace\<close>.
This configures to use lists for representing sets in code.\<close>
derive (eq) ceq ccsubspace
derive (no) ccompare ccsubspace
derive (monad) set_impl ccsubspace
derive (eq) ceq ell2
derive (no) ccompare ell2
derive (monad) set_impl ell2
unbundle no_jnf_notation
unbundle no_cblinfun_notation
end
|
#pragma once
#include <string>
#include <boost/beast/core.hpp>
namespace ott
{
namespace http
{
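// Maps a file path to a MIME type string based on its extension
// (declaration only; the matching definition is presumably provided in a separate source file).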
boost::beast::string_view
mime_type(boost::beast::string_view path);
} // http namespace
} // ott namespace
|
FL Davies served on the City Council from 4/20/1940 to 4/20/1948.
|
If $a$ and $b$ are in the same path component of the complement of $s$, then the Borsuk maps from $a$ and $b$ are homotopic in $s$. |
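Here the Borsuk map based at a point $a$ not in $s$ is assumed to be the usual normalized difference map; the statement above does not define it explicitly.

$$b_a(x) \;=\; \frac{x - a}{\lVert x - a \rVert}, \qquad x \in s.$$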
-- Andreas, 2013-03-22
module Issue473a where
data D : Set where
d : D
data P : D → Set where
p : P d
record Rc : Set where
constructor c
field f : D
works : {r : Rc} → P (Rc.f r) → Set
works p = D
works' : (r : Rc) → P (Rc.f r) → Set
works' (c .d) p = D
-- If we remove the constructor, the example fails:
record R : Set where
field f : D
fails : {r : R} → P (R.f r) → Set
fails p = D
-- d != R.f r of type D
-- when checking that the pattern p has type P (R.f r)
-- The error is justified since there is no pattern we could write down
-- for r. It would have to look like
--
-- record { f = .d }
--
-- but anonymous record patterns are not supported.
|
Highly Recommend written by mobile Good good deal!!
Mouse superb!! Clicking is Super silent!! Good!! Sensitivity is great!! Everything is gd!! Delivery fast!! Recommended!!
Recommend written by mobile Looks good.
Fast delivery. Good quality build. Fit to the hand though the scroll is too soft for CAD works. Otherwise perfect.
Fast delivery the service was good.
Looks good when I tested..no button click sound and it fits well in hand ..
Speedy delivery, item came in good condition.
Item is packaged well and the product is good looking. Delivery is very fast.
It has some flickering but overall it’s good!
Hi,is there a way to turn off the led light?
Hi, thanks for your interest. There are 3 switches - OFF, ON & O.
If switch to “ON mode”, LED will only light up when u move the mouse.
If switch to “O” mode, Colourful LED will be in light up mode throughout the usage of the mouse.
ALCATROZ ONLINE SPECIAL - ASIC 3 | ASIC 5 | ASIC 6 ESSENTIAL WIRED MOUSE. Local Stocks and Warranty.
ESSENTIAL MICE - AIRMOUSE | ASIC + PRO | WIRELESS, WIRED + BLUE RAY SENSOR! Free Mousemat Now! Local Stocks with 12 Months Warranty! |
{-# OPTIONS --cubical-compatible --rewriting --confluence-check #-}
module Issue1719.Spans where
open import Issue1719.Common
record Span : Set₁ where
constructor span
field
A B C : Set
f : C → A
g : C → B
open Span public
|
lemma joinable_connected_component_eq: "\<lbrakk>connected T; T \<subseteq> S; connected_component_set S x \<inter> T \<noteq> {}; connected_component_set S y \<inter> T \<noteq> {}\<rbrakk> \<Longrightarrow> connected_component_set S x = connected_component_set S y" |
In October 1920, the battleship King George V arrived to replace Marlborough in the Mediterranean Fleet. Marlborough then returned to Devonport, where she was paid off for a major refit that took place between February 1921 and January 1922. During the refit, range dials were installed, along with another range-finder on the rear superstructure. The aircraft platform was removed from "B" turret. Long-base range-finders were installed on "X" turret. After completing the refit in January 1922, Marlborough was recommissioned and assigned to the Mediterranean, where she replaced Emperor of India. She served as the second command flagship until October. Following the Treaty of Lausanne in 1923, the Allied countries withdrew their occupation forces from Turkey; Marlborough was involved in escorting the troop convoys out of Constantinople.
|
lemma linear_bounded_pos: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::real_normed_vector" assumes lf: "linear f" obtains B where "B > 0" "\<And>x. norm (f x) \<le> B * norm x" |
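Read in conventional notation, the lemma above says that a linear map $f$ from a Euclidean space into a real normed vector space admits a strictly positive bound; the display below is only a restatement of the Isabelle statement.

$$\exists B > 0.\;\; \forall x.\;\; \lVert f(x) \rVert \le B \cdot \lVert x \rVert.$$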
||| A minimalistic testing framework with pretty printing
||| and (not yet implemented) value diffing.
module Test.Mini
import Control.ANSI.SGR
import Generics.Derive
import Text.Show.Pretty
%language ElabReflection
--------------------------------------------------------------------------------
-- Test Results
--------------------------------------------------------------------------------
namespace Success
||| A successful test case
public export
record Success i o where
constructor MkSuccess
input : i
result : o
%runElab derive "Success" [Generic,Meta,Show,Eq,PrettyVal]
namespace Failure
||| A failed test case
public export
record Failure i o where
constructor MkFailure
input : i
result : o
expected : o
%runElab derive "Failure" [Generic,Meta,Show,Eq,PrettyVal]
public export
record Result i o where
constructor MkResult
ok : List (Success i o)
failed : List (Failure i o)
%runElab derive "Result" [Generic,Meta,Show,Eq,Semigroup,Monoid]
export
Semigroup (Result i o) where (<+>) = genAppend
export
Monoid (Result i o) where neutral = genNeutral
--------------------------------------------------------------------------------
-- Running Tests
--------------------------------------------------------------------------------
public export
run : Foldable t
=> (f : i -> Either (Failure i o) (Success i o))
-> t i
-> Result i o
run f = concatMap run'
where run' : i -> Result i o
run' inp = case f inp of
(Left x) => MkResult [] [x]
(Right x) => MkResult [x] []
public export
runEq : (Foldable t, Eq o) => (f : i -> o) -> t (i,o) -> Result i o
runEq f = concatMap run'
where run' : (i,o) -> Result i o
run' (inp,exp) = let res = f inp
in if exp == res
then MkResult [MkSuccess inp exp] []
else MkResult [] [MkFailure inp res exp]
--------------------------------------------------------------------------------
-- ANSI Colorings and Reporting
--------------------------------------------------------------------------------
export
foreground : Color -> String -> String
foreground c s = escapeSGR [SetForeground c] ++ s ++ escapeSGR [Reset]
export
greenOk : String
greenOk = "[" ++ foreground Green "OK" ++ "] "
export
redFailed : String
redFailed = "[" ++ foreground Red "Failed" ++ "] "
export
spaces : String
spaces = " "
export
report : PrettyVal i => PrettyVal o => Result i o -> IO Bool
report (MkResult ok []) =
putStrLn (greenOk ++ show (length ok) ++ " tests run") $> True
report (MkResult ok (f::fs)) =
do putStrLn (redFailed ++ summary)
putStrLn "First failure"
dumpIO f
pure False
where summary : String
summary = unlines [ show (length ok + length fs + 1) ++ " tests run"
, spaces ++ show (length fs + 1) ++ " tests failed"
]
export
testAll : List (IO Bool) -> IO Bool
testAll = map and . traverse (map delay)
|
header "A Typed Language"
(** Score: 5/5
*)
theory GabrielaLimonta
imports "~~/src/HOL/IMP/Star"
begin
subsection "Expressions"
datatype val = Iv int | Bv bool
type_synonym vname = string
type_synonym state = "vname \<Rightarrow> val"
datatype exp = N int | V vname | Plus exp exp |
Bc bool | Not exp | And exp exp | Less exp exp
inductive eval :: "exp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool" where
"eval (N i) s (Iv i)" |
"eval (V x) s (s x)" |
"eval a1 s (Iv i1) \<Longrightarrow> eval a2 s (Iv i2)
\<Longrightarrow> eval (Plus a1 a2) s (Iv(i1+i2))" |
"eval (Bc v) s (Bv v)" |
"eval b s (Bv bv) \<Longrightarrow> eval (Not b) s (Bv(\<not> bv))" |
"eval b1 s (Bv bv1) \<Longrightarrow> eval b2 s (Bv bv2) \<Longrightarrow> eval (And b1 b2) s (Bv(bv1 & bv2))" |
"eval a1 s (Iv i1) \<Longrightarrow> eval a2 s (Iv i2) \<Longrightarrow> eval (Less a1 a2) s (Bv(i1 < i2))"
inductive_cases [elim!]:
"eval (N i) s v"
"eval (V x) s v"
"eval (Plus a1 a2) s v"
"eval (Bc b) s v"
"eval (Not b) s v"
"eval (And b1 b2) s v"
"eval (Less a1 a2) s v"
subsection "Syntax of Commands"
(* a copy of Com.thy - keep in sync! *)
datatype
com = SKIP
| Assign vname exp ("_ ::= _" [1000, 61] 61)
| Seq com com ("_;; _" [60, 61] 60)
| If exp com com ("IF _ THEN _ ELSE _" [0, 0, 61] 61)
| While exp com ("WHILE _ DO _" [0, 61] 61)
subsection "Small-Step Semantics of Commands"
inductive
small_step :: "(com \<times> state) \<Rightarrow> (com \<times> state) \<Rightarrow> bool" (infix "\<rightarrow>" 55)
where
Assign: "eval a s v \<Longrightarrow> (x ::= a, s) \<rightarrow> (SKIP, s(x := v))" |
Seq1: "(SKIP;;c,s) \<rightarrow> (c,s)" |
Seq2: "(c1,s) \<rightarrow> (c1',s') \<Longrightarrow> (c1;;c2,s) \<rightarrow> (c1';;c2,s')" |
IfTrue: "eval b s (Bv True) \<Longrightarrow> (IF b THEN c1 ELSE c2,s) \<rightarrow> (c1,s)" |
IfFalse: "eval b s (Bv False) \<Longrightarrow> (IF b THEN c1 ELSE c2,s) \<rightarrow> (c2,s)" |
While: "(WHILE b DO c,s) \<rightarrow> (IF b THEN c;; WHILE b DO c ELSE SKIP,s)"
lemmas small_step_induct = small_step.induct[split_format(complete)]
subsection "The Type System"
datatype ty = Ity | Bty
type_synonym tyenv = "vname \<Rightarrow> ty"
inductive etyping :: "tyenv \<Rightarrow> exp \<Rightarrow> ty \<Rightarrow> bool"
("(1_/ \<turnstile>/ (_ :/ _))" [50,0,50] 50)
where
Ic_ty: "\<Gamma> \<turnstile> N i : Ity" |
V_ty: "\<Gamma> \<turnstile> V x : \<Gamma> x" |
Plus_ty: "\<Gamma> \<turnstile> a1 : Ity \<Longrightarrow> \<Gamma> \<turnstile> a2 : Ity \<Longrightarrow> \<Gamma> \<turnstile> Plus a1 a2 : Ity" |
B_ty: "\<Gamma> \<turnstile> Bc v : Bty" |
Not_ty: "\<Gamma> \<turnstile> b : Bty \<Longrightarrow> \<Gamma> \<turnstile> Not b : Bty" |
And_ty: "\<Gamma> \<turnstile> b1 : Bty \<Longrightarrow> \<Gamma> \<turnstile> b2 : Bty \<Longrightarrow> \<Gamma> \<turnstile> And b1 b2 : Bty" |
Less_ty: "\<Gamma> \<turnstile> a1 : Ity \<Longrightarrow> \<Gamma> \<turnstile> a2 : Ity \<Longrightarrow> \<Gamma> \<turnstile> Less a1 a2 : Bty"
inductive ctyping :: "tyenv \<Rightarrow> com \<Rightarrow> bool" (infix "\<turnstile>" 50) where
Skip_ty: "\<Gamma> \<turnstile> SKIP" |
Assign_ty: "\<Gamma> \<turnstile> a : \<Gamma>(x) \<Longrightarrow> \<Gamma> \<turnstile> x ::= a" |
Seq_ty: "\<Gamma> \<turnstile> c1 \<Longrightarrow> \<Gamma> \<turnstile> c2 \<Longrightarrow> \<Gamma> \<turnstile> c1;;c2" |
If_ty: "\<Gamma> \<turnstile> b : Bty \<Longrightarrow> \<Gamma> \<turnstile> c1 \<Longrightarrow> \<Gamma> \<turnstile> c2 \<Longrightarrow> \<Gamma> \<turnstile> IF b THEN c1 ELSE c2" |
While_ty: "\<Gamma> \<turnstile> b : Bty \<Longrightarrow> \<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> WHILE b DO c"
inductive_cases [elim!]:
"\<Gamma> \<turnstile> x ::= a" "\<Gamma> \<turnstile> c1;;c2"
"\<Gamma> \<turnstile> IF b THEN c1 ELSE c2"
"\<Gamma> \<turnstile> WHILE b DO c"
subsection "Well-typed Programs Do Not Get Stuck"
fun type :: "val \<Rightarrow> ty" where
"type (Iv i) = Ity" |
"type (Bv r) = Bty"
lemma type_eq_Ity[simp]: "type v = Ity \<longleftrightarrow> (\<exists>i. v = Iv i)"
by (cases v) simp_all
lemma type_eq_Bty[simp]: "type v = Bty \<longleftrightarrow> (\<exists>r. v = Bv r)"
by (cases v) simp_all
definition styping :: "tyenv \<Rightarrow> state \<Rightarrow> bool" (infix "\<turnstile>" 50)
where "\<Gamma> \<turnstile> s \<longleftrightarrow> (\<forall>x. type (s x) = \<Gamma> x)"
lemma epreservation:
"\<Gamma> \<turnstile> a : \<tau> \<Longrightarrow> eval a s v \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> type v = \<tau>"
proof (induction rule: etyping.induct)
print_cases
case (Ic_ty \<Gamma> i)
thus ?case by auto
next
case (V_ty \<Gamma> x)
thus ?case using styping_def by auto
next
case (Plus_ty \<Gamma> a1 a2)
thus ?case by auto
next
case (B_ty \<Gamma> v)
thus ?case by auto
next
case (Not_ty \<Gamma> b)
thus ?case by auto
next
case (And_ty \<Gamma> b1 b2)
thus ?case by auto
next
case (Less_ty \<Gamma> a1 a2)
thus ?case by auto
qed
lemma eprogress: "\<Gamma> \<turnstile> a : \<tau> \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> \<exists>v. eval a s v"
proof (induction rule: etyping.induct)
print_cases
case (Ic_ty \<Gamma>)
thus ?case using eval.intros(1) by auto
next
case (V_ty \<Gamma>)
thus ?case using eval.intros(2) by auto
next
case (Plus_ty \<Gamma> a1 a2)
from this obtain v1 v2 where "eval a1 s v1" and "eval a2 s v2" by blast
from this and Plus_ty and epreservation have "type v1 = Ity" and "type v2 = Ity" by auto
from this and `eval a1 s v1` and `eval a2 s v2` and Plus_ty
obtain i1 i2 where "v1 = (Iv i1)" and "v2 = (Iv i2)" by auto
from this and Plus_ty and `eval a1 s v1` and `eval a2 s v2` and epreservation and eval.intros(3)
show ?case by blast
next
case (B_ty \<Gamma>)
thus ?case using eval.intros(4) by auto
next
case (Not_ty \<Gamma> b)
from this obtain v where "eval b s v" by blast
from this and Not_ty and epreservation have "type v = Bty" by auto
from this and `eval b s v` and Not_ty obtain bv where "v = (Bv bv)" by auto
from this and Not_ty and `eval b s v` and epreservation and eval.intros(5) show ?case by blast
next
case (And_ty \<Gamma> b1 b2)
from this obtain v1 v2 where "eval b1 s v1" and "eval b2 s v2" by blast
from this and And_ty and epreservation have "type v1 = Bty" and "type v2 = Bty" by auto
from this and `eval b1 s v1` and `eval b2 s v2` and And_ty
obtain bv1 bv2 where "v1 = (Bv bv1)" and "v2 = (Bv bv2)" by auto
from this and And_ty and `eval b1 s v1` and `eval b2 s v2` and epreservation and eval.intros(6)
show ?case by blast
next
case (Less_ty \<Gamma> a1 a2)
from this obtain v1 v2 where "eval a1 s v1" and "eval a2 s v2" by blast
from this and Less_ty and epreservation have "type v1 = Ity" and "type v2 = Ity" by auto
from this and `eval a1 s v1` and `eval a2 s v2` and Less_ty
obtain i1 i2 where "v1 = (Iv i1)" and "v2 = (Iv i2)" by auto
from this and Less_ty and `eval a1 s v1` and `eval a2 s v2` and epreservation and eval.intros(7)
show ?case by blast
qed
theorem progress:
"\<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> c \<noteq> SKIP \<Longrightarrow> \<exists>cs'. (c,s) \<rightarrow> cs'"
proof (induction rule: ctyping.induct)
print_cases
case (Skip_ty \<Gamma>)
thus ?case by auto
next
case (Assign_ty \<Gamma> a x)
from this and eprogress obtain v where "eval a s v" by blast
from this and Assign show ?case by blast
next
case (Seq_ty \<Gamma> c1 c2)
thus ?case by (metis PairE Seq1 Seq2)
next
case (If_ty \<Gamma> b c1 c2)
from this and eprogress obtain v where "eval b s v" by blast
moreover have "eval b s (Bv False) \<Longrightarrow> (IF b THEN c1 ELSE c2, s) \<rightarrow> (c2, s)" using IfFalse by auto
moreover have "eval b s (Bv True) \<Longrightarrow> (IF b THEN c1 ELSE c2, s) \<rightarrow> (c1, s)" using IfTrue by auto
ultimately show ?case using If_ty and epreservation and type_eq_Bty by metis
next
case (While_ty \<Gamma> b c)
from this have "(WHILE b DO c, s) \<rightarrow> (IF b THEN c;; WHILE b DO c ELSE SKIP, s)" using While by blast
thus ?case by auto
qed
theorem styping_preservation:
"(c,s) \<rightarrow> (c',s') \<Longrightarrow> \<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> \<Gamma> \<turnstile> s'"
proof (induction rule: small_step_induct)
print_cases
case (Assign a s v x)
thus ?case using styping_def and epreservation by auto
next
case (Seq1 c s)
thus ?case by auto
next
case (Seq2 c1 s c1' s' c2)
thus ?case by auto
next
case (IfTrue b s c1 c2)
thus ?case by auto
next
case (IfFalse b s c1 c2)
thus ?case by auto
next
case (While b c s)
thus ?case by auto
qed
theorem ctyping_preservation:
"(c,s) \<rightarrow> (c',s') \<Longrightarrow> \<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> c'"
proof (induction rule: small_step_induct)
print_cases
case (Assign a s v)
thus ?case using ctyping.Skip_ty by simp
next
case (Seq1 c)
thus ?case by auto
next
case (Seq2 c1 s c1' s' c2)
thus ?case using ctyping.Seq_ty and small_step.Seq2 by blast
next
case (IfTrue b s c1 c2)
thus ?case by auto
next
case (IfFalse b s c1 c2)
thus ?case by auto
next
case (While b c)
thus ?case using ctyping.intros by auto
qed
abbreviation small_steps :: "com * state \<Rightarrow> com * state \<Rightarrow> bool" (infix "\<rightarrow>*" 55)
where "x \<rightarrow>* y == star small_step x y"
theorem type_sound:
"(c,s) \<rightarrow>* (c',s') \<Longrightarrow> \<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> c' \<noteq> SKIP
\<Longrightarrow> \<exists>cs''. (c',s') \<rightarrow> cs''"
proof (induction rule: star_induct)
print_cases
case (refl a b)
thus ?case using progress by auto
next
case (step a1 b1 a' b' a2 b2)
thus ?case using ctyping_preservation and styping_preservation by auto
qed
end
|
Require Export Iron.Language.SystemF2Cap.Type.Exp.
Require Export Iron.Language.SystemF2Cap.Type.Operator.LiftTT.
Require Export Iron.Language.SystemF2Cap.Type.Operator.LowerTT.
Require Export Iron.Language.SystemF2Cap.Type.Relation.WfT.
(********************************************************************)
(* Substitution of Types in Types. *)
Fixpoint substTT (d: nat) (u: ty) (tt: ty) : ty
:= match tt with
| TVar ix
=> match nat_compare ix d with
| Eq => u
| Gt => TVar (ix - 1)
| _ => TVar ix
end
| TForall k t => TForall k (substTT (S d) (liftTT 1 0 u) t)
| TApp t1 t2 => TApp (substTT d u t1) (substTT d u t2)
| TSum t1 t2 => TSum (substTT d u t1) (substTT d u t2)
| TBot k => TBot k
| TCon0 tc => TCon0 tc
| TCon1 tc t1 => TCon1 tc (substTT d u t1)
| TCon2 tc t1 t2 => TCon2 tc (substTT d u t1) (substTT d u t2)
| TCap _ => tt
end.
(********************************************************************)
(* What might happen when we substitute for a variable.
   This can be easier to use than the raw substTT definition. *)
Lemma substTT_TVar_cases
: forall n1 n2 t1
, (substTT n1 t1 (TVar n2) = t1 /\ n1 = n2)
\/ (substTT n1 t1 (TVar n2) = TVar (n2 - 1) /\ n1 < n2)
\/ (substTT n1 t1 (TVar n2) = TVar n2 /\ n1 > n2).
Proof.
intros.
unfold substTT.
lift_cases; burn.
Qed.
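(* A small illustrative sanity check of the three cases above (these Example
   names are ours, not part of the original development); each equation holds
   directly by computation of substTT. *)
Example substTT_TVar_eq_ex : forall u, substTT 1 u (TVar 1) = u.
Proof. intros; reflexivity. Qed.
Example substTT_TVar_gt_ex : forall u, substTT 1 u (TVar 3) = TVar 2.
Proof. intros; reflexivity. Qed.
Example substTT_TVar_lt_ex : forall u, substTT 1 u (TVar 0) = TVar 0.
Proof. intros; reflexivity. Qed.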
Lemma substTT_wfT_above
: forall d ix t t2
, WfT d t
-> substTT (d + ix) t2 t = t.
Proof.
intros. gen d ix t2.
induction t; rip; inverts H; simpl; f_equal; burn.
Case "TVar".
norm; omega.
lets D: IHt H1. burn.
Qed.
Hint Resolve substTT_wfT_above.
Lemma substTT_wfT
: forall d ix t1 t2
, ix <= d
-> WfT (S d) t1
-> WfT d t2
-> WfT d (substTT ix t2 t1).
Proof.
intros. gen d ix t2.
induction t1; rip; inverts H0; simpl; snorm.
Qed.
Hint Resolve substTT_wfT.
(* Closing substitution of types in types *)
Lemma substTT_closing
: forall t1 t2
, WfT 1 t1
-> ClosedT t2
-> ClosedT (substTT 0 t2 t1).
Proof. eauto. Qed.
Hint Resolve substTT_closing.
Lemma substTT_closedT_id
: forall d t t2
, ClosedT t
-> substTT d t2 t = t.
Proof.
intros. rrwrite (d = d + 0). eauto.
Qed.
Hint Resolve substTT_closedT_id.
Lemma substTT_liftTT_wfT1
: forall t1 t2
, WfT 1 t1
-> ClosedT t2
-> substTT 0 t2 t1 = liftTT 1 0 (substTT 0 t2 t1).
Proof.
intros.
have (ClosedT (substTT 0 t2 t1)).
rrwrite (liftTT 1 0 (substTT 0 t2 t1) = substTT 0 t2 t1).
trivial.
Qed.
Hint Resolve substTT_liftTT_wfT1.
(* Substituting into TBot is still TBot. *)
Lemma substTT_TBot
: forall d t2 k
, substTT d t2 (TBot k) = TBot k.
Proof. burn. Qed.
Hint Resolve substTT_TBot.
Hint Rewrite substTT_TBot : global.
|
module Circuits.NetList.Check
import Decidable.Equality
import Data.Nat
import Data.String
import Data.List.Elem
import Data.List.Quantifiers
import Data.Fin
import Toolkit.Decidable.Informative
import Toolkit.Data.Location
import Toolkit.Data.Whole
import Toolkit.Data.List.DeBruijn
import Ref
import Circuits.NetList.Types
import Circuits.NetList.Terms
import Circuits.NetList.AST
%default total
data Entry : (String,Ty) -> Type where
MkEntry : (name : String)
-> (type : Ty)
-> Entry (MkPair name type)
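-- An Entry witnesses that a declared name carries the recorded type; an Env
-- then pairs every (name, type) in the typing context with such an Entry.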
Env : List (String,Ty) -> Type
Env = Env (String,Ty) Entry
export
data Error = Mismatch Ty Ty
| MismatchD DType DType
| NotBound String
| VectorExpected
| PortChanExpected
| PortExpected
| OOB Nat Nat
| ErrI String
| Err FileContext Error
export
Show Error where
show (Mismatch x y)
= "Type Mismatch:\n\n"
<+>
unlines [unwords ["\tExpected:",show x], unwords ["\tGiven:", show y]]
show (MismatchD x y)
= "Type Mismatch:\n\n"
<+>
unlines [unwords ["\tExpected:",show x], unwords ["\tGiven:", show y]]
show (NotBound x)
= unwords ["Undeclared variable:", x]
show (VectorExpected)
= "Vector Expected"
show (PortChanExpected)
= "Port or Wire Expected"
show (PortExpected)
= "Port Expected"
show (ErrI msg)
= "Internal Err: " <+> msg
show (OOB x y)
= unwords ["Out of Bounds:" , show x, "is not within", show y]
show (Err x y) = unwords [show x, show y]
strip : {ctxt : List (String, Ty)}
-> Elem (s,type) ctxt -> Elem type (map Builtin.snd ctxt)
strip Here = Here
strip (There x) = There (strip x)
public export
TyCheck : Type -> Type
TyCheck = Either Error
lift : Dec a -> Error -> TyCheck a
lift (Yes prf) _ = Right prf
lift (No contra) e = Left e
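-- For example, `lift (decEq exty x) (Err fc (MismatchD exty x))` (as used in
-- checkPort below) turns a decidable equality test into a TyCheck step that
-- either yields the equality proof or fails with a located mismatch error.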
namespace Elab
export
getDataType : FileContext
-> (term : (type ** Term ctxt type))
-> TyCheck DType
getDataType fc (MkDPair (TyPort (d,x)) snd) = pure x
getDataType fc (MkDPair (TyChan x) snd) = pure x
getDataType fc (MkDPair type snd)
= Left (Err fc PortChanExpected)
||| Need to make sure that the indices are in the correct direction.
rewriteTerm : Cast flow expected
-> Index INOUT
-> Term ctxt (TyPort (flow,type))
-> Term ctxt (TyPort (flow,type))
rewriteTerm c d (Var prf) = Var prf
rewriteTerm BI (UP UB) (Index idir what idx)
= Index (UP UB) (rewriteTerm BI (UP UB) what) idx
rewriteTerm BI (DOWN DB) (Index idir what idx)
= Index (DOWN DB) (rewriteTerm BI (DOWN DB) what) idx
rewriteTerm BO (UP UB) (Index idir what idx)
= Index (UP UB) (rewriteTerm BI (UP UB) what) idx
rewriteTerm BO (DOWN DB) (Index idir what idx)
= Index (DOWN DB) (rewriteTerm BI (DOWN DB) what) idx
rewriteTerm BI _ (Project WRITE _) impossible
rewriteTerm BO _ (Project WRITE _) impossible
rewriteTerm BI _ (Project READ _) impossible
rewriteTerm BO _ (Project READ _) impossible
rewriteTerm BI _ (Cast BI _) impossible
rewriteTerm BO _ (Cast BI _) impossible
rewriteTerm BI _ (Cast BO _) impossible
rewriteTerm BO _ (Cast BO _) impossible
||| When casting we finally know which direction indexing should go, so let's fix that.
shouldCast : {type : DType}
-> (flow,expected : Direction)
-> (term : Term ctxt (TyPort (flow,type)))
-> Dec ( Cast flow expected
, Term ctxt (TyPort (expected,type))
)
shouldCast flow expected term with (Cast.cast flow expected)
shouldCast INOUT INPUT term | (Yes BI) with (dirFromCast BI)
shouldCast INOUT INPUT term | (Yes BI) | idir
= Yes $ MkPair BI (Cast BI (rewriteTerm BI idir term))
shouldCast INOUT OUTPUT term | (Yes BO) with (dirFromCast BO)
shouldCast INOUT OUTPUT term | (Yes BO) | idir
= Yes $ MkPair BO (Cast BO (rewriteTerm BO idir term))
shouldCast flow expected term | (No contra)
= No (\(prf,t) => contra prf)
portCast : {type : DType}
-> {flow : Direction}
-> FileContext
-> (expected : Direction)
-> (term : Term ctxt (TyPort (flow,type)))
-> TyCheck (Term ctxt (TyPort (expected,type)))
portCast {type} {flow = flow} fc exp term with (shouldCast flow exp term)
portCast {type} {flow = flow} fc exp term | Yes (p,e)
= Right e
portCast {type} {flow = flow} fc exp term | No contra with (decEq flow exp)
portCast {type} {flow = exp} fc exp term | No contra | (Yes Refl)
= Right term
portCast {type} {flow = flow} fc exp term | No contra | (No f)
= Left (Err fc (Mismatch (TyPort (exp,type)) (TyPort (flow,type))))
export
checkPort : (fc : FileContext)
-> (exdir : Direction)
-> (expty : DType)
-> (term : (type ** Term ctxt type))
-> TyCheck (Term ctxt (TyPort (exdir,expty)))
-- [ NOTE ]
--
-- Ports: check that the data type matches, then cast the port's
-- direction to the expected one if needed.
checkPort fc exdir exty (MkDPair (TyPort (given,x)) term)
= do Refl <- lift (decEq exty x)
(Err fc (MismatchD exty x))
portCast fc exdir term
-- [ NOTE ]
--
-- READ implies INPUT
checkPort fc INPUT exty (MkDPair (TyChan x) term)
= do Refl <- lift (decEq exty x)
(Err fc (MismatchD exty x))
pure (Project READ term)
-- [ NOTE ]
--
-- WRITE implies OUTPUT
checkPort fc OUTPUT exty (MkDPair (TyChan x) term)
= do Refl <- lift (decEq exty x)
(Err fc (MismatchD exty x))
Right (Project WRITE term)
-- [ NOTE ]
--
-- INOUT Chan's impossible.
checkPort fc INOUT exty (MkDPair (TyChan x) term)
= Left (Err fc (ErrI "INOUT CHAN not expected"))
-- [ NOTE ]
--
-- Gates/TyUnit not expected
checkPort fc exdir exty (MkDPair type term)
= Left (Err fc (Mismatch (TyPort (exdir,exty)) type))
export
indexDir : {flow : Direction}
-> (fc : FileContext)
-> (term : Term ctxt (TyPort (flow,BVECT (W (S n) ItIsSucc) type)))
-> TyCheck (Index flow)
indexDir {flow} fc term with (flow)
indexDir {flow = flow} fc term | INPUT
= pure (DOWN DI)
indexDir {flow = flow} fc term | OUTPUT
= pure (UP UO)
indexDir {flow = flow} fc (Var prf) | INOUT
= pure (UP UB)
indexDir {flow = flow} fc (Index idir what idx) | INOUT
= pure idir
indexDir {flow = flow} fc (Project how what) | INOUT
= Left (Err fc (ErrI "Shouldn't happen impossible indexing a projection with inout"))
indexDir {flow = flow} fc (Cast x what) | INOUT
= Left (Err fc (ErrI "Shouldn't happen impossible indexing a cast with inout"))
namespace TypeCheck
export
typeCheck : {ctxt : List (String,Ty)}
-> (curr : Env ctxt)
-> (ast : AST)
-> TyCheck (DPair Ty (Term (map Builtin.snd ctxt)))
typeCheck {ctxt} curr (Var x)
= do (ty ** prf) <- lift (isIndex (get x) ctxt)
(Err (span x) (NotBound (get x)))
pure (ty ** Var (strip prf))
typeCheck curr (Port fc flow ty n body)
= do (TyUnit ** term) <- typeCheck (MkEntry (get n) (TyPort (flow,ty))::curr) body
| (type ** _) => Left (Err fc (Mismatch TyUnit type))
pure (_ ** Port flow ty term)
typeCheck curr (Wire fc ty n body)
= do (TyUnit ** term) <- typeCheck (MkEntry (get n) (TyChan ty)::curr) body
| (type ** _) => Left (Err fc (Mismatch TyUnit type))
pure (_ ** Wire ty term)
typeCheck curr (GateDecl fc n g body)
= do (TyGate ** gate) <- typeCheck curr g
| (type ** _) => Left (Err fc (Mismatch TyGate type))
(TyUnit ** term) <- typeCheck (MkEntry (get n) (TyGate)::curr) body
| (type ** _) => Left (Err fc (Mismatch TyUnit type))
pure (_ ** GateDecl gate term)
typeCheck curr (Assign fc i o rest)
= do termI <- typeCheck curr i
ity <- getDataType fc termI
termO <- typeCheck curr o
oty <- getDataType fc termO
Refl <- lift (decEq ity oty)
(Err fc (MismatchD ity oty))
i' <- checkPort fc INPUT ity termI
o' <- checkPort fc OUTPUT ity termO
(TyUnit ** r') <- typeCheck curr rest
| (type ** _) => Left (Err fc (Mismatch TyUnit type))
pure (_ ** Assign i' o' r')
typeCheck curr (Mux fc o c l r)
= do termO <- typeCheck curr o
termC <- typeCheck curr c
termL <- typeCheck curr l
termR <- typeCheck curr r
o' <- checkPort fc OUTPUT LOGIC termO
c' <- checkPort fc INPUT LOGIC termC
l' <- checkPort fc INPUT LOGIC termL
r' <- checkPort fc INPUT LOGIC termR
pure (_ ** Mux o' c' l' r')
typeCheck curr (GateU fc k o i)
= do termO <- typeCheck curr o
termI <- typeCheck curr i
o' <- checkPort fc OUTPUT LOGIC termO
i' <- checkPort fc INPUT LOGIC termI
pure (_ ** GateU k o' i')
typeCheck curr (GateB fc k o l r)
= do termO <- typeCheck curr o
termL <- typeCheck curr l
termR <- typeCheck curr r
o' <- checkPort fc OUTPUT LOGIC termO
l' <- checkPort fc INPUT LOGIC termL
r' <- checkPort fc INPUT LOGIC termR
pure (_ ** GateB k o' l' r')
typeCheck curr (Index fc idx t)
= do (TyPort (flow,BVECT (W (S n) ItIsSucc) type) ** term) <- typeCheck curr t
| (type ** term)
=> Left (Err fc VectorExpected)
case natToFin idx (S n) of
Nothing => Left (Err fc (OOB idx (S n)))
Just idx' => do idir <- indexDir fc term
pure (_ ** Index idir term idx')
typeCheck curr (Split fc a b i)
= do termA <- typeCheck curr a
termB <- typeCheck curr b
termI <- typeCheck curr i
a' <- checkPort fc OUTPUT LOGIC termA
b' <- checkPort fc OUTPUT LOGIC termB
i' <- checkPort fc INPUT LOGIC termI
pure (_ ** Split a' b' i')
typeCheck curr (Collect fc o l r)
= do (TyPort (OUTPUT,BVECT (W (S (S Z)) ItIsSucc) type) ** o') <- typeCheck curr o
| (TyPort (flow,BVECT (W (S (S n)) ItIsSucc) type) ** term)
=> Left (Err fc (Mismatch (TyPort (OUTPUT, BVECT (W (S (S Z)) ItIsSucc) type))
(TyPort (flow, BVECT (W (S (S n)) ItIsSucc) type))
))
| (type ** term)
=> Left (Err fc VectorExpected)
termL <- typeCheck curr l
termR <- typeCheck curr r
l' <- checkPort fc INPUT type termL
r' <- checkPort fc INPUT type termR
pure (_ ** Collect o' l' r')
typeCheck curr (Shim fc dir thing)
= do t <- typeCheck curr thing
dtype <- getDataType fc t
term <- checkPort fc dir dtype t
pure (_ ** term)
typeCheck curr (Stop x)
= pure (_ ** Stop)
namespace Design
export
typeCheck : (ast : AST) -> TyCheck (Term Nil TyUnit)
typeCheck ast with (typeCheck Nil ast)
typeCheck ast | (Left x) = Left x
typeCheck ast | (Right (MkDPair TyUnit term)) = Right term
typeCheck ast | (Right (MkDPair ty snd)) = Left (Mismatch TyUnit ty)
export
typeCheckIO : (ast : AST) -> IO (TyCheck (Term Nil TyUnit))
typeCheckIO ast = pure (typeCheck ast)
-- [ EOF ]
|
module Test.Int8
import Data.Prim.Int8
import Data.SOP
import Hedgehog
import Test.RingLaws
allInt8 : Gen Int8
allInt8 = int8 (linear (-0x80) 0x7f)
prop_ltMax : Property
prop_ltMax = property $ do
b8 <- forAll allInt8
(b8 <= MaxInt8) === True
prop_ltMin : Property
prop_ltMin = property $ do
b8 <- forAll allInt8
(b8 >= MinInt8) === True
prop_comp : Property
prop_comp = property $ do
[m,n] <- forAll $ np [allInt8, allInt8]
toOrdering (comp m n) === compare m n
export
props : Group
props = MkGroup "Int8" $
[ ("prop_ltMax", prop_ltMax)
, ("prop_ltMin", prop_ltMin)
, ("prop_comp", prop_comp)
] ++ ringProps allInt8
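-- A minimal runner sketch (ours, not part of the suite): it assumes the
-- Hedgehog package's `test : List Group -> IO ()` entry point; in the real
-- test suite such a main usually lives in a dedicated Main module.
main : IO ()
main = test [props]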
|
module Complex
import Control.Monad.TransitionIndexed
import Control.Monad.TransitionIndexed.Do
data Substate = Sub1 | Sub2
data ComplexState = First Substate | Second | Third
data IsFirst : ComplexState -> Type where
ItIsFirst : IsFirst (First _)
||| Result of moving from Second to Third state
data Trans2Result = OK | Error
data ComplexCmd : (ty : Type) ->
ComplexState ->
(ty -> ComplexState) ->
Type where
Init : ComplexCmd () (First Sub1) (const (First Sub1))
SubTrans : ComplexCmd () (First Sub1) (const (First Sub2))
Trans1Easy : IsFirst s => ComplexCmd () s (const Second)
Trans1Hard : ComplexCmd () (First Sub2) (const Second)
Trans2 : ComplexCmd Trans2Result Second (\case OK => Third; Error => Second)
Cheat : ComplexCmd () Second (const Third)
Pure : (res : ty) -> ComplexCmd ty (state_fn res) state_fn
Bind : ComplexCmd a state1 state2_fn ->
((res: a) -> ComplexCmd b (state2_fn res) state3_fn) ->
ComplexCmd b state1 state3_fn
TransitionIndexedPointed ComplexState ComplexCmd where
pure = Pure
TransitionIndexedMonad ComplexState ComplexCmd where
bind = Bind
easyProg : ComplexCmd () (First Sub1) (const Third)
easyProg = do
Init
Trans1Easy
OK <- Trans2
| Error => Cheat
Pure ()
hardProg : ComplexCmd () (First Sub1) (const Third)
hardProg = do
Init
SubTrans
Trans1Hard
res <- Trans2
case res of
OK => Pure ()
Error => Cheat
|